From 6c2be2bf65357757e0e50c311be9acfdf7bc1f5d Mon Sep 17 00:00:00 2001
From: Marina von Steinkirch
Date: Sun, 14 Aug 2016 21:56:44 -0700
Subject: [PATCH] numpy readme

---
 Numpy/README.md  | 71 ++++++++++++++++++++++++++++++++++++++++++++++
 Numpy/dropout.py | 40 ++++++++++++++++++++
 2 files changed, 111 insertions(+)
 create mode 100644 Numpy/dropout.py

diff --git a/Numpy/README.md b/Numpy/README.md
index e69de29..5a019de 100644
--- a/Numpy/README.md
+++ b/Numpy/README.md
@@ -0,0 +1,71 @@
+## Numpy Resources
+
+### Arrays
+
+* A numpy array is a grid of values, all of the same type.
+* The number of dimensions is the rank of the array.
+* The shape of an array is a tuple of integers giving the size of the array along each dimension.
+
+
+```
+import numpy as np
+
+a = np.array([1, 2, 3])  # Create a rank 1 array
+print(a.shape)           # Prints "(3,)"
+```
+
+```
+np.asarray([])        # Convert a sequence to an array
+np.asarray([]).shape  # Prints "(0,)" -- even an empty array has a shape
+```
+
+
+* There are many functions to create arrays:
+
+```
+a = np.zeros((2,2))          # Create an array of all zeros
+b = np.ones((1,2))           # Create an array of all ones
+c = np.full((2,2), 7)        # Create a constant array
+d = np.eye(2)                # Create a 2x2 identity matrix
+e = np.random.random((2,2))  # Create an array filled with random values
+```
+
+* Products:
+
+
+```
+x = np.array([[1,2],[3,4]])
+
+v = np.array([9,10])
+w = np.array([11, 12])
+
+# Inner product of vectors; both print "219"
+print(v.dot(w))
+print(np.dot(v, w))
+
+# Matrix / vector product; both print "[29 67]"
+print(x.dot(v))
+print(np.dot(x, v))
+```
+
+* Sum:
+
+```
+print(np.sum(x))          # Compute sum of all elements; prints "10"
+print(np.sum(x, axis=0))  # Compute sum of each column; prints "[4 6]"
+print(np.sum(x, axis=1))  # Compute sum of each row; prints "[3 7]"
+```
+
+* Broadcasting is a mechanism that allows numpy to work with arrays of different shapes when performing arithmetic operations. Frequently we have a smaller array and a larger array, and we want to use the smaller array multiple times to perform some operation on the larger array; a minimal sketch follows.
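+* For example, we can add a rank 1 array to each row of a rank 2 array without an explicit loop (the `x` and `v` below are fresh example arrays, not the ones above):
+
+```
+x = np.array([[1, 2, 3], [4, 5, 6]])  # x has shape (2, 3)
+v = np.array([10, 20, 30])            # v has shape (3,)
+
+# v is broadcast across each row of x
+print(x + v)  # Prints "[[11 22 33]
+              #          [14 25 36]]"
+```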
+
+
diff --git a/Numpy/dropout.py b/Numpy/dropout.py
new file mode 100644
index 0000000..077a06e
--- /dev/null
+++ b/Numpy/dropout.py
@@ -0,0 +1,40 @@
+"""
+Inverted Dropout: Recommended implementation example.
+We drop and scale at train time and don't do anything at test time.
+"""
+
+import numpy as np
+
+p = 0.5  # probability of keeping a unit active. higher = less dropout
+
+def train_step(X):
+    # forward pass for an example 3-layer neural network
+    # (W1, W2, W3 and b1, b2, b3 are the weights and biases, assumed defined)
+    H1 = np.maximum(0, np.dot(W1, X) + b1)
+    U1 = (np.random.rand(*H1.shape) < p) / p  # first dropout mask. Notice /p!
+    H1 *= U1  # drop!
+    H2 = np.maximum(0, np.dot(W2, H1) + b2)
+    U2 = (np.random.rand(*H2.shape) < p) / p  # second dropout mask. Notice /p!
+    H2 *= U2  # drop!
+    out = np.dot(W3, H2) + b3
+
+    # backward pass: compute gradients... (not shown)
+    # perform parameter update... (not shown)
+
+def predict(X):
+    # ensembled forward pass
+    H1 = np.maximum(0, np.dot(W1, X) + b1)  # no scaling necessary
+    H2 = np.maximum(0, np.dot(W2, H1) + b2)
+    out = np.dot(W3, H2) + b3
+    return out
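+
+# Usage sketch: the layer sizes below are arbitrary choices for illustration
+# only; in practice the weights and biases would come from training.
+np.random.seed(0)
+W1, b1 = 0.01 * np.random.randn(100, 10), np.zeros(100)
+W2, b2 = 0.01 * np.random.randn(100, 100), np.zeros(100)
+W3, b3 = 0.01 * np.random.randn(1, 100), np.zeros(1)
+
+X = np.random.randn(10)  # one example input vector
+train_step(X)            # forward pass with dropout (updates not shown)
+print(predict(X))        # forward pass without dropout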