numpy readme
This commit is contained in: parent db89b720d8, commit 6c2be2bf65
2 changed files with 85 additions and 0 deletions
@@ -0,0 +1,60 @@
## Numpy Resources

### Arrays

* A grid of values, all of the same type.
* The number of dimensions is the rank of the array.
* The shape of an array is a tuple of integers giving the size of the array along each dimension.

```
import numpy as np

a = np.array([1, 2, 3])  # Create a rank 1 array
print(a.shape)           # Prints "(3,)"
```

* `np.asarray` converts a sequence to an array; note that an empty list becomes an array of shape `(0,)`:

```
print(np.asarray([]))        # Prints "[]"
print(np.asarray([]).shape)  # Prints "(0,)"
```

* Many functions to create arrays:

```
a = np.zeros((2, 2))          # Create an array of all zeros
b = np.ones((1, 2))           # Create an array of all ones
c = np.full((2, 2), 7)        # Create a constant array filled with 7
d = np.eye(2)                 # Create a 2x2 identity matrix
e = np.random.random((2, 2))  # Create an array of random values in [0, 1)
```

* Products:

```
x = np.array([[1, 2], [3, 4]])

v = np.array([9, 10])
w = np.array([11, 12])

# Inner product of vectors; both print "219"
print(v.dot(w))
print(np.dot(v, w))

# Matrix / vector product; both print "[29 67]"
print(x.dot(v))
print(np.dot(x, v))
```

* Sum:

```
print(np.sum(x))          # Compute sum of all elements; prints "10"
print(np.sum(x, axis=0))  # Compute sum of each column; prints "[4 6]"
print(np.sum(x, axis=1))  # Compute sum of each row; prints "[3 7]"
```

* Broadcasting is a mechanism that allows numpy to perform arithmetic on arrays of different shapes. Frequently we have a smaller array and a larger array, and we want to use the smaller array multiple times to perform some operation on the larger array, as in the sketch below.

Numpy/dropout.py (new file, 25 lines)

@@ -0,0 +1,25 @@
"""
Inverted Dropout: Recommended implementation example.
We drop and scale at train time and don't do anything at test time.
"""

import numpy as np

# Weights W1, W2, W3 and biases b1, b2, b3 are assumed to be
# defined at module scope before these functions are called.

p = 0.5  # probability of keeping a unit active. higher = less dropout


def train_step(X):
    # forward pass for example 3-layer neural network
    H1 = np.maximum(0, np.dot(W1, X) + b1)
    U1 = (np.random.rand(*H1.shape) < p) / p  # first dropout mask. Notice /p!
    H1 *= U1  # drop!
    H2 = np.maximum(0, np.dot(W2, H1) + b2)
    U2 = (np.random.rand(*H2.shape) < p) / p  # second dropout mask. Notice /p!
    H2 *= U2  # drop!
    out = np.dot(W3, H2) + b3

    # backward pass: compute gradients... (not shown)
    # perform parameter update... (not shown)


def predict(X):
    # ensembled forward pass
    H1 = np.maximum(0, np.dot(W1, X) + b1)  # no scaling necessary
    H2 = np.maximum(0, np.dot(W2, H1) + b2)
    out = np.dot(W3, H2) + b3
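
A minimal usage sketch, assuming the module-scope weights the file relies on; the layer sizes, seed, and input below are illustrative, not part of the original file. Note that both functions compute `out` internally and do not return it.

```
import numpy as np

np.random.seed(0)

# Hypothetical layer sizes for illustration: 4 -> 8 -> 8 -> 2
W1, b1 = 0.01 * np.random.randn(8, 4), np.zeros((8, 1))
W2, b2 = 0.01 * np.random.randn(8, 8), np.zeros((8, 1))
W3, b3 = 0.01 * np.random.randn(2, 8), np.zeros((2, 1))

X = np.random.randn(4, 1)  # a single input column vector
train_step(X)              # stochastic forward pass with dropout masks
predict(X)                 # deterministic forward pass, no masks, no scaling
```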