Mirror of https://github.com/autistic-symposium/tensorflow-for-deep-learning-py.git
Synced 2025-05-12 11:42:14 -04:00
Commit 729da96322 (parent cab89257dd)
👾 Add some machine learning experiments
60 changed files with 39649 additions and 0 deletions
machine_learning_examples/numpy_examples/dropout.py (Normal file, 25 lines added)
@@ -0,0 +1,25 @@
"""
Inverted Dropout: Recommended implementation example.
We drop and scale at train time and don't do anything at test time.
"""
import numpy as np

p = 0.5  # probability of keeping a unit active. higher = less dropout


def train_step(X):
    # forward pass for an example 3-layer neural network
    # (the parameters W1, W2, W3, b1, b2, b3 are assumed to be defined elsewhere)
    H1 = np.maximum(0, np.dot(W1, X) + b1)
    U1 = (np.random.rand(*H1.shape) < p) / p  # first dropout mask. Notice /p!
    H1 *= U1  # drop!
    H2 = np.maximum(0, np.dot(W2, H1) + b2)
    U2 = (np.random.rand(*H2.shape) < p) / p  # second dropout mask. Notice /p!
    H2 *= U2  # drop!
    out = np.dot(W3, H2) + b3

    # backward pass: compute gradients... (not shown)
    # perform parameter update... (not shown)


def predict(X):
    # ensembled forward pass: no masks, no scaling at test time
    H1 = np.maximum(0, np.dot(W1, X) + b1)  # no scaling necessary
    H2 = np.maximum(0, np.dot(W2, H1) + b2)
    out = np.dot(W3, H2) + b3
    return out
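The /p rescaling is what makes this dropout "inverted": each mask entry is 0 with probability 1 - p and 1/p with probability p, so its expectation is exactly 1 and the train-time activations are unbiased estimates of the plain forward pass used at test time. That is why predict() needs no extra scaling. Below is a minimal self-contained sketch of this check; the layer sizes, random parameter initialization, and seed are made up for illustration, since dropout.py itself leaves W1..W3 and b1..b3 undefined:

import numpy as np

np.random.seed(0)
p = 0.5  # keep probability, same meaning as in dropout.py

# hypothetical parameter shapes for a 3-layer net mapping 4 -> 8 -> 8 -> 2
W1, b1 = 0.1 * np.random.randn(8, 4), np.zeros((8, 1))
W2, b2 = 0.1 * np.random.randn(8, 8), np.zeros((8, 1))
W3, b3 = 0.1 * np.random.randn(2, 8), np.zeros((2, 1))
X = np.random.randn(4, 1)  # one input column vector

# test-time forward pass, mirroring predict(): no masks, no scaling
H1 = np.maximum(0, np.dot(W1, X) + b1)
H2 = np.maximum(0, np.dot(W2, H1) + b2)
out_test = np.dot(W3, H2) + b3

# the inverted mask averages to ~1: E[(rand < p) / p] = p * (1/p) = 1,
# so each masked layer keeps its expected activation at train time
mask = (np.random.rand(1_000_000) < p) / p
print(mask.mean())  # ~1.0

Note that the match is exact only in expectation per unit; once masked activations pass through the next ReLU, train- and test-time outputs agree only approximately, which is the usual reading of the test-time pass as an ensemble average.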