mirror of
https://github.com/autistic-symposium/tensorflow-for-deep-learning-py.git
synced 2025-05-13 12:12:17 -04:00
restart this repository
This commit is contained in:
parent 1d5577b7e8
commit 532fbd7342
21 changed files with 2 additions and 2 deletions
@@ -1,25 +0,0 @@
"""
Inverted Dropout: Recommended implementation example.
We drop and scale at train time and don't do anything at test time.
"""

p = 0.5  # probability of keeping a unit active. higher = less dropout

def train_step(X):
  # forward pass for example 3-layer neural network
  H1 = np.maximum(0, np.dot(W1, X) + b1)
  U1 = (np.random.rand(*H1.shape) < p) / p  # first dropout mask. Notice /p!
  H1 *= U1  # drop!
  H2 = np.maximum(0, np.dot(W2, H1) + b2)
  U2 = (np.random.rand(*H2.shape) < p) / p  # second dropout mask. Notice /p!
  H2 *= U2  # drop!
  out = np.dot(W3, H2) + b3

  # backward pass: compute gradients... (not shown)
  # perform parameter update... (not shown)

def predict(X):
  # ensembled forward pass
  H1 = np.maximum(0, np.dot(W1, X) + b1)  # no scaling necessary
  H2 = np.maximum(0, np.dot(W2, H1) + b2)
  out = np.dot(W3, H2) + b3