mirror of https://github.com/autistic-symposium/tensorflow-for-deep-learning-py.git
synced 2025-05-12 03:34:59 -04:00

commit db89b720d8 (parent da1b22cf4a): clean up
13 changed files with 2873 additions and 0 deletions
Numpy/gradient.py (Normal file, 31 lines)
@@ -0,0 +1,31 @@
# compute the gradient numerically:
# a generic function that takes a function f and a vector x to evaluate
# the gradient on, and returns the gradient of f at x:

import numpy as np


def eval_numerical_gradient(f, x):
    """
    a naive implementation of the numerical gradient of f at x
    - f should be a function that takes a single argument
    - x is the point (numpy array) to evaluate the gradient at
    """

    fx = f(x)  # evaluate function value at the original point
    grad = np.zeros(x.shape)
    h = 0.00001

    # iterate over all indexes in x
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:

        # evaluate the function at x + h
        ix = it.multi_index
        old_value = x[ix]
        x[ix] = old_value + h  # increment by h
        fxh = f(x)  # evaluate f(x + h)
        x[ix] = old_value  # restore to the previous value (very important!)

        # compute the partial derivative
        grad[ix] = (fxh - fx) / h  # the slope
        it.iternext()  # step to the next dimension

    return grad
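A quick sanity check of the routine above (illustrative only, not part of the commit): for f(x) = sum(x_i**2) the analytic gradient is 2x, so the forward-difference estimate should agree with it up to an error on the order of h. As an aside, a centered difference, (f(x + h) - f(x - h)) / (2h), is often preferred in practice because its error shrinks quadratically in h rather than linearly. The test values below are made up for the example and assume eval_numerical_gradient from the file above is in scope.

import numpy as np


def f(x):
    return np.sum(x ** 2)  # analytic gradient is 2x


x = np.array([1.0, -2.0, 0.5])
numerical = eval_numerical_gradient(f, x)    # forward-difference estimate
analytic = 2 * x                             # exact gradient of sum(x**2)
print(numerical)                             # approx. [ 2. -4.  1.]
print(np.max(np.abs(numerical - analytic)))  # tiny, on the order of h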