mirror of https://github.com/autistic-symposium/tensorflow-for-deep-learning-py.git
# compute the gradient numerically:
# a generic function that takes a function f and a vector x to evaluate
# the gradient on, and returns the gradient of f at x:

import numpy as np


def eval_numerical_gradient(f, x):
  """
  a naive implementation of numerical gradient of f at x
  - f should be a function that takes a single argument
  - x is the point (numpy array) to evaluate the gradient at
  """

  fx = f(x) # evaluate function value at original point
  grad = np.zeros(x.shape)
  h = 0.00001

  # iterate over all indexes in x
  it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
  while not it.finished:

    # evaluate function at x+h
    ix = it.multi_index
    old_value = x[ix]
    x[ix] = old_value + h # increment by h
    fxh = f(x) # evaluate f(x + h)
    x[ix] = old_value # restore to previous value (very important!)

    # compute the partial derivative
    grad[ix] = (fxh - fx) / h # the slope
    it.iternext() # step to next dimension

  return grad
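As a quick sanity check, here is a minimal usage sketch. The toy objective f(x) = sum(x**2) and the test point are assumptions for illustration; its analytic gradient is 2x, so the numerical result should match it to within roughly h:

import numpy as np

# toy objective, assumed for illustration: f(x) = sum of x_i squared,
# whose analytic gradient is 2x
f = lambda x: np.sum(x ** 2)

# the test point must be a float array, since entries get nudged by h
x = np.array([1.0, -2.0, 3.0])

numerical = eval_numerical_gradient(f, x)
analytic = 2 * x

print(numerical)                             # approximately [ 2. -4.  6.]
print(np.max(np.abs(numerical - analytic)))  # small, on the order of h

Note that this is the forward difference formula, whose error shrinks linearly in h; in practice the centered difference (f(x + h) - f(x - h)) / (2 * h) is often preferred for gradient checks, since its error shrinks quadratically in h.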
