mirror of https://github.com/autistic-symposium/tensorflow-for-deep-learning-py.git (synced 2025-05-12 11:42:14 -04:00)
Clean up this scratch space
This commit is contained in:
parent 1de1667900, commit ed0cead015
62 changed files with 39650 additions and 13 deletions
36  numpy_examples/Li.py  Normal file
@@ -0,0 +1,36 @@
import numpy as np


def L_i(x, y, W):
    """
    Unvectorized version. Compute the multiclass SVM loss for a single example (x, y).
    - x is a column vector representing an image (e.g. 3073 x 1 in CIFAR-10)
      with an appended bias dimension in the 3073-rd position (i.e. the bias trick)
    - y is an integer giving the index of the correct class (e.g. between 0 and 9 in CIFAR-10)
    - W is the weight matrix (e.g. 10 x 3073 in CIFAR-10)
    """
    delta = 1.0  # see notes about delta later in this section
    scores = W.dot(x)  # scores becomes of size 10 x 1, the scores for each class
    correct_class_score = scores[y]
    D = W.shape[0]  # number of classes, e.g. 10
    loss_i = 0.0
    for j in range(D):  # iterate over all classes
        if j == y:
            # skip the true class so we only accumulate over the incorrect classes
            continue
        # accumulate loss for the i-th example
        loss_i += max(0, scores[j] - correct_class_score + delta)
    return loss_i


def L_i_vectorized(x, y, W):
    """
    A faster half-vectorized implementation. "Half-vectorized" refers to the fact
    that for a single example the implementation contains no for loops, but there
    is still one loop over the examples (outside this function).
    """
    delta = 1.0
    scores = W.dot(x)
    # compute the margins for all classes in one vector operation
    margins = np.maximum(0, scores - scores[y] + delta)
    # at the y-th position, scores[y] - scores[y] cancels and leaves delta; we want
    # to ignore the y-th position and only count margins on the wrong classes
    margins[y] = 0
    loss_i = np.sum(margins)
    return loss_i
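A quick sanity check that the two implementations agree (a hypothetical snippet, not part of the committed file):

np.random.seed(0)
W = np.random.randn(10, 3073) * 0.001  # CIFAR-10-like shapes
x = np.random.randn(3073)              # a flat vector keeps the scores 1-D
y = 3
print(L_i(x, y, W), L_i_vectorized(x, y, W))  # both print the same loss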
23  numpy_examples/NeareastNeighbor.py  Normal file
@@ -0,0 +1,23 @@
import numpy as np


class NearestNeighbor(object):
    def __init__(self):
        pass

    def train(self, X, y):
        # the nearest-neighbor classifier simply remembers all the training data
        self.Xtr = X
        self.ytr = y

    def predict(self, X):
        num_test = X.shape[0]
        # make sure the output type matches the input type
        Ypred = np.zeros(num_test, dtype=self.ytr.dtype)

        # loop over all test rows
        for i in range(num_test):
            # find the nearest training image to the i-th test image
            # using the L1 distance (sum of absolute value differences)
            distances = np.sum(np.abs(self.Xtr - X[i, :]), axis=1)
            min_index = np.argmin(distances)  # get the index with the smallest distance
            Ypred[i] = self.ytr[min_index]  # predict the label of the nearest example

        return Ypred
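A minimal usage sketch with toy data (hypothetical, not part of the committed file):

Xtr = np.array([[0., 0.], [10., 10.]])  # two training "images", one per row
ytr = np.array([0, 1])
nn = NearestNeighbor()
nn.train(Xtr, ytr)
print(nn.predict(np.array([[1., 1.], [9., 9.]])))  # prints "[0 1]"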
60  numpy_examples/README.md  Normal file
@@ -0,0 +1,60 @@
## Numpy Resources

### Arrays

* A grid of values, all of the same type.
* The number of dimensions is the rank of the array.
* The shape of an array is a tuple of integers giving the size of the array along each dimension.

```
a = np.array([1, 2, 3])  # Create a rank 1 array
print(a.shape)           # Prints "(3,)"
```

```
np.asarray([])        # An empty array
np.asarray([]).shape  # Prints "(0,)"
```

* Many functions to create arrays:

```
a = np.zeros((2,2))          # Create an array of all zeros
b = np.ones((1,2))           # Create an array of all ones
c = np.full((2,2), 7)        # Create a constant array
d = np.eye(2)                # Create a 2x2 identity matrix
e = np.random.random((2,2))  # Create an array filled with random values
```

* Products:

```
x = np.array([[1,2],[3,4]])

v = np.array([9,10])
w = np.array([11, 12])

# Inner product of vectors
print(v.dot(w))
print(np.dot(v, w))

# Matrix / vector product
print(x.dot(v))
print(np.dot(x, v))
```

* Sum:

```
print(np.sum(x))          # Compute sum of all elements
print(np.sum(x, axis=0))  # Compute sum of each column
print(np.sum(x, axis=1))  # Compute sum of each row
```

* Broadcasting is a mechanism that allows numpy to work with arrays of different shapes when performing arithmetic operations. Frequently we have a smaller array and a larger array, and we want to use the smaller array multiple times to perform some operation on the larger array, as in the snippet below.
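For example (a small illustrative snippet, not from the original notes):

```
x = np.array([[1, 2, 3], [4, 5, 6]])  # shape (2, 3)
v = np.array([10, 20, 30])            # shape (3,)
print(x + v)  # v is broadcast across each row: [[11 22 33] [14 25 36]]
```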
2776  numpy_examples/cs228-python-tutorial.ipynb  Executable file
File diff suppressed because one or more lines are too long
25  numpy_examples/dropout.py  Normal file
@@ -0,0 +1,25 @@
"""
Inverted dropout: recommended implementation example.
We drop and scale at train time and don't do anything at test time.
(W1, W2, W3 and b1, b2, b3 are assumed to be defined elsewhere.)
"""
import numpy as np

p = 0.5  # probability of keeping a unit active. higher = less dropout


def train_step(X):
    # forward pass for an example 3-layer neural network
    H1 = np.maximum(0, np.dot(W1, X) + b1)
    U1 = (np.random.rand(*H1.shape) < p) / p  # first dropout mask. Notice /p!
    H1 *= U1  # drop!
    H2 = np.maximum(0, np.dot(W2, H1) + b2)
    U2 = (np.random.rand(*H2.shape) < p) / p  # second dropout mask. Notice /p!
    H2 *= U2  # drop!
    out = np.dot(W3, H2) + b3

    # backward pass: compute gradients... (not shown)
    # perform parameter update... (not shown)


def predict(X):
    # ensembled forward pass
    H1 = np.maximum(0, np.dot(W1, X) + b1)  # no scaling necessary
    H2 = np.maximum(0, np.dot(W2, H1) + b2)
    out = np.dot(W3, H2) + b3
    return out
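A hypothetical setup sketch so the fragment above can run end to end (the layer sizes and initialization here are made up; the committed file leaves W1..W3 and b1..b3 undefined):

np.random.seed(0)
D, H, C = 4, 5, 3  # hypothetical input dim, hidden size, number of classes
W1, b1 = 0.01 * np.random.randn(H, D), np.zeros((H, 1))
W2, b2 = 0.01 * np.random.randn(H, H), np.zeros((H, 1))
W3, b3 = 0.01 * np.random.randn(C, H), np.zeros((C, 1))
X = np.random.randn(D, 1)  # a single column-vector input
train_step(X)              # forward pass with dropout
print(predict(X))          # forward pass without dropout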
31  numpy_examples/gradient.py  Normal file
@@ -0,0 +1,31 @@
# compute the gradient numerically:
# a generic function that takes a function f and a vector x to evaluate
# the gradient on, and returns the gradient of f at x

import numpy as np


def eval_numerical_gradient(f, x):
    """
    a naive implementation of the numerical gradient of f at x
    - f should be a function that takes a single argument
    - x is the point (numpy array) to evaluate the gradient at
    """

    fx = f(x)  # evaluate function value at the original point
    grad = np.zeros(x.shape)
    h = 0.00001

    # iterate over all indexes in x
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:

        # evaluate function at x + h
        ix = it.multi_index
        old_value = x[ix]
        x[ix] = old_value + h  # increment by h
        fxh = f(x)  # evaluate f(x + h)
        x[ix] = old_value  # restore to previous value (very important!)

        # compute the partial derivative
        grad[ix] = (fxh - fx) / h  # the slope
        it.iternext()  # step to the next dimension

    return grad
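A usage sketch with a hypothetical function (not part of the committed file); since the gradient of f(x) = sum(x**2) is 2*x, the numerical estimate should land close to [2. 4. 6.]:

f = lambda x: np.sum(x ** 2)
x = np.array([1.0, 2.0, 3.0])
print(eval_numerical_gradient(f, x))  # approximately [2. 4. 6.]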
7  numpy_examples/neuron.py  Normal file
@@ -0,0 +1,7 @@
import math

import numpy as np


class Neuron(object):
    # ... (initialization of self.weights and self.bias omitted in the original)
    def forward(self, inputs):
        """ assume inputs and weights are 1-D numpy arrays and the bias is a number """
        cell_body_sum = np.sum(inputs * self.weights) + self.bias
        firing_rate = 1.0 / (1.0 + math.exp(-cell_body_sum))  # sigmoid activation function
        return firing_rate
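A minimal usage sketch (the weights and bias here are hypothetical, since the class construction is elided above):

n = Neuron()
n.weights = np.array([0.5, -0.3, 0.8])       # hypothetical weights
n.bias = 0.1                                 # hypothetical bias
print(n.forward(np.array([1.0, 2.0, 3.0])))  # a firing rate in (0, 1)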
25  numpy_examples/nn_case_study.py  Normal file
@@ -0,0 +1,25 @@
#!/usr/bin/env python
# Adapted from: http://cs231n.github.io/neural-networks-case-study/

import numpy as np
import matplotlib.pyplot as plt

N = 100  # number of points per class
D = 2    # dimensionality
K = 3    # number of classes

# data matrix (each row = a single example)
X = np.zeros((N*K, D))
# class labels
y = np.zeros(N*K, dtype='uint8')

# generate a spiral of points for each class
for j in range(K):
    ix = range(N*j, N*(j+1))
    r = np.linspace(0.0, 1, N)  # radius
    t = np.linspace(j*4, (j+1)*4, N) + np.random.randn(N)*0.2  # theta
    X[ix] = np.c_[r*np.sin(t), r*np.cos(t)]
    y[ix] = j

# visualize the data:
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.Spectral)
plt.show()
344  numpy_examples/nn_simple.ipynb  Normal file
File diff suppressed because one or more lines are too long