mirror of
https://github.com/autistic-symposium/tensorflow-for-deep-learning-py.git
synced 2025-05-10 10:45:04 -04:00
clean up
This commit is contained in:
parent da1b22cf4a
commit db89b720d8

13 changed files with 2873 additions and 0 deletions
BIN
.DS_Store
vendored
Binary file not shown.
36
Numpy/Li.py
Normal file
@@ -0,0 +1,36 @@
import numpy as np


def L_i(x, y, W):
    """
    Unvectorized version. Compute the multiclass SVM loss for a single example (x, y).
    - x is a column vector representing an image (e.g. 3073 x 1 in CIFAR-10)
      with an appended bias dimension in the 3073-rd position (i.e. the bias trick)
    - y is an integer giving the index of the correct class (e.g. between 0 and 9 in CIFAR-10)
    - W is the weight matrix (e.g. 10 x 3073 in CIFAR-10)
    """
    delta = 1.0  # see notes about delta later in this section
    scores = W.dot(x)  # scores becomes of size 10 x 1, the scores for each class
    correct_class_score = scores[y]
    D = W.shape[0]  # number of classes, e.g. 10
    loss_i = 0.0
    for j in range(D):  # iterate over all classes
        if j == y:
            # skip the true class to only loop over incorrect classes
            continue
        # accumulate loss for the i-th example
        loss_i += max(0, scores[j] - correct_class_score + delta)
    return loss_i


def L_i_vectorized(x, y, W):
    """
    A faster half-vectorized implementation. Half-vectorized
    refers to the fact that for a single example the implementation contains
    no for loops, but there is still one loop over the examples (outside this function).
    """
    delta = 1.0
    scores = W.dot(x)
    # compute the margins for all classes in one vector operation
    margins = np.maximum(0, scores - scores[y] + delta)
    # at the y-th position scores[y] - scores[y] cancels and gives delta; we want
    # to ignore the y-th position and only count margins on the wrong classes
    margins[y] = 0
    loss_i = np.sum(margins)
    return loss_i
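A minimal sketch of how these two loss functions might be called on random data; the shapes, seed, and class index below are assumptions for illustration and are not part of the committed file:

import numpy as np

np.random.seed(0)
x = np.random.randn(3073)              # one CIFAR-10 image with the bias dimension appended
y = 3                                  # assumed index of the correct class
W = np.random.randn(10, 3073) * 0.001  # small random weights

# the two implementations should return the same loss for a single example
print(L_i(x, y, W))
print(L_i_vectorized(x, y, W))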
23
Numpy/NeareastNeighbor.py
Normal file
@@ -0,0 +1,23 @@
import numpy as np


class NearestNeighbor(object):
    def __init__(self):
        pass

    def train(self, X, y):
        # the nearest neighbor classifier simply remembers all of the training data
        self.Xtr = X
        self.ytr = y

    def predict(self, X):
        num_test = X.shape[0]
        # make sure the output type matches the label type
        Ypred = np.zeros(num_test, dtype=self.ytr.dtype)

        # loop over all test rows
        for i in range(num_test):
            # find the nearest training image to the i'th test image
            # using the L1 distance (sum of absolute value differences)
            distances = np.sum(np.abs(self.Xtr - X[i, :]), axis=1)
            min_index = np.argmin(distances)  # get the index with the smallest distance
            Ypred[i] = self.ytr[min_index]    # predict the label of the nearest example

        return Ypred
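A short usage sketch with tiny random arrays; the sizes, labels, and seed are made up for illustration (CIFAR-10 rows would be 3072-dimensional flattened images):

import numpy as np

np.random.seed(0)
Xtr = np.random.randn(50, 3072)           # 50 assumed training images, flattened
ytr = np.random.randint(0, 10, size=50)   # assumed integer labels
Xte = np.random.randn(5, 3072)            # 5 assumed test images

nn = NearestNeighbor()
nn.train(Xtr, ytr)
predictions = nn.predict(Xte)             # one predicted label per test row
print(predictions.shape)                  # (5,)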
0
Numpy/README.md
Normal file
2776
Numpy/cs228-python-tutorial.ipynb
Executable file
File diff suppressed because one or more lines are too long
31
Numpy/gradient.py
Normal file
@@ -0,0 +1,31 @@
import numpy as np


# compute the gradient numerically:
# a generic function that takes a function f and a vector x to evaluate
# the gradient at, and returns the gradient of f at x

def eval_numerical_gradient(f, x):
    """
    A naive implementation of the numerical gradient of f at x.
    - f should be a function that takes a single argument
    - x is the point (numpy array) to evaluate the gradient at
    """

    fx = f(x)  # evaluate the function value at the original point
    grad = np.zeros(x.shape)
    h = 0.00001

    # iterate over all indexes in x
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:

        # evaluate the function at x + h
        ix = it.multi_index
        old_value = x[ix]
        x[ix] = old_value + h  # increment by h
        fxh = f(x)  # evaluate f(x + h)
        x[ix] = old_value  # restore to previous value (very important!)

        # compute the partial derivative
        grad[ix] = (fxh - fx) / h  # the slope
        it.iternext()  # step to the next dimension

    return grad
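A small check of the numerical gradient against a known analytic gradient; the quadratic test function and the point below are assumptions for illustration, not part of the committed file:

import numpy as np

def f(x):
    # simple quadratic: f(x) = sum(x^2), whose analytic gradient is 2x
    return np.sum(x ** 2)

x = np.array([1.0, -2.0, 3.0])
grad_numerical = eval_numerical_gradient(f, x)
grad_analytic = 2 * x
print(grad_numerical)   # approximately [ 2. -4.  6.]
print(grad_analytic)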
7
Numpy/neuron.py
Normal file
@@ -0,0 +1,7 @@
import math

import numpy as np


class Neuron(object):
    # ...
    def forward(self, inputs):
        """ assume inputs and self.weights are 1-D numpy arrays and self.bias is a number """
        cell_body_sum = np.sum(inputs * self.weights) + self.bias
        firing_rate = 1.0 / (1.0 + math.exp(-cell_body_sum))  # sigmoid activation function
        return firing_rate
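A minimal sketch of exercising the forward pass; since the rest of the class body is elided (# ...), the weights and bias are attached by hand here purely for illustration:

import numpy as np

n = Neuron()
n.weights = np.array([0.5, -0.3, 0.8])       # assumed synaptic weights
n.bias = 0.1                                 # assumed bias
print(n.forward(np.array([1.0, 2.0, 3.0])))  # a value in (0, 1) from the sigmoid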
BIN
Papers/.DS_Store
vendored
Binary file not shown.