tensorflow-for-deep-learnin.../TensorFlow/intro_linear_model.py

import numpy as np
import tensorflow as tf
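# Note: this script uses the TensorFlow 1.x graph API (placeholders and
# sessions). Under TF 2.x it would need tf.compat.v1 plus
# tf.compat.v1.disable_eager_execution().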
# Model linear regression y = Wx + b
x = tf.placeholder(tf.float32, [None, 1])   # input: a batch of 1-D samples
W = tf.Variable(tf.zeros([1, 1]))           # weight, initialized to zero
b = tf.Variable(tf.zeros([1]))              # bias, initialized to zero
product = tf.matmul(x, W)
y = product + b                             # model prediction
y_ = tf.placeholder(tf.float32, [None, 1])  # target values
# Cost function: mean((y_ - y)**2)
cost = tf.reduce_mean(tf.square(y_ - y))
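# For this squared-error cost the gradients have a simple closed form
# (a worked note, averaging over the batch):
#   dcost/dW = mean(2 * (y - y_) * x)
#   dcost/db = mean(2 * (y - y_))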
# Training using gradient descent to minimize cost. The learning rate is
# tiny because the fake inputs below grow as large as the step index,
# which makes the gradients large.
train_step = tf.train.GradientDescentOptimizer(0.0000001).minimize(cost)
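# Roughly what minimize() does under the hood (a sketch only, not run here;
# it applies the closed-form gradients noted above via tf.gradients):
# grads = tf.gradients(cost, [W, b])
# train_step = tf.group(W.assign_sub(0.0000001 * grads[0]),
#                       b.assign_sub(0.0000001 * grads[1]))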
sess = tf.Session()
init = tf.global_variables_initializer()  # tf.initialize_all_variables is deprecated
sess.run(init)
steps = 1000
for i in range(steps):
    # Create fake data for y = W.x + b where W = 2, b = 0
    xs = np.array([[i]], dtype=np.float32)
    ys = np.array([[2 * i]], dtype=np.float32)
    # Train on this single example
    feed = {x: xs, y_: ys}
    sess.run(train_step, feed_dict=feed)
    print("After %d iterations:" % (i + 1))
    print("W: %f" % sess.run(W))
    print("b: %f" % sess.run(b))
    print("cost: %f" % sess.run(cost, feed_dict=feed))