I am a machine learning newbie and I recently implemented (or at least tried to implement) a linear regression model in TensorFlow. I would love to know how I can improve my code:
import tensorflow as tf
import numpy as np
# parameters
learning_rate = 0.001
training_epochs = 10000
# training data
train_X = np.linspace(-1, 1, 100)
train_Y = train_X * 3 + np.random.randn(train_X.shape[0]) * 0.5
# create placeholders for X and Y
x = tf.placeholder(tf.float32, name="X")
y = tf.placeholder(tf.float32, name="Y")
# create weight and bias, initialized to 0
W = tf.Variable(0.0, name="weight")
b = tf.Variable(0.0, name="bias")
# linear model
linear_model = W * x + b
# mean squared error as the loss function; reducing to a scalar keeps the
# gradient magnitude independent of how many samples are fed at once
loss = tf.reduce_mean(tf.square(y - linear_model))
# gradient descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# global variables initializer
init = tf.global_variables_initializer()
# training the model
with tf.Session() as sess:
    sess.run(init)
    for i in range(training_epochs):
        sess.run(optimizer, feed_dict={x: train_X, y: train_Y})
    W_value, b_value = sess.run([W, b])

# print the values of W and b
print(W_value)
print(b_value)
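One way to sanity-check the result (a minimal, self-contained sketch on my side, not part of the model itself) is to compare the learned parameters against NumPy's closed-form least-squares fit. The fixed seed is only there so the numbers are reproducible. Since the data were generated as Y = 3*X + noise, both fits should give a slope near 3 and an intercept near 0:

import numpy as np

np.random.seed(0)  # seed added only to make the check reproducible
train_X = np.linspace(-1, 1, 100)
train_Y = train_X * 3 + np.random.randn(train_X.shape[0]) * 0.5

# closed-form least-squares fit of a degree-1 polynomial
slope, intercept = np.polyfit(train_X, train_Y, 1)
print(slope, intercept)  # expect roughly 3.0 and 0.0, matching W_value and b_value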
EDIT: And one more thing: if I have more than one variable, for example two, can I simply change my code in this way?
import tensorflow as tf
import numpy as np
# parameters
learning_rate = 0.001
training_epochs = 10000
# training data
train_X1 = np.linspace(-1, 1, 100)
train_X2 = np.linspace(2, 3, 100)
train_Y = train_X1 * 3 + train_X2 * 2 + np.random.randn(train_X1.shape[0]) * 0.5
# create placeholders for X1, X2 and Y
x1 = tf.placeholder(tf.float32, name="X1")
x2 = tf.placeholder(tf.float32, name="X2")
y = tf.placeholder(tf.float32, name="Y")
# weights and bias, initialized to 0
W1 = tf.Variable(0.0, name="weight1")
W2 = tf.Variable(0.0, name="weight2")
b = tf.Variable(0.0, name="bias")
# linear model
linear_model = W1 * x1 + W2 * x2 + b
# mean squared error as the loss function
loss = tf.reduce_mean(tf.square(y - linear_model))
# gradient descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# global variables initializer
init = tf.global_variables_initializer()
# training the model
with tf.Session() as sess:
    writer = tf.summary.FileWriter("output", sess.graph)
    sess.run(init)
    for i in range(training_epochs):
        for (X1, X2, Y) in zip(train_X1, train_X2, train_Y):
            sess.run(optimizer, feed_dict={x1: X1, x2: X2, y: Y})
    W1_value, W2_value, b_value = sess.run([W1, W2, b])
    writer.close()

# print the values of W1, W2 and b
print(W1_value)
print(W2_value)
print(b_value)
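The inner per-sample loop above means training_epochs * 100 separate sess.run calls, which is very slow. Below is a vectorized sketch of the two-variable model (my own rewrite, assuming TensorFlow 1.x), which feeds both features as a single [None, 2] matrix instead:

import tensorflow as tf
import numpy as np

learning_rate = 0.001
training_epochs = 10000

# stack both features into one (100, 2) design matrix
train_X = np.column_stack([np.linspace(-1, 1, 100),
                           np.linspace(2, 3, 100)]).astype(np.float32)
true_w = np.array([[3.0], [2.0]], dtype=np.float32)
train_Y = train_X.dot(true_w) + np.random.randn(100, 1).astype(np.float32) * 0.5

x = tf.placeholder(tf.float32, shape=[None, 2], name="X")
y = tf.placeholder(tf.float32, shape=[None, 1], name="Y")

W = tf.Variable(tf.zeros([2, 1]), name="weight")  # one weight per feature
b = tf.Variable(0.0, name="bias")

linear_model = tf.matmul(x, W) + b
loss = tf.reduce_mean(tf.square(y - linear_model))
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(training_epochs):
        sess.run(optimizer, feed_dict={x: train_X, y: train_Y})
    W_value, b_value = sess.run([W, b])

print(W_value)
print(b_value)

One caveat I noticed: train_X1 and train_X2 above are both linear in the sample index, so they are perfectly collinear. The loss still converges, but the individual weights are then not uniquely determined and need not come out as 3 and 2.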