
I am a machine learning newbie, and I recently implemented (or at least tried to implement) a linear regression model in TensorFlow. I would love to know how I can improve my code:

import tensorflow as tf
import numpy as np

# parameters
learning_rate = 0.001
training_epochs = 10000
# training data
train_X = np.linspace(-1, 1, 100)
train_Y = train_X * 3 + np.random.randn(train_X.shape[0]) * 0.5
# create placeholders for X and Y
x = tf.placeholder(tf.float32, name="X")
y = tf.placeholder(tf.float32, name="Y")
# create weight and bias, initialized to 0
W = tf.Variable(0.0, name="weight")
b = tf.Variable(0.0, name="bias")
# linear model
linear_model = W * x + b
# square error as the loss function
loss = tf.square(y - linear_model)
# gradient descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# global variables initializer
init = tf.global_variables_initializer()
# training the model
with tf.Session() as sess:
    sess.run(init)
    for i in range(training_epochs):
        sess.run(optimizer, feed_dict={x: train_X, y: train_Y})
    W_value, b_value = sess.run([W, b])
    # print the values of w and b
    print(W_value)
    print(b_value)
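
One thing I already suspect, sketched below under the assumption that the rest of the graph stays as above: when I feed the whole batch at once, loss is a vector of per-sample squared errors, so would reducing it to a scalar mean squared error with tf.reduce_mean be more correct?

# possible refinement (my guess): average the squared errors into a scalar MSE
loss = tf.reduce_mean(tf.square(y - linear_model))
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)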

EDIT: And one more thing: if I have more than one variable, say two, can I simply change my code in this way?

import tensorflow as tf
import numpy as np

# parameters
learning_rate = 0.001
training_epochs = 10000

# training data
train_X1 = np.linspace(-1, 1, 100)
train_X2 = np.linspace(2, 3, 100)
train_Y = train_X1 * 3 + train_X2 * 2 + np.random.randn(train_X1.shape[0]) * 0.5

# create placeholders for X and Y
x1 = tf.placeholder(tf.float32, name="X1")
x2 = tf.placeholder(tf.float32, name="X2")
y = tf.placeholder(tf.float32, name="Y")

# weight and bias, initialized to 0
W1 = tf.Variable(0.0, name="weight1")
W2 = tf.Variable(0.0, name="weight2")
b = tf.Variable(0.0, name="bias")

# linear model
linear_model = W1 * x1 + W2 * x2 + b

# square error as the loss function
loss = tf.square(y - linear_model)

# gradient descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

# global variables initializer
init = tf.global_variables_initializer()

# training the model
with tf.Session() as sess:
    writer = tf.summary.FileWriter("output", sess.graph)
    sess.run(init)
    for i in range(training_epochs):
        for (X1, X2, Y) in zip(train_X1, train_X2, train_Y):
            sess.run(optimizer, feed_dict={x1: X1, x2: X2, y: Y})
    W1_value, W2_value, b_value = sess.run([W1, W2, b])
    writer.close()

# print the values of w and b
print(W1_value)
print(W2_value)
print(b_value)
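
Or would it be cleaner to stack the features into a single matrix and learn one weight vector? A sketch of what I have in mind (my own guess, assuming a (100, 2) feature matrix and the same batch-style training loop as in my first snippet, instead of feeding one sample at a time):

import tensorflow as tf
import numpy as np

# stack the two feature columns into one (100, 2) matrix
train_X = np.stack([train_X1, train_X2], axis=1)

x = tf.placeholder(tf.float32, shape=[None, 2], name="X")
y = tf.placeholder(tf.float32, name="Y")
W = tf.Variable(tf.zeros([2, 1]), name="weight")
b = tf.Variable(0.0, name="bias")

# x @ W has shape (None, 1); squeeze it so it broadcasts against y
linear_model = tf.squeeze(tf.matmul(x, W), axis=1) + b
loss = tf.reduce_mean(tf.square(y - linear_model))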
  • "(or at least tried to implement)" — did you attempt to verify whether it works as intended? What were the results? Commented Mar 12, 2018 at 16:22
  • I did not actually check the accuracy on a test set, but when I run this code (the first one) it usually returns a W value very close to 3.0, so I think it is not that bad :D (see the sketch below) Commented Mar 12, 2018 at 18:26
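
A minimal sketch of the check mentioned above, assuming a held-out test set drawn from the same generating process as the training data:

# hypothetical held-out check: fresh samples from the same process
test_X = np.linspace(-1, 1, 50)
test_Y = test_X * 3 + np.random.randn(test_X.shape[0]) * 0.5
# mean squared error of the learned line on the test set
test_mse = np.mean((test_Y - (W_value * test_X + b_value)) ** 2)
print(test_mse)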
