#!/usr/bin/python
#encoding=utf-8
#https://blog.csdn.net/zhangqi_gsts/article/details/82187159
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
 
# Learning rate for the optimizer.
learn_rate = 0.01

# Training data: points sampled near an (approximately) linear relationship.
# An alternative is to generate noisy points around y = 2x + 4, e.g.:
#   train_X = [np.random.randint(20, high=100) for x in range(30)]
#   train_Y = [2*x + 4 + np.random.randint(-10, high=10) for x in train_X]
train_X = np.array([0.149, 0.583, 1.281, 2.223, 3.388, 4.757,
                    6.309, 8.024, 9.880, 11.859, 13.94, 16.101])
train_Y = np.array([430.327, 431.563, 434.281, 439.106, 445.917, 454.884,
                    465.413, 477.988, 493.056, 510.048, 530.081, 553.071])

# Number of training samples.
n_samples = train_X.shape[0]
 
#用placeholder定义X和Y变量,具体值在训练的是用feed_dict填充
# Placeholders for the inputs; concrete values are supplied through
# feed_dict when the graph is run.
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

# Model parameters W and b, initialized with random values (all-zero
# initialization is generally discouraged).
# NOTE: fixed the misspelled variable name "weigth" -> "weight".
W = tf.Variable(np.random.random(), name="weight")
b = tf.Variable(np.random.random(), name="bias")

# Linear model to fit: prediction = W * X + b
prediction = tf.add(tf.multiply(X, W), b)

# Least-squares loss: mean of the squared prediction errors.
cost = tf.reduce_mean(tf.square(prediction - Y))

# Adam optimizer is used here instead of plain gradient descent;
# both minimize `cost`.
#optimizer = tf.train.GradientDescentOptimizer(learn_rate).minimize(cost)
optimizer = tf.train.AdamOptimizer(learn_rate).minimize(cost)

# Op that initializes all variables; must be run before training.
init = tf.global_variables_initializer()
 
with tf.Session() as sess:
    sess.run(init)
    for i in range(95000):
        # Stochastic updates: run the optimizer once per training point,
        # feeding the placeholders defined above.
        for (x, y) in zip(train_X, train_Y):
            sess.run(optimizer, feed_dict={X: x, Y: y})

        # Report progress every 100 epochs (comment previously said 50,
        # contradicting the code).  `cost` depends on X and Y, so the
        # full data set must be fed to evaluate it.
        if i % 100 == 0:
            c = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
            print("Epoch:", '%04d' % i, "cost=", "{:.9f}".format(c), "W=", sess.run(W), "b=", sess.run(b))

    print("Optimization Finished!")

    # Final loss and fitted parameters.
    training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
    print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b))

    # Plot the data points and the fitted line.
    # (Fixed label typo: "origal data" -> "original data".)
    plt.plot(train_X, train_Y, 'ro', label="original data")
    plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label="fit line")
    plt.legend()
    plt.show()

Understanding the `shape` attribute of numpy.array: https://www.cnblogs.com/pupilheart/p/9853739.html

>>> import numpy as np
>>> y = np.array([[1,2,3],[4,5,6]])
>>> print(y)
[[1 2 3]
 [4 5 6]]
>>> print(y.shape)     # (number of rows, number of columns)
(2, 3)
>>> print(y.shape[0])  # number of rows
2
>>> print(y.shape[1])  # number of columns
3

多变量线性回归:https://blog.csdn.net/iv__vi/article/details/82890252

 

Logo

CSDN联合极客时间,共同打造面向开发者的精品内容学习社区,助力成长!

更多推荐