线性回归中 TensorBoard 的使用方法
"""Linear regression demo showing TensorBoard usage (TensorFlow 1.x graph mode).

Trains y = W*x + b on noisy synthetic data, writes histogram/scalar summaries
per epoch, and prints the loss every `display_step` epochs.
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Synthetic data: y = 2x + Gaussian noise (std ~ 0.2).
train_x = np.linspace(-1, 1, 100)
train_y = 2 * train_x + np.random.randn(train_x.shape[0]) * 0.2

# Placeholders for a single sample (or a whole batch — shapes are unspecified).
X = tf.placeholder("float32")
Y = tf.placeholder("float32")
W = tf.Variable(tf.random_normal([1]), name="wight")
b = tf.Variable(tf.zeros([1]), name="bias")
z = tf.multiply(W, X) + b
# Histogram summary of the model output z.
tf.summary.histogram('z', z)
# Mean-squared-error loss.
cost = tf.reduce_mean(tf.square(Y - z))
# Scalar summary of the loss.
tf.summary.scalar("loss_function", cost)

learning_rate = 0.001
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
init = tf.global_variables_initializer()
training_epochs = 100
display_step = 5
saver = tf.train.Saver()
savedir = "./model_saver/"

with tf.Session() as sess:
    sess.run(init)
    # Merge all summaries into one op; run it to get a serialized summary string.
    merged_summary_op = tf.summary.merge_all()
    # NOTE: use an ASCII path with no spaces, or TensorBoard may not find the
    # events file (see the troubleshooting note at the end of this document).
    summary_writer = tf.summary.FileWriter("./mnist_with_summaeies", sess.graph)
    plotdata = {"batchsize": [], "loss": []}
    for epoch in range(training_epochs):
        # SGD with batch size 1: one optimizer step per sample.
        for (x, y) in zip(train_x, train_y):
            sess.run(optimizer, feed_dict={X: x, Y: y})
        # Write summaries once per epoch; the step index is the epoch number,
        # so writing inside the sample loop would produce duplicate steps.
        summary_str = sess.run(merged_summary_op, feed_dict={X: x, Y: y})
        summary_writer.add_summary(summary_str, epoch)
        if epoch % display_step == 0:
            loss = sess.run(cost, feed_dict={X: train_x, Y: train_y})
            print("Epoch=", epoch + 1, "cost=", loss, "W=", sess.run(W), "b=", sess.run(b))
            # Record only finite losses for later plotting.
            # (The original `loss == "NA"` compared a float to a string and
            # was always false, so the guard never filtered anything.)
            if not np.isnan(loss):
                plotdata["batchsize"].append(epoch)
                plotdata["loss"].append(loss)
    print("finished!")
    print("cost=", sess.run(cost, feed_dict={X: train_x, Y: train_y}), "W=", sess.run(W), "b=", sess.run(b))
    # Predict for a single input x = 0.2.
    print("x=0.2,z=", sess.run(z, feed_dict={X: 0.2}))
Epoch= 1 cost= 0.05277101 W= [1.7636517] b= [0.01116825]
Epoch= 6 cost= 0.04374114 W= [1.8228621] b= [0.04228429]
Epoch= 11 cost= 0.039844844 W= [1.8654532] b= [0.05286822]
Epoch= 16 cost= 0.03792941 W= [1.8959249] b= [0.05615382]
Epoch= 21 cost= 0.036955718 W= [1.9176637] b= [0.05693232]
Epoch= 26 cost= 0.036457047 W= [1.9331527] b= [0.05691342]
Epoch= 31 cost= 0.036200985 W= [1.9441799] b= [0.05668959]
Epoch= 36 cost= 0.03606906 W= [1.9520282] b= [0.05645309]
Epoch= 41 cost= 0.03600074 W= [1.9576116] b= [0.05625649]
Epoch= 46 cost= 0.03596507 W= [1.9615854] b= [0.05610608]
Epoch= 51 cost= 0.035946265 W= [1.9644104] b= [0.0559954]
Epoch= 56 cost= 0.035936207 W= [1.9664214] b= [0.05591514]
Epoch= 61 cost= 0.03593073 W= [1.9678534] b= [0.05585751]
Epoch= 66 cost= 0.035927683 W= [1.9688714] b= [0.05581635]
Epoch= 71 cost= 0.035925943 W= [1.9695956] b= [0.05578701]
Epoch= 76 cost= 0.035924934 W= [1.9701097] b= [0.05576613]
Epoch= 81 cost= 0.03592432 W= [1.9704767] b= [0.05575125]
Epoch= 86 cost= 0.035923943 W= [1.9707358] b= [0.05574072]
Epoch= 91 cost= 0.0359237 W= [1.9709207] b= [0.05573321]
Epoch= 96 cost= 0.03592354 W= [1.9710522] b= [0.05572788]
finished!
cost= 0.035923455 W= [1.971129] b= [0.05572475]
x=0.2,z= [0.44995055]
解决 No dashboards are active for the current data set的问题
在生成好 tensorboard 相关数据之后，根据提示运行 tensorboard 时出现 No dashboards are active for the current data set 的错误。
注意：这个问题一定是在命令行中启动 tensorboard 命令时路径出了问题。一定要确认命令的参数是保存 events.out.tfevents 文件的上级目录的绝对地址。同时，重要的问题说三遍：
一定不要使用中文路径名,一定不要使用有空格的名字作为文件夹名!!!
一定不要使用中文路径名,一定不要使用有空格的名字作为文件夹名!!!
一定不要使用中文路径名,一定不要使用有空格的名字作为文件夹名!!!
注意这个问题后,根据tensorboard的使用方法的博文进行设置即可