Using Dropout to Address Overfitting

  • Without dropout, the train and test losses look almost the same at first glance, but on closer inspection the test loss is indeed slightly larger than the train loss.
# Suppress TensorFlow C++ warnings
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow as tf

# Suppress TensorFlow Python-side deprecation warnings
old_v = tf.logging.get_verbosity()
tf.logging.set_verbosity(tf.logging.ERROR)

# input_data downloads the MNIST data if it is not already on disk
from tensorflow.examples.tutorials.mnist import input_data

# Read the MNIST dataset (raw string so the backslash in the Windows
# path is not treated as an escape character)
mnist = input_data.read_data_sets(r'E:\mnist', one_hot=True)

# Define a fully connected layer
def add_layer(inputs, in_size, out_size, layer_name, activation_function=None):
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.add(tf.matmul(inputs, Weights), biases)
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    tf.summary.histogram(layer_name + '/outputs', outputs)
    return outputs

# Compute accuracy on a held-out set. Note that this builds new graph ops
# on every call; fine for a demo, but wasteful inside a long training loop.
def compute_accuracy(v_xs, v_ys):
    global prediction
    y_pre = sess.run(prediction, feed_dict={xs: v_xs})
    correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys})
    return result

# Build the network
xs = tf.placeholder(tf.float32, [None, 784]) # 28*28 pixels per image
ys = tf.placeholder(tf.float32, [None, 10])

# Use add_layer to build a minimal network: one hidden layer
# (128 units, tanh) followed by a softmax output layer
l1 = add_layer(xs, 784, 128, 'l1', activation_function=tf.nn.tanh)
prediction = add_layer(l1, 128, 10, 'l2', activation_function=tf.nn.softmax)


# Cross-entropy loss: H(y, y_hat) = -sum_i y_i * log(y_hat_i), averaged over the batch
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction), reduction_indices=[1]))
tf.summary.scalar('loss', cross_entropy)

# Train with gradient descent (learning rate 0.5)
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

# Initialize all global variables
init = tf.global_variables_initializer()

merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter('E:/tensorflow/graph/train', tf.get_default_graph())
test_writer = tf.summary.FileWriter('E:/tensorflow/graph/test', tf.get_default_graph())

# Start training
with tf.Session() as sess:
    sess.run(init)
    for i in range(1001):
        # Mini-batch gradient descent
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys})
        # Full-batch gradient descent uses the whole dataset for every
        # update, which makes training very slow:
        # sess.run(train_step, feed_dict={xs: mnist.train.images, ys: mnist.train.labels})
        if i % 50 == 0:
            # print('%4d: %6.4f' % (i, compute_accuracy(mnist.test.images, mnist.test.labels)))
            train_result = sess.run(merged, feed_dict={xs: mnist.train.images, ys: mnist.train.labels})
            test_result = sess.run(merged, feed_dict={xs: mnist.test.images, ys: mnist.test.labels})
            train_writer.add_summary(train_result, i)
            test_writer.add_summary(test_result, i)
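
To compare the two loss curves, point TensorBoard at the parent log directory (assuming the paths used above); it picks up the train and test subdirectories as separate runs and overlays their loss scalars:

tensorboard --logdir=E:/tensorflow/graph
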
  • After adding dropout, the train and test losses are, surprisingly, still about the same, so the gap apparently comes from the model itself; I will verify that later. The changes relative to the previous script are noted below.
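
Only three things change relative to the previous script: a keep_prob placeholder is added, add_layer applies tf.nn.dropout to Wx_plus_b (so units are dropped in every layer built through add_layer, including the output layer), and every sess.run that feeds the network now also feeds keep_prob: 0.5 during training, 1 during evaluation so the summaries come from the full, deterministic network.
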
# Suppress TensorFlow C++ warnings
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow as tf

# Suppress TensorFlow Python-side deprecation warnings
old_v = tf.logging.get_verbosity()
tf.logging.set_verbosity(tf.logging.ERROR)

# input_data downloads the MNIST data if it is not already on disk
from tensorflow.examples.tutorials.mnist import input_data

# Read the MNIST dataset, storing it under E:\mnist (raw string so the
# backslash is not treated as an escape character)
mnist = input_data.read_data_sets(r'E:\mnist', one_hot=True)

# Define a fully connected layer
def add_layer(inputs, in_size, out_size, layer_name, activation_function=None):
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.add(tf.matmul(inputs, Weights), biases)

    # Apply dropout here: each unit is kept with probability keep_prob
    Wx_plus_b = tf.nn.dropout(Wx_plus_b, keep_prob)

    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    tf.summary.histogram(layer_name + '/outputs', outputs)
    return outputs


# Build the network
xs = tf.placeholder(tf.float32, [None, 784]) # 28*28 pixels per image
ys = tf.placeholder(tf.float32, [None, 10])

# keep_prob is the probability of keeping a unit; a fraction
# 1 - keep_prob of the units is dropped
keep_prob = tf.placeholder(tf.float32)

# Use add_layer to build a minimal network: one hidden layer
# (50 units, tanh) followed by a softmax output layer
l1 = add_layer(xs, 784, 50, 'l1', activation_function=tf.nn.tanh)
prediction = add_layer(l1, 50, 10, 'l2', activation_function=tf.nn.softmax)


# Cross-entropy loss: H(y, y_hat) = -sum_i y_i * log(y_hat_i), averaged over the batch
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction), reduction_indices=[1]))
tf.summary.scalar('loss', cross_entropy)

# Train with gradient descent (learning rate 0.5)
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

# Initialize all global variables
init = tf.global_variables_initializer()

merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter('E:/tensorflow/graph/train', tf.get_default_graph())
test_writer = tf.summary.FileWriter('E:/tensorflow/graph/test', tf.get_default_graph())

# Start training
with tf.Session() as sess:
    sess.run(init)
    for i in range(1001):
        # Mini-batch gradient descent: 100 examples per update step
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys, keep_prob: 0.5})
        # Full-batch gradient descent uses the whole dataset for every
        # update, which makes training very slow, so it is not recommended:
        # sess.run(train_step, feed_dict={xs: mnist.train.images, ys: mnist.train.labels, keep_prob: 0.5})
        if i % 50 == 0:
            # Evaluate with keep_prob = 1 so that no units are dropped
            train_result = sess.run(merged, feed_dict={xs: mnist.train.images, ys: mnist.train.labels, keep_prob: 1})
            test_result = sess.run(merged, feed_dict={xs: mnist.test.images, ys: mnist.test.labels, keep_prob: 1})
            train_writer.add_summary(train_result, i)
            test_writer.add_summary(test_result, i)
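
For reference, a minimal sketch (separate from the script above) of what tf.nn.dropout does in TF 1.x: it implements inverted dropout, so surviving entries are scaled up by 1/keep_prob at training time, no rescaling is needed at evaluation time, and keep_prob = 1 reduces to the identity. The toy tensor here is purely illustrative.

import tensorflow as tf

x = tf.ones([1, 4])
dropped = tf.nn.dropout(x, keep_prob=0.5)  # each entry kept with probability 0.5

with tf.Session() as sess:
    # Kept entries are scaled by 1/0.5 = 2.0 and dropped ones become 0,
    # e.g. [[2. 0. 2. 2.]]; the zero pattern is random per run
    print(sess.run(dropped))
    # keep_prob = 1.0 keeps everything, unscaled: [[1. 1. 1. 1.]]
    print(sess.run(tf.nn.dropout(x, keep_prob=1.0)))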
