2019-03-31

《机器学习》-自编码器

# Simple autoencoder: compress 3-D inputs into 2-D codings and reconstruct.
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected
import numpy as np

n_inputs = 3      # 3-D inputs
n_hidden = 2      # 2-D codings (bottleneck)
n_outputs = n_inputs
learning_rate = 0.01

X = tf.placeholder(tf.float32, shape=[None, n_inputs])
# Linear activations throughout: this autoencoder learns a PCA-like projection.
hidden = fully_connected(X, n_hidden, activation_fn=None)
outputs = fully_connected(hidden, n_outputs, activation_fn=None)
reconstruction_loss = tf.reduce_mean(tf.square(outputs - X))  # MSE

# BUG FIX: AdamOptimizer was constructed with the loss tensor as its
# learning rate; pass the scalar learning_rate instead.
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(reconstruction_loss)
init = tf.global_variables_initializer()

# Training on random toy data (100 train samples, 10 test samples, 3 features).
X_train, X_test = np.random.rand(100, 3), np.random.rand(10, 3)
n_iterations = 1000
codings = hidden  # the bottleneck layer's output is the coding
codings_val = None
with tf.Session() as sess:
    init.run()
    for iteration in range(n_iterations):
        training_op.run(feed_dict={X: X_train})
    codings_val = codings.eval(feed_dict={X: X_test})
    # Decode directly from the codings by feeding the hidden layer's tensor.
    res_val = outputs.eval(feed_dict={hidden: codings_val})
# SAE: stacked autoencoder on MNIST (300 -> 150 -> 300 with L2 regularization).
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected
from tensorflow.examples.tutorials.mnist.input_data import read_data_sets
import numpy as np

mnist = read_data_sets("./", one_hot=True)

n_inputs = 28 * 28   # MNIST image size (flattened)
n_hidden1 = 300
n_hidden2 = 150      # codings (bottleneck)
n_hidden3 = n_hidden1
n_outputs = n_inputs
learning_rate = 0.01
l2_reg = 0.001       # L2 weight-decay strength

X = tf.placeholder(tf.float32, shape=[None, n_inputs])
# Shared defaults for all layers: ELU activation, He (variance-scaling)
# initialization, and L2 weight regularization.
with tf.contrib.framework.arg_scope(
        [fully_connected],
        activation_fn=tf.nn.elu,
        weights_initializer=tf.contrib.layers.variance_scaling_initializer(),
        weights_regularizer=tf.contrib.layers.l2_regularizer(l2_reg)):
    hidden1 = fully_connected(X, n_hidden1)
    hidden2 = fully_connected(hidden1, n_hidden2)
    hidden3 = fully_connected(hidden2, n_hidden3)
    # Output layer stays linear so reconstructions are unconstrained.
    outputs = fully_connected(hidden3, n_outputs, activation_fn=None)

reconstruction_loss = tf.reduce_mean(tf.square(outputs - X))

# Total loss = reconstruction MSE + all collected L2 regularization terms.
reg_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
loss = tf.add_n([reconstruction_loss] + reg_loss)

optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(loss)

init = tf.global_variables_initializer()

# Training
n_epochs = 5
batch_size = 150
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        # BUG FIX: iterate over *batches*, not over every example
        # (was mnist.train.num_examples, i.e. batch_size times too many
        # steps per epoch).
        n_batches = mnist.train.num_examples // batch_size
        for iteration in range(n_batches):
            X_batch, y_batch = mnist.train.next_batch(batch_size)
            sess.run(training_op, feed_dict={X: X_batch})
©著作权归作者所有,转载或内容合作请联系作者
【社区内容提示】社区部分内容疑似由AI辅助生成,浏览时请结合常识与多方信息审慎甄别。
平台声明:文章内容(如有图片或视频亦包括在内)由作者上传并发布,文章内容仅代表作者本人观点,简书系信息发布平台,仅提供信息存储服务。

相关阅读更多精彩内容

  • 《刺客》 对刺客的钦敬与怀想,用泰戈尔《吉檀迦利》里的一句诗:”这是最最遥远的路程,来到最接近你的地方……。”来诠...
    李淼_675a阅读 3,369评论 0 2
  • 《机器学习实战》--增強学习(代码未经验证)
    sea_monster阅读 1,396评论 0 0
  • 前端面试题汇总(HTML+CSS) 此文章转载至https://github.com/ltadpoles/web-...
    幻的路上阅读 1,684评论 0 1
  • 文/zero007 谁也无法预知自己会在岁月的长河里遇见什么,好的坏的都是未知的。我们能一眼能看到的,就是此刻阳光...
    zero007阅读 3,496评论 14 14
  • 我们看了很多书,上了很多课,学了很多知识,我们知道的很多,讲道理都是一套套的,可我们做到了多少呢? 比如对待孩子的...
    方圆fg阅读 3,682评论 1 5

友情链接更多精彩内容