Sentiment Classification with an LSTM
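This post builds a small "emojify" sentiment classifier: sentences are mapped to pre-trained 50-dimensional GloVe word vectors, run through a bidirectional LSTM in TensorFlow 1.x, and assigned one of five emoji labels. The code consists of two files: main.py (model, training, prediction) and words_tool.py (data loading and embedding helpers).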

main.py

# -*- coding: utf-8 -*-
"""
Created on Sat Apr 28 20:39:38 2018

@author: yanghe
"""

import tensorflow as tf
import numpy as np
from words_tool import *



X_train, Y_train = read_csv('data/train_emoji.csv')
X_test, Y_test = read_csv('data/tesss.csv')
word_to_index, index_to_word, word_to_vec_map = read_glove_vecs('data/glove.6B.50d.txt')
X_train_indices = sentences_to_indices(X_train, word_to_index, 10)
Y_train_oh = convert_to_one_hot(Y_train, C = 5)
X_test_indices = sentences_to_indices(X_test, word_to_index, 10)
Y_test_oh = convert_to_one_hot(Y_test, C = 5)
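# X_train_indices: (m, 10) matrix of word indices, zero-padded per sentence.
# Y_train_oh / Y_test_oh: (m, 5) one-hot emoji labels.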


num_steps = 10         # max sentence length in words
size = 50              # GloVe embedding dimension
n_hidden = 128         # LSTM hidden units per direction
batch_size = 4
vocab_size = 400000    # vocabulary size of glove.6B
max_epoch = 10
learning_rate = 0.01
n_classes = 5          # five emoji classes
count = 0              # tracks predict() calls so variables are reused after the first

input_data = tf.placeholder(tf.int32, [None, num_steps])
targets = tf.placeholder(tf.float32, [None, n_classes])   # one-hot labels; must be float for the cross-entropy op
keep_prob = tf.placeholder(tf.float32)

def model(input_data, on_training):
    with tf.variable_scope("embed"):
        # Build a fixed embedding matrix from GloVe; row 0 stays zero for padding.
        emb_matrix = np.zeros((vocab_size + 1, size))
        for word, index in word_to_index.items():
            emb_matrix[index, :] = word_to_vec_map[word]
        embedding = tf.get_variable("embedding", initializer=tf.constant(emb_matrix, tf.float32), trainable=False)
        inputs = tf.nn.embedding_lookup(embedding, input_data)
    
    with tf.variable_scope('Bi_RNN'):
        # static_bidirectional_rnn expects a length-num_steps list of
        # (batch_size, size) tensors, so convert batch-major input to time-major.
        inputs = tf.transpose(inputs, [1, 0, 2])
        inputs = tf.reshape(inputs, [-1, size])
        inputs = tf.split(inputs, num_steps)
        lstm_fw_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
        lstm_bw_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
        
        if on_training: 
            lstm_fw_cell = tf.contrib.rnn.DropoutWrapper(lstm_fw_cell,output_keep_prob=keep_prob)
            lstm_bw_cell = tf.contrib.rnn.DropoutWrapper(lstm_bw_cell,output_keep_prob=keep_prob)
            
        outputs, _, _ = tf.contrib.rnn.static_bidirectional_rnn(lstm_fw_cell,
                                                                lstm_bw_cell,
                                                                inputs,
                                                                dtype=tf.float32)
    with tf.variable_scope('softmax'):
        weights = tf.get_variable('weights', shape=[2 * n_hidden, n_classes], initializer=tf.truncated_normal_initializer(stddev=0.1))
        biases = tf.get_variable('biases', shape=[n_classes], initializer=tf.truncated_normal_initializer(stddev=0.1))
    # Classify from the last time step's concatenated forward/backward output.
    outputs = tf.matmul(outputs[-1], weights) + biases
    return outputs

def train():
    on_training = True
    with tf.variable_scope("pred"):
        pred = model(input_data,on_training)
    saver = tf.train.Saver()
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=targets))
    train_op = tf.train.AdamOptimizer(learning_rate).minimize(cost)
    correct_pred = tf.equal(tf.argmax(targets, 1), tf.argmax(pred, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(max_epoch):
            # Note: the full training set is fed at every step; see the
            # mini-batch sketch after this listing for an alternative.
            sess.run(train_op, feed_dict={input_data: X_train_indices,
                                          targets: Y_train_oh,
                                          keep_prob: 0.8})
            if i % 5 == 0:
                accuracy_ = sess.run(accuracy, feed_dict={input_data: X_test_indices,
                                                          targets: Y_test_oh,
                                                          keep_prob: 1.0})
                print("After %d epochs, validation accuracy is %g" % (i, accuracy_))
        saver.save(sess, 'saver/model_em.ckpt')

    
def predict():
    global count
    on_training = False
    with tf.variable_scope("pred") as scope:
        # Reuse the graph variables if predict() has been called before.
        if count > 0:
            scope.reuse_variables()
        pred = model(input_data, on_training)
        count += 1
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # restore() assigns every saved variable, so no separate init is needed.
        saver.restore(sess, './saver/model_em.ckpt')
        usr_input = input("Type what you want to say (no more than 10 words): ")
        X_test = sentences_to_indices(np.array([usr_input]), word_to_index, 10)
        predict_ = sess.run(pred, feed_dict={input_data: X_test, keep_prob: 1.0})
        print('Your input:', usr_input, '-> predicted emoji:', label_to_emoji(np.argmax(predict_)))
        
# Run train() once first to create the checkpoint, then predict().
#train()
predict()
# Sample inputs to try:
#   I love taking breaks
#   This girl is messing with me
#   she got me a nice present
#   work is hard
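train() above feeds the entire training set at every step, even though words_tool.py provides random_mini_batches. A minimal mini-batch variant of the inner training loop, assuming the same session, placeholders, and train_op as in train(), could look like this (a sketch, not a drop-in replacement):

for epoch in range(max_epoch):
    for batch_x, batch_y in random_mini_batches(X_train_indices, Y_train_oh,
                                                mini_batch_size=batch_size,
                                                seed=epoch):
        sess.run(train_op, feed_dict={input_data: batch_x,
                                      targets: batch_y,
                                      keep_prob: 0.8})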

words_tool.py

# -*- coding: utf-8 -*-
"""
Created on Sun Apr 29 09:48:38 2018

@author: yanghe
"""

import numpy as np
import csv

def convert_to_one_hot(Y, C):
    # Map an integer label vector to an (m, C) one-hot matrix.
    Y = np.eye(C)[Y.reshape(-1)]
    return Y

def read_glove_vecs(glove_file):
    with open(glove_file, 'r',encoding='utf-8') as f:
        words = set()
        word_to_vec_map = {}
        for line in f:
            line = line.strip().split()
            curr_word = line[0]
            words.add(curr_word)
            word_to_vec_map[curr_word] = np.array(line[1:], dtype=np.float64)
        
        # Indices start at 1 so that index 0 can serve as padding.
        i = 1
        words_to_index = {}
        index_to_words = {}
        for w in sorted(words):
            words_to_index[w] = i
            index_to_words[i] = w
            i = i + 1
    return words_to_index, index_to_words, word_to_vec_map
    
def random_mini_batches(x, y, mini_batch_size=64, seed=0):
    # Shuffle (x, y) in unison and cut into batches of mini_batch_size,
    # plus one smaller leftover batch if m is not a multiple of the size.
    np.random.seed(seed)
    m = x.shape[0]
    mini_batches = []
    permutation = list(np.random.permutation(m))
    shuffled_x = x[permutation]
    shuffled_y = y[permutation]

    num_complete_minibatches = m // mini_batch_size
    for k in range(num_complete_minibatches):
        mini_batch_x = shuffled_x[k * mini_batch_size:(k + 1) * mini_batch_size]
        mini_batch_y = shuffled_y[k * mini_batch_size:(k + 1) * mini_batch_size]
        mini_batches.append((mini_batch_x, mini_batch_y))
    if m % mini_batch_size != 0:
        # Leftover examples that did not fill a complete batch.
        mini_batch_x = shuffled_x[num_complete_minibatches * mini_batch_size:]
        mini_batch_y = shuffled_y[num_complete_minibatches * mini_batch_size:]
        mini_batches.append((mini_batch_x, mini_batch_y))
    return mini_batches
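# Quick sanity check (hypothetical): with 10 samples and mini_batch_size=4,
#   batches = random_mini_batches(np.zeros((10, 2)), np.zeros((10, 5)), 4)
#   [len(bx) for bx, _ in batches]  # -> [4, 4, 2]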

def sentence_to_avg(sentence, word_to_vec_map):
    # Average the 50-d GloVe vectors of all words in a sentence (unused by main.py).
    words = [i.lower() for i in sentence.split()]
    avg = np.zeros((50,))
    for w in words:
        avg += word_to_vec_map[w]
    avg = avg / len(words)
    return avg
    
def sentences_to_indices(X, word_to_index, max_len):
    # Convert an array of sentences into a (m, max_len) matrix of word indices,
    # zero-padded on the right; words beyond max_len are dropped.
    m = X.shape[0]
    X_indices = np.zeros((m, max_len))
    for i in range(m):
        sentence_words = [w.lower() for w in X[i].split()]
        for j, w in enumerate(sentence_words[:max_len]):
            X_indices[i, j] = word_to_index[w]
    return X_indices
    
def read_csv(filename='data/emojify_data.csv'):
    phrase = []
    emoji = []

    with open(filename) as csvDataFile:
        csvReader = csv.reader(csvDataFile)
        for row in csvReader:
            phrase.append(row[0])
            emoji.append(row[1])

    X = np.asarray(phrase)
    Y = np.asarray(emoji, dtype=int)
    return X, Y
emoji_dictionary = {"0": ":heart:",    # :heart: may print black instead of red depending on the font
                    "1": ":baseball:",
                    "2": ":smile:",
                    "3": ":disappointed:",
                    "4": ":fork_and_knife:"}

def label_to_emoji(label):
    return emoji_dictionary[str(label)]
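
To sanity-check the preprocessing helpers, a quick interactive example (hypothetical values; it assumes 'funny' and 'lol' appear in the GloVe vocabulary):

sentences = np.array(['funny lol'])
sentences_to_indices(sentences, word_to_index, max_len=10)
# -> (1, 10) array: the indices of 'funny' and 'lol', then eight 0s of padding
convert_to_one_hot(np.array([2]), C=5)
# -> array([[0., 0., 1., 0., 0.]])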