真假新闻数据集下载地址:https://www.kaggle.com/c/fake-news/data#
本文采用LSTM进行真假新闻的判别,是二分类任务。
一、导入包
我的tensorflow,keras版本是2.7.0
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, LSTM, Embedding, Dropout
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import one_hot
import re
from nltk.corpus import stopwords
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from nltk.stem.porter import PorterStemmer
from sklearn.metrics import accuracy_score
from keras.models import load_model
二、读取数据
# Read the raw training data; any row containing a missing value is discarded.
df = pd.read_csv('train.csv').dropna()

# Separate the independent features (X) from the binary target label (y).
X = df.drop('label', axis=1)
y = df['label']

# Size of the hashing vocabulary used later by keras' one_hot().
voc_size = 5000

# Work on a copy so X itself stays untouched; re-index because dropna()
# left gaps in the original index.
messages = X.copy()
messages.reset_index(inplace=True)
三、文本处理
对文本进行处理。
例如:
"President Obama and President-Elect Donald Trump Meet at White House: Share:" 处理完——>
presid obama presid elect donald trump meet white hous share
ps = PorterStemmer()

# Build the English stop-word set ONCE.  The original code called
# stopwords.words('english') for every single word of every title, which
# re-loads the ~180-entry list each time; a set gives O(1) membership tests
# and identical results.
stop_words = set(stopwords.words('english'))

corpus = []
for i in range(len(messages)):
    # Keep letters only, lower-case, and tokenise on whitespace.
    review = re.sub('[^a-zA-Z]', ' ', messages['title'][i]).lower().split()
    # Drop stop words and reduce every remaining word to its Porter stem.
    review = [ps.stem(word) for word in review if word not in stop_words]
    corpus.append(' '.join(review))
四、格式转化
需要将文本数据转换为深度学习模型可接受的输入格式(定长的整数索引序列)
# Hash-encode every processed title into a sequence of integer word indices.
onehot_repr = [one_hot(words, voc_size) for words in corpus]

# Pad/truncate all sequences to a fixed length (zeros are prepended) so
# they can be fed to the Embedding layer.
sent_length = 20
embedded_docs = pad_sequences(onehot_repr, padding='pre', maxlen=sent_length)

# Convert to numpy arrays for scikit-learn / keras.
X_final = np.array(embedded_docs)
y_final = np.array(y)

# Hold out one third of the data for evaluation.
X_train, X_test, y_train, y_test = train_test_split(
    X_final, y_final, test_size=0.33, random_state=42)
五、模型的建立,训练和预测
# Binary fake-news classifier: Embedding -> LSTM -> sigmoid output.
embedding_vector_features = 40  # dimensionality of the word embeddings

model = Sequential([
    Embedding(voc_size, embedding_vector_features, input_length=sent_length),
    Dropout(0.3),
    LSTM(100),  # swap in Bidirectional(LSTM(100)) for a bi-directional variant
    Dropout(0.3),
    Dense(1, activation='sigmoid'),
])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# Train, monitoring the held-out test set after every epoch.
model.fit(X_train, y_train, validation_data=(X_test, y_test),
          epochs=10, batch_size=64)

# Predicted probabilities in [0, 1]; values near 1 indicate class 1 (fake).
y_pred = model.predict(X_test)
六、模型评估与保存
## Values strictly above the threshold map to 1, everything else to 0.
def probability_to_intres(data, threshold=0.5):
    """Convert sigmoid probabilities to hard 0/1 class labels.

    Args:
        data: iterable of probabilities (floats or 1-element arrays,
            e.g. the output of ``model.predict``).
        threshold: decision boundary; values strictly greater than it
            map to 1, everything else to 0.  Defaults to 0.5, matching
            the original hard-coded behaviour.

    Returns:
        list[int]: the predicted class for each input value.
    """
    return [1 if p > threshold else 0 for p in data]
res = probability_to_intres(y_pred)

# The original code computed these metrics but discarded the return values,
# so the evaluation was invisible; print them instead.
print("Confusion matrix:\n", confusion_matrix(y_test, res))
print("Accuracy:", accuracy_score(y_test, res))

model.save('my_model.h5')  # persist the trained model for later reuse
七、若新数据来了
如果来新数据了,判断是否是虚假新闻
new_text = "President Obama and President-Elect Donald Trump Meet at White House: Share:"
##上述方法进行文本处理,转化格式
review1 = re.sub('[^a-zA-Z]', ' ', new_text)
review1 = review1.lower()
review1 = review1.split()
review1 = [ps.stem(word) for word in review1 if not word in stopwords.words('english')]
review1 = ' '.join(review1)
onehot_repr1=[one_hot(review1, voc_size) ]
sent_length=20
embedded_docs1=pad_sequences(onehot_repr1,padding='pre',maxlen=sent_length)
X_final1=np.array(embedded_docs1)
model_best = load_model('my_model.h5') ##导入之前保存的模型
print(model_best.predict(X_final1))
print("最终判断结果:", probability_to_intres(model_best.predict(X_final1)))
完整代码如下
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, LSTM, Embedding, Dropout
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import one_hot
import re
from nltk.corpus import stopwords
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from nltk.stem.porter import PorterStemmer
from sklearn.metrics import accuracy_score
from keras.models import load_model
# Read the raw training data; any row containing a missing value is discarded.
df = pd.read_csv('train.csv').dropna()

# Separate the independent features (X) from the binary target label (y).
X = df.drop('label', axis=1)
y = df['label']

# Size of the hashing vocabulary used later by keras' one_hot().
voc_size = 5000

# Work on a copy so X itself stays untouched; re-index because dropna()
# left gaps in the original index.
messages = X.copy()
messages.reset_index(inplace=True)
ps = PorterStemmer()

# Build the English stop-word set ONCE.  The original code called
# stopwords.words('english') for every single word of every title, which
# re-loads the ~180-entry list each time; a set gives O(1) membership tests
# and identical results.
stop_words = set(stopwords.words('english'))

corpus = []
for i in range(len(messages)):
    # Keep letters only, lower-case, and tokenise on whitespace.
    review = re.sub('[^a-zA-Z]', ' ', messages['title'][i]).lower().split()
    # Drop stop words and reduce every remaining word to its Porter stem.
    review = [ps.stem(word) for word in review if word not in stop_words]
    corpus.append(' '.join(review))
# Hash-encode every processed title into a sequence of integer word indices.
onehot_repr = [one_hot(words, voc_size) for words in corpus]

# Pad/truncate all sequences to a fixed length (zeros are prepended) so
# they can be fed to the Embedding layer.
sent_length = 20
embedded_docs = pad_sequences(onehot_repr, padding='pre', maxlen=sent_length)

# Convert to numpy arrays for scikit-learn / keras.
X_final = np.array(embedded_docs)
y_final = np.array(y)

# Hold out one third of the data for evaluation.
X_train, X_test, y_train, y_test = train_test_split(
    X_final, y_final, test_size=0.33, random_state=42)
# Binary fake-news classifier: Embedding -> LSTM -> sigmoid output.
embedding_vector_features = 40  # dimensionality of the word embeddings

model = Sequential([
    Embedding(voc_size, embedding_vector_features, input_length=sent_length),
    Dropout(0.3),
    LSTM(100),  # swap in Bidirectional(LSTM(100)) for a bi-directional variant
    Dropout(0.3),
    Dense(1, activation='sigmoid'),
])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# Train, monitoring the held-out test set after every epoch.
model.fit(X_train, y_train, validation_data=(X_test, y_test),
          epochs=10, batch_size=64)

# Predicted probabilities in [0, 1]; values near 1 indicate class 1 (fake).
y_pred = model.predict(X_test)
## Values strictly above the threshold map to 1, everything else to 0.
def probability_to_intres(data, threshold=0.5):
    """Convert sigmoid probabilities to hard 0/1 class labels.

    Args:
        data: iterable of probabilities (floats or 1-element arrays,
            e.g. the output of ``model.predict``).
        threshold: decision boundary; values strictly greater than it
            map to 1, everything else to 0.  Defaults to 0.5, matching
            the original hard-coded behaviour.

    Returns:
        list[int]: the predicted class for each input value.
    """
    return [1 if p > threshold else 0 for p in data]
res = probability_to_intres(y_pred)

# The original code computed these metrics but discarded the return values,
# so the evaluation was invisible; print them instead.
print("Confusion matrix:\n", confusion_matrix(y_test, res))
print("Accuracy:", accuracy_score(y_test, res))

model.save('my_model.h5')  # persist the trained model for later reuse
# If a new, unseen headline arrives, classify it with the saved model.
new_text = "President Obama and President-Elect Donald Trump Meet at White House: Share:"

# Apply the identical preprocessing pipeline used on the training titles.
review1 = re.sub('[^a-zA-Z]', ' ', new_text).lower().split()
review1 = ' '.join(ps.stem(word) for word in review1
                   if word not in stopwords.words('english'))

# Hash-encode and pad to the same fixed length as the training inputs.
sent_length = 20
onehot_repr1 = [one_hot(review1, voc_size)]
X_final1 = np.array(pad_sequences(onehot_repr1, padding='pre', maxlen=sent_length))

# Reload the persisted model and classify the new headline.
model_best = load_model('my_model.h5')
print(model_best.predict(X_final1))
print("最终判断结果:", probability_to_intres(model_best.predict(X_final1)))