基于FastText的文本分类
step1.转换为FastText需要的格式
import pandas as pd
from sklearn.metrics import f1_score

# Step 1: convert the data into the format FastText expects.
# The competition data is tab-separated; load the first 15k rows.
train_df = pd.read_csv('/Users/summer/Desktop/xul_data/learning/DataWhale/20200719NLP/task01_preparing_20200719/input/train_set.csv', sep='\t', nrows=15000)
# FastText supervised format requires each label to carry the '__label__' prefix.
train_df['label_ft'] = '__label__' + train_df['label'].astype(str)
# Hold out the last 5000 rows for validation; export the rest as the training file
# (no index, no header, tab-separated — the layout fasttext.train_supervised reads).
train_df[['text', 'label_ft']].iloc[:-5000].to_csv('train.csv', index=None, header=None, sep='\t')
step2.FastText分类
import fasttext

# Step 2: train a supervised FastText classifier on the exported file.
# hs = hierarchical softmax loss; word bigrams capture some word order.
model = fasttext.train_supervised('train.csv', lr=1.0, wordNgrams=2, verbose=2,
                                  minCount=1, epoch=25, loss="hs")
# predict() returns (labels, probabilities); labels look like '__label__3',
# so split on '__' and keep the last piece to recover the raw class id.
val_pred = [model.predict(x)[0][0].split('__')[-1] for x in train_df.iloc[-5000:]['text']]
# Macro-F1 over the held-out 5000 rows.
print(f1_score(train_df['label'].values[-5000:].astype(str), val_pred, average='macro'))
Output:
>>> 0.824871229687983
test:增加样本量至10w
# Experiment: grow the sample to 100k rows and repeat the pipeline.
train_df2 = pd.read_csv('/Users/summer/Desktop/xul_data/learning/DataWhale/20200719NLP/task01_preparing_20200719/input/train_set.csv', sep='\t', nrows=100000)
train_df2['label_ft'] = '__label__' + train_df2['label'].astype(str)
# Same split scheme: last 5000 rows held out for validation.
train_df2[['text', 'label_ft']].iloc[:-5000].to_csv('train.csv', index=None, header=None, sep='\t')
model2 = fasttext.train_supervised('train.csv', lr=1.0, wordNgrams=2, verbose=2,
                                   minCount=1, epoch=25, loss="hs")
val_pred2 = [model2.predict(x)[0][0].split('__')[-1] for x in train_df2.iloc[-5000:]['text']]
print(f1_score(train_df2['label'].values[-5000:].astype(str), val_pred2, average='macro'))
Output:
>>> 0.9031919041861232
本章作业
'''
Todo: 1.阅读文档,弄清楚参数的大致含义,哪些参数会增加模型的复杂度 2.在验证集上验证模型精度,判断模型是过拟合还是欠拟合
参考:
https://pypi.org/project/fasttext/
https://zhuanlan.zhihu.com/p/66739066
'''
阅读FastText的文档,尝试修改参数,得到更好的分数
# def train_model(ipt=None, opt=None, model='', dim=100, epoch=5, lr=0.1, loss='softmax'):
# np.set_printoptions(suppress=True)
# if os.path.isfile(model):
# classifier = fasttext.load_model(model)
# else:
# classifier = fasttext.train_supervised(ipt, label='__label__', dim=dim, epoch=epoch, lr=lr, wordNgrams=2, loss=loss)
# """
# 训练一个监督模型, 返回一个模型对象
# @param input: 训练数据文件路径
# @param lr: 学习率
# @param dim: 向量维度
# @param ws: cbow模型时使用
# @param epoch: 次数
# @param minCount: 词频阈值, 小于该值在初始化时会过滤掉
# @param minCountLabel: 类别阈值,类别小于该值初始化时会过滤掉
# @param minn: 构造subword时最小char个数
# @param maxn: 构造subword时最大char个数
# @param neg: 负采样
# @param wordNgrams: n-gram个数
# @param loss: 损失函数类型, softmax, ns: 负采样, hs: 分层softmax
# @param bucket: 词扩充大小, [A, B]: A语料中包含的词向量, B不在语料中的词向量
# @param thread: 线程个数, 每个线程处理输入数据的一段, 0号线程负责loss输出
# @param lrUpdateRate: 学习率更新
# @param t: 负采样阈值
# @param label: 类别前缀
#           @param verbose: 日志输出的详细程度(数值越大输出越详细)
# @param pretrainedVectors: 预训练的词向量文件路径, 如果word出现在文件夹中初始化不再随机
# @return model object
# """
# classifier.save_model(opt)
# return classifier
# dim = 100
# lr = 5
# epoch = 5
# model = f'data_dim{str(dim)}_lr0{str(lr)}_iter{str(epoch)}.model'
# classifier = train_model(ipt='data_train.txt',
# opt=model,
# model=model,
# dim=dim, epoch=epoch, lr=0.5
# )
# result = classifier.test('data_test.txt')
# print(result)
# Best manual setting found so far: raise wordNgrams from 2 to 5
# (larger n-grams increase model capacity; see the tuning log below).
model3 = fasttext.train_supervised('train.csv', lr=1.0, wordNgrams=5, verbose=2,
                                   minCount=1, epoch=25, loss="hs")
val_pred3 = [model3.predict(x)[0][0].split('__')[-1] for x in train_df2.iloc[-5000:]['text']]
print(f1_score(train_df2['label'].values[-5000:].astype(str), val_pred3, average='macro'))
Output:
# 原始:lr=1.0, wordNgrams=2, verbose=2, minCount=1, epoch=25, loss="hs"
## >>> 0.9031919041861232
# 参数调整,学习率、向量维度、次数、词频阈值、n-gram个数
## lr=5.0, wordNgrams=2, verbose=2, minCount=5, epoch=50, loss="hs"
## >>> 0.8965417666847781
## lr=0.5, wordNgrams=2, dim=128, verbose=2, minCount=5, epoch=50, loss="hs"
## >>> 0.900467944939494
## lr=0.5, wordNgrams=2, dim=64, verbose=2, minCount=5, epoch=50, loss="hs"
## >>> 0.8988977134190188
## lr=1.0, wordNgrams=2, dim=128, verbose=2, minCount=5, epoch=50, loss="hs"
## >>> 0.8994338390877419
## lr=1.0, wordNgrams=2, dim=128, verbose=2, minCount=1, epoch=25, loss="hs"
## >>> 0.9037370211866124
## lr=1.0, wordNgrams=5, verbose=2, minCount=1, epoch=25, loss="hs"
## >>> 0.913202595672358
使用验证集调参,优化模型
'''
十折交叉验证
(1) 留出法 Hold-out method 将原始数据随机分为两组,一组做为训练集,一组做为验证集,利用训练集训练分类器,然后利用验证集验证模型,记录最后的分类准确率
(2)十折交叉验证 10-fold cross-validation 将数据集分成十份,轮流将其中9份作为训练数据,1份作为测试数据,进行试验。 每次试验都会得出相应的正确率(或差错率)。10次的结果的正确率(或差错率)的平均值作为对算法精度的估计,一般还需要进行多次10折交叉验证(例如10次10折交叉验证),再求其均值,作为对算法准确性的估计。
(3) 留一验证 leave-one-out cross-validation 留一验证(LOOCV)意指只使用原本样本中的一项来当做验证资料, 而剩余的则留下来当做训练资料。 这个步骤一直持续到每个样本都被当做一次验证资料。 事实上,这等同于 K-fold 交叉验证是一样的,其中K为原本样本个数。 在某些情况下是存在有效率的演算法,如使用kernel regression 和Tikhonov regularization。
参考:
https://blog.csdn.net/Dream_angel_Z/article/details/47110077 https://blog.csdn.net/Tunnel_/article/details/107614991
'''
用StratifiedKFold实现十折交叉划分
step1.将原始数据进行十折交叉划分,分成十份训练集和测试集并保存。
from sklearn.model_selection import StratifiedKFold

print('starting K10 cross-validation data split:')
# train_df = pd.read_csv('data/train_set.csv', sep='\t')
# Stratified sampling keeps each fold's class distribution identical to the full dataset.
skf = StratifiedKFold(n_splits=10)
for n_fold, (tr_idx, val_idx) in enumerate(skf.split(train_df2['text'], train_df2['label'])):
    print(f'the {n_fold} data split ...')
    # split() yields positional indices, so use .iloc throughout.
    tr_x, tr_y = train_df2['text'].iloc[tr_idx], train_df2['label'].iloc[tr_idx]
    val_x, val_y = train_df2['text'].iloc[val_idx], train_df2['label'].iloc[val_idx]
    # Training labels need the FastText '__label__' prefix; validation labels stay raw.
    tr_y = '__label__' + tr_y.astype(str)
    traindata = pd.DataFrame(list(zip(tr_x.values, tr_y.values)))
    traindata.to_csv(f'/Users/summer/Desktop/xul_data/learning/DataWhale/20200719NLP/task04_dl_fastText_20200726/fasttext_skf10_datasplit/train_split{n_fold}.csv', index=None, header=['text', 'label_ft'], sep='\t')
    testdata = pd.DataFrame(list(zip(val_x.values, val_y.values)))
    testdata.to_csv(f'/Users/summer/Desktop/xul_data/learning/DataWhale/20200719NLP/task04_dl_fastText_20200726/fasttext_skf10_datasplit/test_split{n_fold}.csv', index=None, header=['text', 'label'], sep='\t')
# >>> ...
# the 0 data split ...
# the 1 data split ...
# the 2 data split ...
# the 3 data split ...
# the 4 data split ...
# the 5 data split ...
# the 6 data split ...
# the 7 data split ...
# the 8 data split ...
# the 9 data split ...
step2.利用十折交叉验证调参
# Step 2 of CV: train one model per fold, evaluate on that fold's held-out split,
# and report the mean macro-F1 across the 10 folds.
print('starting K10 cross-validation training:')
val_f1 = []
for n_fold in range(10):
    model = fasttext.train_supervised(f'/Users/summer/Desktop/xul_data/learning/DataWhale/20200719NLP/task04_dl_fastText_20200726/fasttext_skf10_datasplit/train_split{n_fold}.csv',
                                      lr=1.0, wordNgrams=2, verbose=2, minCount=1, epoch=25, loss='hs')
    val_df = pd.read_csv(f'/Users/summer/Desktop/xul_data/learning/DataWhale/20200719NLP/task04_dl_fastText_20200726/fasttext_skf10_datasplit/test_split{n_fold}.csv', sep='\t')
    # Strip the '__label__' prefix from each prediction to recover the class id.
    val_pred = [model.predict(x)[0][0].split('__')[-1] for x in val_df['text']]
    val_f1.append(f1_score(val_df['label'].values.astype(str), val_pred, average='macro'))
    print(f'the f1_score of {n_fold} training is:', val_f1[n_fold])
print()
print('The average f1_score is', sum(val_f1) / len(val_f1))
Output:
# Progress: 100.0% words/sec/thread: 2088734 lr: 0.000000 avg.loss: 0.092633 ETA: 0h 0m 0s
# the f1_score of 0 training is: 0.9058600633604391
# Progress: 100.0% words/sec/thread: 2114597 lr: 0.000000 avg.loss: 0.092621 ETA: 0h 0m 0s
# the f1_score of 1 training is: 0.9017374331381326
# Progress: 100.0% words/sec/thread: 2175324 lr: 0.000000 avg.loss: 0.096158 ETA: 0h 0m 0s
# the f1_score of 2 training is: 0.9039268911845869
# Progress: 100.0% words/sec/thread: 2072916 lr: 0.000000 avg.loss: 0.091677 ETA: 0h 0m 0s
# the f1_score of 3 training is: 0.902637362408513
# Progress: 100.0% words/sec/thread: 2137606 lr: 0.000000 avg.loss: 0.094607 ETA: 0h 0m 0s
# the f1_score of 4 training is: 0.9002063369408493
# Progress: 100.0% words/sec/thread: 2022383 lr: 0.000000 avg.loss: 0.091986 ETA: 0h 0m 0s
# the f1_score of 5 training is: 0.9077481432048907
# Progress: 100.0% words/sec/thread: 2183498 lr: 0.000000 avg.loss: 0.093751 ETA: 0h 0m 0s
# the f1_score of 6 training is: 0.9081961005846353
# Progress: 100.0% words/sec/thread: 2033608 lr: 0.000000 avg.loss: 0.094676 ETA: 0h 0m 0s
# the f1_score of 7 training is: 0.8997523965098473
# Progress: 100.0% words/sec/thread: 2114997 lr: 0.000000 avg.loss: 0.093399 ETA: 0h 0m 0s
# the f1_score of 8 training is: 0.9106292751332932
# Progress: 100.0% words/sec/thread: 2104720 lr: 0.000000 avg.loss: 0.092281 ETA: 0h 0m 0s
# the f1_score of 9 training is: 0.8980215563264459
# The average f1_score is 0.9038715558791633
# >>> 较原始0.9031919041861232提升了0.07%