Data Mining: Predicting Whether Loan Users Will Default
1 Splitting the Dataset
Import packages
In [1]:
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats import randint as sp_randint
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
import warnings
warnings.filterwarnings('ignore')
import matplotlib as mpl
mpl.rcParams['font.sans-serif'] = [u'SimHei']  # render Chinese labels correctly
mpl.rcParams['axes.unicode_minus'] = False
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 100)
pd.set_option('display.max_colwidth', 50)
pd.set_option('display.float_format', lambda x: '%.5f' % x)
Load the data
In [2]:
data = pd.read_csv('./data/data_clean.csv', encoding='gbk')
Y = data['status']
X = data.drop(['status'], axis=1)
Min-max normalization
In [3]:
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
X = scaler.fit_transform(X)
Train/test split
In [4]:
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=2018)
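Note that the scaler above was fit on the full dataset before splitting, which leaks test-set statistics into training, and the split is not stratified despite the imbalanced target. A minimal leak-free sketch (X_raw is a hypothetical name for the unscaled feature matrix, i.e. data.drop(['status'], axis=1)):
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler

# X_raw: hypothetical name for the unscaled features
x_train_raw, x_test_raw, y_train, y_test = train_test_split(
    X_raw, Y, test_size=0.3, random_state=2018, stratify=Y)  # stratify keeps the class ratio stable
scaler = MinMaxScaler()
x_train = scaler.fit_transform(x_train_raw)  # fit on the training split only
x_test = scaler.transform(x_test_raw)        # reuse the training statistics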
SMOTE oversampling
In [5]:
from imblearn.over_sampling import SMOTE
x_train_smote, y_train_smote = SMOTE(random_state=2018).fit_resample(x_train, y_train)  # fit_sample was renamed fit_resample in imblearn 0.4
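To confirm what SMOTE did, a quick sketch that counts the labels before and after resampling (SMOTE synthesizes minority-class samples until both classes are equally frequent):
import collections
print(collections.Counter(y_train))        # original, imbalanced counts
print(collections.Counter(y_train_smote))  # after SMOTE: both classes equal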
2 Model Fusion
Model fusion uses the stacking approach.
Following Task5, XGBClassifier, LGBMClassifier, and RandomForestClassifier are used as base models,
with LogisticRegressionCV as the meta-model.
2.1 Hyperparameter Tuning
We now tune the hyperparameters of all base models. RandomizedSearchCV samples n_iter candidate configurations from the parameter grid, which is far cheaper than an exhaustive GridSearchCV over the same space.
XGBClassifier
In [7]:
from xgboost import XGBClassifier
parameters = {'max_depth': [3, 4, 5, 6, 7, 8],
              'subsample': [0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
              'colsample_bytree': [0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
              'learning_rate': [0.01, 0.1, 0.2],
              'min_child_weight': [1, 2, 3],
              }
n_iter_search = 20
RS = RandomizedSearchCV(XGBClassifier(random_state=2018), parameters,
                        n_iter=n_iter_search, cv=5, scoring='roc_auc')  # iid was deprecated and later removed from sklearn, so it is dropped here
RS.fit(x_train_smote, y_train_smote)
XGB = RS.best_estimator_
print('Test set score: {:.3f}'.format(RS.score(x_test, y_test)))
Test set score: 0.774
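Besides the test score, the winning configuration is worth inspecting; the same two lines apply to the LightGBM and random forest searches below (a sketch using standard RandomizedSearchCV attributes):
print(RS.best_params_)                               # sampled configuration that won the 5-fold CV
print('Best CV AUC: {:.3f}'.format(RS.best_score_))  # mean cross-validated AUC of that configuration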
LGBMClassifier
In [8]:
from lightgbm import LGBMClassifier
parameters = {'max_depth': [15, 20, 25, 30, 35],
              'learning_rate': [0.01, 0.02, 0.05, 0.1, 0.15],
              'feature_fraction': [0.6, 0.7, 0.8, 0.9, 0.95],
              'bagging_fraction': [0.6, 0.7, 0.8, 0.9, 0.95],
              'bagging_freq': [2, 4, 5, 6, 8],
              'lambda_l1': [0, 0.1, 0.4, 0.5, 0.6],
              'lambda_l2': [0, 10, 15, 35, 40],
              'cat_smooth': [1, 10, 15, 20, 35],
              }
n_iter_search = 20
RS = RandomizedSearchCV(LGBMClassifier(random_state=2018), parameters,
                        n_iter=n_iter_search, cv=5, scoring='roc_auc')
RS.fit(x_train_smote, y_train_smote)
LGBM = RS.best_estimator_
print('Test set score: {:.3f}'.format(RS.score(x_test, y_test)))
Test set score: 0.780
RandomForestClassifier
In [9]:
from sklearn.ensemble import RandomForestClassifier
parameters = {'max_depth': [3, 4, 5, 6, 7],
              'max_features': sp_randint(1, 11),
              'min_samples_split': sp_randint(2, 11),
              'bootstrap': [True, False],
              'criterion': ['gini', 'entropy']
              }
n_iter_search = 20
RS = RandomizedSearchCV(RandomForestClassifier(random_state=2018), parameters,
                        n_iter=n_iter_search, cv=5, scoring='roc_auc')
RS.fit(x_train_smote, y_train_smote)
RF = RS.best_estimator_
print('Test set score: {:.3f}'.format(RS.score(x_test, y_test)))
Test set score: 0.778
LogisticRegressionCV
In [10]:
from sklearn.linear_model import LogisticRegressionCV
LR = LogisticRegressionCV(class_weight='balanced', cv=5, max_iter=1000)
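With class_weight='balanced', each class is weighted by n_samples / (n_classes * np.bincount(y)), so the rarer overdue class gets a proportionally larger weight. A quick sketch of what those weights look like on the original (pre-SMOTE) training labels:
from sklearn.utils.class_weight import compute_class_weight
# 'balanced' follows n_samples / (n_classes * bincount(y))
print(compute_class_weight(class_weight='balanced', classes=np.unique(y_train), y=y_train))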
2.2 Model Fusion
Stacking
In [18]:
from mlxtend.classifier import StackingCVClassifier
StackingModel = StackingCVClassifier(classifiers=[XGB, LGBM, RF],
                                     use_probas=True,
                                     meta_classifier=LR,
                                     cv=5,
                                     )
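With use_probas=True, the meta-classifier is trained not on the raw features but on out-of-fold class probabilities produced by the base models. Conceptually this is close to the following sketch (mlxtend keeps the full probability matrix per model; only the positive-class column is kept here for brevity):
from sklearn.model_selection import cross_val_predict
# Each column is one base model's out-of-fold P(y=1); stacked together
# they form the training features of the logistic-regression meta-model.
meta_features = np.column_stack([
    cross_val_predict(clf, x_train_smote, y_train_smote,
                      cv=5, method='predict_proba')[:, 1]
    for clf in (XGB, LGBM, RF)
])
LR.fit(meta_features, y_train_smote)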
Model evaluation
Define the evaluation-metric function
In [19]:
from sklearn import metrics
def Eva(clf, x_test, y_test):
    """Return accuracy, precision, recall, F1, ROC points, and AUC for a fitted classifier."""
    y_predic = clf.predict(x_test)
    y_proba = clf.predict_proba(x_test)
    acc = metrics.accuracy_score(y_test, y_predic)
    p = metrics.precision_score(y_test, y_predic)
    r = metrics.recall_score(y_test, y_predic)
    f1 = metrics.f1_score(y_test, y_predic)
    fpr, tpr, thresholds = metrics.roc_curve(y_test, y_proba[:, 1])
    auc = metrics.auc(fpr, tpr)
    return acc, p, r, f1, fpr, tpr, auc
Define the plotting function
In [20]:
def plot_roc(fprs, tprs, aucs, title):
    """Plot one ROC curve per model; relies on the global `models` dict (defined below) for the legend names."""
    plt.figure()
    lw = 2
    for i, name in enumerate(models):
        plt.plot(fprs[i], tprs[i], lw=lw, label='{0} (AUC:{1:0.2f})'.format(name, aucs[i]))
    plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')  # chance diagonal
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic of ' + title)
    plt.legend(loc="lower right")
    plt.show()
Model results
In [21]:
models = {'LR': LR,
          'RF': RF,
          'XGB': XGB,
          'LGBM': LGBM,
          'StackingModel': StackingModel}
df_result = pd.DataFrame(columns=('Model', 'dataset', 'Accuracy', 'Precision', 'Recall', 'F1 score', 'AUC'))
row = 0
fprs_train = []
tprs_train = []
aucs_train = []
fprs_test = []
tprs_test = []
aucs_test = []
for name, clf in models.items():
    clf.fit(x_train_smote, y_train_smote)  # every model is trained on the SMOTE-balanced data
    acc, p, r, f1, fpr_train, tpr_train, auc_train = Eva(clf, x_train, y_train)  # evaluated on the original (un-resampled) train split
    fprs_train.append(fpr_train)
    tprs_train.append(tpr_train)
    aucs_train.append(auc_train)
    df_result.loc[row] = [name, 'train', acc, p, r, f1, auc_train]
    row += 1
    acc, p, r, f1, fpr_test, tpr_test, auc_test = Eva(clf, x_test, y_test)
    fprs_test.append(fpr_test)
    tprs_test.append(tpr_test)
    aucs_test.append(auc_test)
    df_result.loc[row] = [name, 'test', acc, p, r, f1, auc_test]
    row += 1
print(df_result)
plot_roc(fprs_train, tprs_train, aucs_train, 'train')
plot_roc(fprs_test, tprs_test, aucs_test, 'test')
Model dataset Accuracy Precision Recall F1 score AUC
0 LR train 0.76453 0.51643 0.71990 0.60142 0.82224
1 LR test 0.72570 0.48380 0.64183 0.55172 0.76500
2 RF train 0.87565 0.74389 0.75654 0.75016 0.92441
3 RF test 0.75885 0.54407 0.51289 0.52802 0.77801
4 XGB train 0.99806 1.00000 0.99215 0.99606 0.99994
5 XGB test 0.77845 0.61803 0.41261 0.49485 0.77383
6 LGBM train 0.99612 0.99344 0.99084 0.99214 0.99909
7 LGBM test 0.77694 0.61778 0.39828 0.48432 0.78034
8 StackingModel train 0.99742 0.99606 0.99346 0.99476 0.99987
9 StackingModel test 0.78674 0.64474 0.42120 0.50953 0.79101
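On the test set, the stacked model attains the best AUC (0.791) and accuracy (0.787), while the tree-based base models fit the SMOTE-balanced training data almost perfectly (train AUC near 1.0); the sizeable train/test gap suggests overfitting and is worth keeping in mind when reading these numbers.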