LightGBM supports two calling interfaces: the native API and the sklearn-style API.
Native API
1. Import the required packages
import lightgbm as lgb
from sklearn.metrics import mean_squared_error
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
2. Load the Boston housing data
boston = load_boston()
data = boston.data
target = boston.target
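Note: load_boston was removed in scikit-learn 1.2, so the loader above only works on older scikit-learn versions. Any regression dataset can stand in on newer versions; a minimal sketch using fetch_california_housing (an assumption, not part of the original tutorial):
# Alternative for scikit-learn >= 1.2, where load_boston no longer exists.
from sklearn.datasets import fetch_california_housing
housing = fetch_california_housing()
data = housing.data      # 8 numeric features per district; stands in for boston.data
target = housing.target  # median house value; stands in for boston.target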
3. Split the dataset
X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.2)
4. Wrap the data in LightGBM's Dataset format
lgb_train = lgb.Dataset(X_train, y_train)
#If this is Dataset for validation, training data should be used as reference.
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
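lgb.Dataset also accepts optional arguments; a hedged sketch (the values here are illustrative, not required) that attaches feature names so later model dumps and plots are readable:
# Optional: name the features and keep the raw arrays so the Dataset can be reused later.
lgb_train = lgb.Dataset(X_train, y_train,
                        feature_name=list(boston.feature_names),
                        free_raw_data=False)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train,
                       feature_name=list(boston.feature_names),
                       free_raw_data=False)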
5. Write the parameters as a dictionary (the Boston housing task is a regression problem, so objective is set to regression)
params = {
'task': 'train',
'boosting_type': 'gbdt', # type of boosting
'objective': 'regression', # objective function
'metric': {'l2', 'l1'}, # evaluation metrics (AUC is a classification metric, so l1 is used here instead)
'num_leaves': 31, # number of leaves per tree
'learning_rate': 0.05, # learning rate
'feature_fraction': 0.9, # fraction of features sampled when building each tree
'bagging_fraction': 0.8, # fraction of rows sampled for bagging
'bagging_freq': 5, # perform bagging every k iterations (here k = 5)
'verbose': 1 # <0: fatal only, =0: errors (warnings), >0: info
}
6. Cross-validation and training
- num_boost_round: int, optional (default=100). Number of boosting iterations.
gbm = lgb.train(params, lgb_train, num_boost_round=20, valid_sets=lgb_eval, early_stopping_rounds=5)
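The lgb.train call above uses a single held-out validation set rather than true cross-validation. For the k-fold cross-validation the heading mentions, LightGBM provides lgb.cv; a minimal sketch (nfold=5 is an arbitrary choice, and the result-key names differ slightly across LightGBM versions):
# k-fold cross-validation on the training Dataset; returns per-iteration metric means/stdevs.
# stratified=False because this is a regression problem.
cv_results = lgb.cv(params, lgb_train, num_boost_round=20, nfold=5, stratified=False)
for key, values in cv_results.items():
    print(key, values[-1])  # metric value after the last boosting round
Note that LightGBM >= 4.0 removed the early_stopping_rounds argument from lgb.train; on newer versions pass callbacks=[lgb.early_stopping(5)] instead.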
7. Save the model to a file
import joblib
#gbm.save_model('model.txt')
joblib.dump(gbm, 'lgb.pkl') # pickle the trained booster gbm, not the lgb module
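To reuse the model later, load it back with the matching call; a short sketch (file names follow the ones used above):
# Load the pickled booster back with joblib...
gbm_loaded = joblib.load('lgb.pkl')
# ...or, if you saved with gbm.save_model('model.txt'), rebuild it from the text file:
# gbm_loaded = lgb.Booster(model_file='model.txt')
print(gbm_loaded.predict(X_test)[:5])  # sanity check: first few predictions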
8. Predict on the test set
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)
9. Evaluate the model
print('The RMSE of prediction is:', mean_squared_error(y_test, y_pred) ** 0.5)
sklearn-style API
1. Load and split the data
boston = load_boston()
data = boston.data
target = boston.target
X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.2)
2. Create and train the model
gbm = lgb.LGBMRegressor(objective='regression', num_leaves=31, learning_rate=0.05, n_estimators=20)
gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], eval_metric='l1', early_stopping_rounds=5)
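As with the native API, LightGBM >= 4.0 removed the early_stopping_rounds argument from fit; a sketch of the equivalent call for newer versions uses a callback instead:
# Equivalent fit call on LightGBM >= 4.0: early stopping is configured through a callback.
gbm.fit(X_train, y_train,
        eval_set=[(X_test, y_test)],
        eval_metric='l1',
        callbacks=[lgb.early_stopping(stopping_rounds=5)])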
3. Predict with the model
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_)
4. Evaluate the model
print('The RMSE of prediction is:', mean_squared_error(y_test, y_pred) ** 0.5)
5. Feature importances
print('Feature importances:', list(gbm.feature_importances_))
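The bare list of importances is easier to read when paired with the feature names from the loaded dataset (boston.feature_names here); a small sketch:
# Pair each importance score with its feature name and sort from most to least important.
ranked = sorted(zip(boston.feature_names, gbm.feature_importances_),
                key=lambda pair: pair[1], reverse=True)
for name, score in ranked:
    print(name, score)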
6. Grid search for hyperparameter tuning
from sklearn.model_selection import GridSearchCV
estimator = lgb.LGBMRegressor(num_leaves=31)
param_grid = {
'learning_rate': [0.01, 0.1, 1],
'n_estimators': [20, 40]
}
gbm = GridSearchCV(estimator, param_grid)
gbm.fit(X_train, y_train)
print('Best parameters found by grid search are:', gbm.best_params_)
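With the default refit=True, GridSearchCV also keeps the model refitted on the whole training set under the best parameters, so it can be evaluated on the held-out test set directly; a short follow-up sketch:
# best_estimator_ is refitted on all of X_train with the best parameter combination.
best_gbm = gbm.best_estimator_
y_pred = best_gbm.predict(X_test)
print('Best CV score (R^2 by default):', gbm.best_score_)
print('Test RMSE with best parameters:', mean_squared_error(y_test, y_pred) ** 0.5)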