# This program predicts classification results; feed it pid_ture_profiles and pid_ture_test_profiles respectively.
# Imports and global pandas display settings.
# The original lines were missing the space before "as"/"import"
# (e.g. `import pandasas pd`), which is a SyntaxError — restored here.
import datetime
import time
import warnings

import lightgbm as lgb
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb
from sklearn.metrics import mean_absolute_error, precision_score, r2_score, recall_score
from sklearn.model_selection import StratifiedKFold

warnings.filterwarnings("ignore")
# Widen pandas display limits so whole frames print during debugging.
pd.set_option('display.max_columns', 10000)
pd.set_option('display.max_rows', 10000)
pd.set_option('max_colwidth', 50000)
pd.set_option('display.width', 10000)
def ZQL(x, y):
    """Print and return the overall accuracy of predictions *x* against labels *y*.

    x and y are equal-length sequences of class labels (the original caller
    passes 12-class predictions).  The original also looped over the 12
    classes computing a per-class accuracy, but its print was commented out,
    so that dead loop is dropped here.

    Returns the accuracy as a float (new, backward-compatible: the original
    returned None and callers ignore the result).
    """
    data = pd.DataFrame()
    data['x'] = list(x)
    data['y'] = list(y)
    matches = list(data['x'] == data['y'])
    # Fraction of positions where prediction equals label.
    precision = matches.count(True) / len(matches)
    print(precision)
    return precision
def model_main1(train_data, test_data):
    """Train a 12-class LightGBM model with 5-fold stratified CV (pid branch).

    train_data must contain 'sid', 'click_mode' and 'click_time' columns;
    test_data must contain 'sid'.  Out-of-fold train predictions and
    fold-averaged test predictions are argmax-ed to class labels, overall
    training accuracy is printed via ZQL, and both prediction frames are
    written to CSVs suffixed with the module-level `now` timestamp string.
    """
    train_data = train_data.drop('click_time', axis=1)
    # Hyper-parameters; bracketed notes record values tried while tuning.
    # Several lines of the original dict were corrupted (missing quote/colon,
    # invalid literal 0001, misspelled keys) — repaired here.
    param1 = {
        'learning_rate': 0.01,          # tried 0.05, 0.1, 0.15
        'boosting_type': 'gbdt',
        'objective': 'multiclass',
        'metric': 'multi_logloss',
        'feature_fraction': 0.65,       # feature subsample per tree; tried [0.4..0.8]
        'bagging_fraction': 0.8,        # tried [0.6, 0.7, 0.8, 0.9]
        'bagging_freq': 2,
        'num_leaves': 50,               # tried [25, 35, 45, 55]
        'max_depth': -1,
        'lambda_l2': 5,                 # tried [0.1, 1, 5, 10]
        'lambda_l1': 0.001,             # tuning note says 0.001 (original literal 0001 was invalid)
        'num_threads': -1,
        'seed': 2019,
        'num_boost_round': 100000,      # original key was misspelled 'num_boost_ound'
        'early_stopping_rounds': 100,   # original key was misspelled 'early_stoppig_rounds'
        'verbose': -1,
        'num_class': 12,
    }
    y_train = train_data['click_mode'].values
    X_train = train_data.drop(['sid', 'click_mode'], axis=1).values
    X_test = test_data.drop(['sid'], axis=1).values
    nfolds = 5
    folds = StratifiedKFold(n_splits=nfolds, shuffle=True, random_state=123)
    # Out-of-fold class-probability matrix for the training set (model scoring).
    train_pre = np.zeros((len(train_data), 12))
    print(train_pre.shape)
    test_pre = np.zeros((len(test_data), 12))
    for fold_, (trn_idx, val_idx) in enumerate(folds.split(X_train, y_train)):
        print("fold n°{}".format(fold_ + 1))
        lgb_train = lgb.Dataset(X_train[trn_idx], y_train[trn_idx])
        lgb_eval = lgb.Dataset(X_train[val_idx], y_train[val_idx], reference=lgb_train)
        clf = lgb.train(param1, train_set=lgb_train, valid_sets=[lgb_train, lgb_eval], verbose_eval=-1)
        # LightGBM's predict() takes num_iteration; the original passed
        # xgboost's `ntree_limit`, which LightGBM does not honour.
        train_pre[val_idx] = clf.predict(X_train[val_idx], num_iteration=clf.best_iteration)
        # Average test-set probabilities over the folds.
        test_pre += clf.predict(X_test, num_iteration=clf.best_iteration) / folds.n_splits
    # Probability rows -> predicted class index (first max wins, as before).
    test_pre = np.argmax(test_pre, axis=1)
    train_pre = np.argmax(train_pre, axis=1)
    ZQL(train_pre, y_train)
    sub_te = pd.DataFrame(test_pre)
    sub_te['sid'] = test_data['sid']
    sub_tr = pd.DataFrame(train_pre)
    sub_tr['sid'] = train_data['sid']
    sub_te.to_csv("anser/test/pid_result1" + now + '.csv', index=False)
    sub_tr.to_csv("anser/train/pid_result" + now + '.csv', index=False)
def model_main2(train_data, test_data):
    """Train a 12-class LightGBM model with 5-fold stratified CV (no-pid branch).

    Same contract as model_main1, but declares the last two feature columns
    as categorical and writes to the nopid_* CSV paths.  Fixes from the
    original: a bare (uncommented) tuning-note line was a SyntaxError, and
    the OOF array was created as `tran_pre` but written as `train_pre`
    (NameError at runtime).
    """
    train_data = train_data.drop('click_time', axis=1)
    # Hyper-parameters; bracketed notes record values tried while tuning.
    param1 = {
        'learning_rate': 0.01,          # tried 0.05, 0.1, 0.15
        'boosting_type': 'gbdt',
        'objective': 'multiclass',
        'metric': 'multi_logloss',
        'feature_fraction': 0.60,       # feature subsample per tree; tried [0.4..0.8]
        'bagging_fraction': 0.8,        # tried [0.6, 0.7, 0.8, 0.9]
        'bagging_freq': 2,
        'num_leaves': 40,               # tried [25, 35, 45, 55]
        'max_depth': -1,
        'lambda_l2': 5,                 # tried [0.1, 1, 5, 10]
        'lambda_l1': 0.001,             # tried [0.0001, 0.001, 0.01]
        'num_threads': -1,
        'seed': 2019,
        'num_boost_round': 100000,
        'early_stopping_rounds': 100,
        'verbose': -1,
        'num_class': 12,
    }
    y_train = train_data['click_mode'].values
    X_train = train_data.drop(['sid', 'click_mode'], axis=1).values
    X_test = test_data.drop(['sid'], axis=1).values
    nfolds = 5
    folds = StratifiedKFold(n_splits=nfolds, shuffle=True, random_state=123)
    # Out-of-fold class-probability matrix for the training set (model scoring).
    train_pre = np.zeros((len(train_data), 12))
    test_pre = np.zeros((len(test_data), 12))
    # NOTE(review): negative column indices as categorical_feature are unusual
    # for LightGBM (it expects non-negative indices or names) — confirm these
    # resolve to the intended last two columns.
    list_categorical = [-1, -2]
    for fold_, (trn_idx, val_idx) in enumerate(folds.split(X_train, y_train)):
        print("fold n°{}".format(fold_ + 1))
        lgb_train = lgb.Dataset(X_train[trn_idx], y_train[trn_idx],
                                categorical_feature=list_categorical)
        lgb_eval = lgb.Dataset(X_train[val_idx], y_train[val_idx],
                               categorical_feature=list_categorical, reference=lgb_train)
        clf = lgb.train(param1, train_set=lgb_train, valid_sets=[lgb_train, lgb_eval], verbose_eval=-1)
        # LightGBM's predict() takes num_iteration; the original passed
        # xgboost's `ntree_limit`, which LightGBM does not honour.
        train_pre[val_idx] = clf.predict(X_train[val_idx], num_iteration=clf.best_iteration)
        # Average test-set probabilities over the folds.
        test_pre += clf.predict(X_test, num_iteration=clf.best_iteration) / folds.n_splits
    # Probability rows -> predicted class index (first max wins, as before).
    test_pre = np.argmax(test_pre, axis=1)
    train_pre = np.argmax(train_pre, axis=1)
    ZQL(train_pre, y_train)
    sub_te = pd.DataFrame(test_pre)
    sub_te['sid'] = test_data['sid']
    sub_tr = pd.DataFrame(train_pre)
    sub_tr['sid'] = train_data['sid']
    sub_te.to_csv("anser/test/nopid_result1" + now + '.csv', index=False)
    sub_tr.to_csv("anser/train/nopid_result" + now + '.csv', index=False)
def hand_time(x):
    """Return the number of days between the date part of timestamp string
    *x* ('YYYY-MM-DD HH:MM:SS') and 2018-01-01."""
    date_text = x.split(' ')[0]
    parsed = datetime.datetime.strptime(date_text, '%Y-%m-%d')
    epoch = datetime.datetime.strptime('2018/1/1', '%Y/%m/%d')
    return (parsed - epoch).days
def hand_time2(x):
    """Return the number of seconds between the time part of timestamp string
    *x* ('YYYY-MM-DD HH:MM:SS') and midnight (00:00:00)."""
    time_text = x.split(' ')[1]
    parsed = datetime.datetime.strptime(time_text, '%H:%M:%S')
    midnight = datetime.datetime.strptime('00:0:0', '%H:%M:%S')
    return (parsed - midnight).seconds
def fun_cha2(x):
    """Map the sign of *x* to a category code: negative -> 1, zero -> 2,
    positive -> 3 (anything incomparable, e.g. NaN, falls through to None)."""
    if x > 0:
        return 3
    if x == 0:
        return 2
    if x < 0:
        return 1
def month(x):
    """Return the month field (as a string, e.g. '10') of the date part of
    timestamp string *x* ('YYYY-MM-DD HH:MM:SS')."""
    date_text, _, _ = x.partition(' ')
    return date_text.split('-')[1]
def day(x):
    """Return the day-of-month field (as a string, e.g. '01') of the date part
    of timestamp string *x* ('YYYY-MM-DD HH:MM:SS')."""
    date_text, _, _ = x.partition(' ')
    return date_text.split('-')[2]
def week(x):
    """Return the weekday digit ('0' = Sunday .. '6' = Saturday) of the date
    part of timestamp string *x* ('YYYY-MM-DD HH:MM:SS')."""
    date_text = x.split(' ')[0]
    parsed = datetime.datetime.strptime(date_text, '%Y-%m-%d')
    return parsed.strftime("%w")
def hand(x):
    # Feature engineering on one raw frame.  Expects columns: 'pid',
    # 'req_time', 'plan_time', 'o', 'd' (the last two are "lon,lat" strings).
    # Derives date/time-difference, calendar, coordinate and distance
    # features, then drops the raw columns and returns the frame.
    x = x.fillna(0)
    # Days since 2018-01-01 and seconds since midnight for each timestamp;
    # missing timestamps (filled with 0 above) map to 0.
    x['req_days']=x['req_time'].apply(lambda x:hand_time(x)if x!=0 else 0)
    x['plan_days'] = x['plan_time'].apply(lambda x:hand_time(x)if x!=0 else 0)
    x['req_seconds']=x['req_time'].apply(lambda x:hand_time2(x)if x!=0 else 0)
    x['plan_seconds']=x['plan_time'].apply(lambda x:hand_time2(x)if x!=0 else 0)
    # Day difference between request and plan timestamps.
    x['cha']=x['req_days']-x['plan_days']
    # NOTE(review): both branches of this lambda return 1, so 'cha' ends up
    # constant 1 — likely intended `else 0` (or a sign encoding like cha2).
    # Preserved as-is; confirm the intent before changing.
    x['cha']=x['cha'].apply(lambda x:1 if x>0 else 1)
    # Second difference encoded as a sign category (1/2/3) via fun_cha2.
    x['cha2'] = x['req_seconds'] - x['plan_seconds']
    x['cha2'] = x['cha2'].apply(lambda x:fun_cha2(x))
    # Calendar features from the plan timestamp; note month/day/week return
    # strings ('10', '01', ...) while missing values become int 0.
    x['month'] = x['plan_time'].apply(lambda x: month(x)if x !=0 else 0)
    x['day'] = x['plan_time'].apply(lambda x: day(x)if x !=0 else 0)
    x['week'] = x['plan_time'].apply(lambda x: week(x)if x !=0 else 0)
    # Origin/destination coordinates parsed from "lon,lat" strings.
    x['o_x'] = x['o'].apply(lambda x:float(x.split(',')[0]))
    x['o_y'] = x['o'].apply(lambda x:float(x.split(',')[1]))
    x['d_x'] = x['d'].apply(lambda x:float(x.split(',')[0]))
    x['d_y'] = x['d'].apply(lambda x:float(x.split(',')[1]))
    # Squared Euclidean distance between O and D (no square root taken).
    x['line_distance'] = np.square(x['o_x'] - x['d_x']) + np.square(x['o_y'] - x['d_y'])
    # Drop raw columns now that the derived features exist.
    x=x.drop(['pid','req_time','o','d','plan_time'],axis=1)
    # d1=datetime.datetime.strftime(x,)
    # d2 = datetime.datetime.strptime('2018/1/1', '%Y/%m/%d')
    return x
# Timestamp string used to suffix output CSV file names; read as a global by
# model_main1 / model_main2.
now=str(time.strftime("%d_%H_%M_%S", time.localtime()))
# Alternative branch (kept for reference): train/predict on the pid_ture_*
# profile files with model_main1.
# train1=pd.read_csv('data2/pid_ture_profiles.csv')
# test1=pd.read_csv('data2/pid_ture_test_profiles.csv')
# hand_train1=hand(train1)
# hand_test1=hand(test1)
# model_main1(hand_train1,hand_test1)
# NOTE(review): the banner says "有pid" (with pid) but the files loaded below
# are pid_false* and the results are written as nopid_* — confirm which
# branch this is meant to be.
print('========================有pid===================================')
train2=pd.read_csv('data2/pid_false.csv')
test2=pd.read_csv('data2/pid_false_test.csv')
# Feature-engineer both frames, then run the 5-fold CV model end to end.
hand_train2=hand(train2)
hand_test2=hand(test2)
model_main2(hand_train2,hand_test2)