2019-12-15

Logistic Regression: A Hands-On Example

import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets

# Load the iris dataset and keep two features (sepal length and petal length) for easy plotting
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target

# Scatter plot of the two features, one color per class
plt.scatter(X[y==0, 0], X[y==0, 1])
plt.scatter(X[y==1, 0], X[y==1, 1])
plt.scatter(X[y==2, 0], X[y==2, 1])
plt.show()

from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression

# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# Fit a multinomial logistic regression model with the Newton-CG solver
lr = LogisticRegression(solver='newton-cg', multi_class='multinomial')
lr.fit(X_train, y_train)

# Evaluate the model (score returns mean accuracy)
print('Training score:', lr.score(X_train, y_train))
print('Test score:', lr.score(X_test, y_test))
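Beyond the accuracy returned by score, it can be useful to look at the per-class probabilities the model assigns. The short sketch below is an optional addition, not part of the original walkthrough; it uses scikit-learn's predict_proba on a few test samples.

# Optional check (not in the original post): predicted class probabilities
# for the first five test samples; each row sums to 1 across the three classes.
proba = lr.predict_proba(X_test[:5])
print(np.round(proba, 3))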

from matplotlib.colors import ListedColormap

# Helper that plots the decision boundary of a fitted classifier
def plot_decision_boundary(model, axis):
    # Build a dense grid over the plotting region (axis = [x_min, x_max, y_min, y_max])
    x0, x1 = np.meshgrid(
        np.linspace(axis[0], axis[1], int((axis[1]-axis[0])*100)).reshape(-1, 1),
        np.linspace(axis[2], axis[3], int((axis[3]-axis[2])*100)).reshape(-1, 1)
    )
    X_new = np.c_[x0.ravel(), x1.ravel()]
    # Predict a class for every grid point and color the regions accordingly
    y_predict = model.predict(X_new)
    zz = y_predict.reshape(x0.shape)
    custom_cmap = ListedColormap(['#EF9A9A', '#FFF59D', '#90CAF9'])
    plt.contourf(x0, x1, zz, cmap=custom_cmap)

# Plot the decision boundary together with the data points
plot_decision_boundary(lr, axis=[4, 8, 0, 8])
plt.scatter(X[y==0, 0], X[y==0, 1])
plt.scatter(X[y==1, 0], X[y==1, 1])
plt.scatter(X[y==2, 0], X[y==2, 1])
plt.show()
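Before computing aggregate metrics, a confusion matrix gives a quick view of which classes get mixed up on the test set. This is an extra step not in the original post, using scikit-learn's confusion_matrix.

from sklearn.metrics import confusion_matrix

# Extra check (not in the original post): rows are true classes, columns are predictions
y_test_pred = lr.predict(X_test)
print(confusion_matrix(y_test, y_test_pred))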

from sklearn.metrics import precision_score, recall_score, f1_score

# Predictions on the test set
y_pred = lr.predict(X_test)

# Precision
print('Precision:', precision_score(y_test, y_pred, average="micro"))
# Recall
print('Recall:', recall_score(y_test, y_pred, average="micro"))
# F1 score
print('F1 score:', f1_score(y_test, y_pred, average="micro"))
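Note that with average="micro" on a single-label multiclass problem, precision, recall, and F1 all reduce to overall accuracy, so the three printed values are identical. For per-class behavior, macro averaging or a full classification_report is more informative; the snippet below is an optional addition along those lines, not part of the original post.

from sklearn.metrics import classification_report

# Optional addition: macro-averaged F1 and a per-class breakdown
print('Macro F1:', f1_score(y_test, y_pred, average="macro"))
print(classification_report(y_test, y_pred, target_names=iris.target_names))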
