| Normal equation (LinearRegression) | Gradient descent (SGDRegressor) |
|---|---|
| Suited to small-scale datasets (fewer than roughly 100K samples) | Suited to large-scale datasets (more than roughly 100K samples) |
| Solves for the weights in a single closed-form computation | Requires a learning rate and many iterations to converge |
| Only fits linear models (and can run into overfitting problems) | Applicable to a wide range of models |
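To make the contrast concrete, here is a minimal sketch (not part of the original text) that fits the same linear model both ways: once with the closed-form normal equation and once with a hand-written gradient descent loop. The synthetic data, learning rate, and iteration count are arbitrary illustration choices.

```python
import numpy as np

# Synthetic data: y = 3*x1 - 2*x2 + 5 + noise (illustrative values only)
rng = np.random.RandomState(0)
X = rng.rand(200, 2)
y = X @ np.array([3.0, -2.0]) + 5.0 + rng.normal(scale=0.1, size=200)

# Add a bias column so the intercept is learned as an ordinary weight
Xb = np.hstack([X, np.ones((X.shape[0], 1))])

# Normal equation: solve (X^T X) w = X^T y in one shot
w_normal = np.linalg.solve(Xb.T @ Xb, Xb.T @ y)

# Gradient descent: start from zeros and take many small steps
w_gd = np.zeros(Xb.shape[1])
learning_rate = 0.1          # must be chosen by hand
for _ in range(5000):        # must iterate many times
    gradient = 2 / len(y) * Xb.T @ (Xb @ w_gd - y)
    w_gd -= learning_rate * gradient

print("normal equation weights:", w_normal)
print("gradient descent weights:", w_gd)
```

Both approaches recover essentially the same weights; the difference is purely in how the solution is computed.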
```python
import joblib  # sklearn.externals.joblib was removed; use the standalone joblib package
from sklearn.linear_model import LinearRegression, SGDRegressor, Ridge
from sklearn.datasets import load_boston  # note: load_boston was removed in scikit-learn 1.2
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error


def boston_line():
    lb = load_boston()
    x_train, x_test, y_train, y_test = train_test_split(lb.data, lb.target, test_size=0.25)

    # Reshape the targets into column vectors, e.g. [1, 5, 10] -> [[1], [5], [10]],
    # because StandardScaler expects 2-D input
    y_train = y_train.reshape(-1, 1)
    y_test = y_test.reshape(-1, 1)

    # Standardize features and targets (fit the scalers on the training data only)
    std_x = StandardScaler()
    x_train = std_x.fit_transform(x_train)
    x_test = std_x.transform(x_test)
    std_y = StandardScaler()
    y_train = std_y.fit_transform(y_train)
    y_test = std_y.transform(y_test)

    # Prediction with the normal equation
    lr = LinearRegression()
    lr.fit(x_train, y_train)
    lr_predict = std_y.inverse_transform(lr.predict(x_test))
    print("Coefficients from the normal equation:", lr.coef_)
    # The mean squared error must be computed on the original (un-standardized) scale
    print("MSE of the normal equation:", mean_squared_error(std_y.inverse_transform(y_test), lr_predict))
    print("Predictions from the normal equation:", lr_predict)

    # Prediction with gradient descent
    sgd = SGDRegressor()
    sgd.fit(x_train, y_train.ravel())  # SGDRegressor expects a 1-D target array
    # predict() returns a 1-D array, so reshape before inverting the standardization
    sgd_predict = std_y.inverse_transform(sgd.predict(x_test).reshape(-1, 1))
    print("Coefficients from gradient descent:", sgd.coef_)
    # The mean squared error must be computed on the original (un-standardized) scale
    print("MSE of gradient descent:", mean_squared_error(std_y.inverse_transform(y_test), sgd_predict))
    print("Predictions from gradient descent:", sgd_predict)

    # Prediction with ridge regression (alpha is the regularization strength: the larger
    # it is, the more the coefficients are shrunk, so each individual feature has less
    # influence on the final result) -- see the alpha sweep sketch after this block
    rid = Ridge(alpha=1.0)
    rid.fit(x_train, y_train)
    rid_predict = std_y.inverse_transform(rid.predict(x_test))
    print("Coefficients from ridge regression:", rid.coef_)
    # The mean squared error must be computed on the original (un-standardized) scale
    print("MSE of ridge regression:", mean_squared_error(std_y.inverse_transform(y_test), rid_predict))
    print("Predictions from ridge regression:", rid_predict)

    # Save the ridge regression model (pkl format)
    joblib.dump(rid, "./rid_model.pkl")
    # Load the ridge regression model back
    new_rid = joblib.load("./rid_model.pkl")
    new_rid_predict = std_y.inverse_transform(new_rid.predict(x_test))
    print("Loaded model --> coefficients of ridge regression:", new_rid.coef_)
    # The mean squared error must be computed on the original (un-standardized) scale
    print("Loaded model --> MSE of ridge regression:", mean_squared_error(std_y.inverse_transform(y_test), new_rid_predict))
    print("Loaded model --> predictions of ridge regression:", new_rid_predict)


if __name__ == '__main__':
    boston_line()
```
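The Ridge comment above claims that a larger alpha shrinks the feature weights. A small standalone sketch (synthetic data; the alpha values are chosen only for illustration and are not from the original text) makes that visible by printing the total coefficient magnitude for several alpha values:

```python
import numpy as np
from sklearn.linear_model import Ridge

# Synthetic regression data (illustration only)
rng = np.random.RandomState(0)
X = rng.normal(size=(200, 5))
y = X @ np.array([2.0, -1.0, 0.5, 0.0, 3.0]) + rng.normal(scale=0.5, size=200)

# Larger alpha -> stronger L2 penalty -> smaller coefficient magnitudes
for alpha in (0.01, 1.0, 100.0, 10000.0):
    rid = Ridge(alpha=alpha).fit(X, y)
    print(f"alpha={alpha:<8} sum|coef| = {np.abs(rid.coef_).sum():.3f}")
```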
Judging how well a model fits the data (a normal fit is the goal)

On the degree of fitting (using car recognition as an example):

Underfitting:
If "has four wheels" is the only criterion used to decide whether something is a car, then a supermarket shopping cart may also be recognized as a car. This is underfitting.

Overfitting:
If "has four wheels", "has an engine", "has a steering wheel" and "is black" are all used as criteria, then a red sports car can no longer be recognized as a car. The color feature "black" is clearly one criterion too many; this is overfitting.
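The same contrast can be seen numerically. The sketch below (synthetic data; the polynomial degrees and noise level are arbitrary illustration choices, not from the original text) fits polynomials of increasing degree and compares training and test error: a model that is too simple typically shows high error on both sets (underfitting), while a model that is too complex typically shows low training error but noticeably higher test error (overfitting).

```python
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split

# Noisy samples drawn from a cubic curve (illustration only)
rng = np.random.RandomState(0)
x = rng.uniform(-3, 3, size=(80, 1))
y = x[:, 0] ** 3 - 2 * x[:, 0] + rng.normal(scale=3.0, size=80)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=0)

for degree in (1, 3, 15):  # too simple, roughly right, too complex
    model = make_pipeline(PolynomialFeatures(degree), LinearRegression())
    model.fit(x_train, y_train)
    train_mse = mean_squared_error(y_train, model.predict(x_train))
    test_mse = mean_squared_error(y_test, model.predict(x_test))
    print(f"degree={degree:<3} train MSE={train_mse:8.2f}  test MSE={test_mse:8.2f}")
```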