In data processing we often need a quick look at how variables relate to one another, and whether the "future" can be predicted from "history". This kind of analysis, inferring the "future" from the "past", is called regression; in some contexts it is also called fitting.
Common approaches fall into linear and nonlinear regression. In this article ridge regression represents the linear family and Gaussian process regression represents the nonlinear family; a neural-network-based regressor is covered along the way.
Data preparation
For simplicity, the base data is a one-dimensional sine function with additive Gaussian noise, constructed as follows:
import numpy as np
import matplotlib.pyplot as plt
# Define the data: a noisy 1-D sine wave.
# x_data is reshaped to (n_samples, 1) because scikit-learn
# estimators expect a 2-D feature matrix.
x_data = np.linspace(0, 10, 100).reshape(-1, 1)
y_data = np.sin(x_data).ravel() + np.random.normal(0, 0.1, 100)
Training on all of the data would leave overfitting undetectable, so we split off 80% of the points as a training set and keep the remaining 20% as a test set. Note that extra Gaussian noise (standard deviation 0.2) is also added to the training targets:
rng = np.random.RandomState(0)
training_sample_indices = rng.choice(np.arange(0, 100), size=80, replace=False)
training_data = x_data[training_sample_indices]
training_noisy_target = y_data[training_sample_indices] + 0.2 * rng.randn(
len(training_sample_indices)
)
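The held-out 20% is never materialized in the snippet above; a minimal sketch of constructing it (the test_sample_indices, test_data, and test_target names are my own):
# Points not drawn for training form the test set.
test_sample_indices = np.setdiff1d(np.arange(0, 100), training_sample_indices)
test_data = x_data[test_sample_indices]
test_target = y_data[test_sample_indices]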
Plot the raw data:
plt.figure("Raw data")
plt.plot(x_data, y_data, 'bo', label='gt signal')
plt.scatter(
training_data,
training_noisy_target,
color="black",
label="Noisy measurements",
)
plt.legend()
plt.xlabel("data")
plt.ylabel("target")
_ = plt.title("Raw data with noisy training measurements")
plt.show()
Linear regression
Ridge regression
#%% linear model
from sklearn.linear_model import Ridge
rng = np.random.RandomState(0)
# Re-draw a training set, this time with 60 points, for the linear model.
training_sample_indices = rng.choice(np.arange(0, 100), size=60, replace=False)
training_data = x_data[training_sample_indices]
training_noisy_target = y_data[training_sample_indices] + 0.2 * rng.randn(
len(training_sample_indices)
)
# Fit a plain ridge regression directly on the 1-D feature.
ridge = Ridge().fit(training_data, training_noisy_target)
plt.figure("Ridge Regression")
plt.plot(x_data, y_data, 'bo', label='gt signal')
plt.scatter(
training_data,
training_noisy_target,
color="black",
label="Noisy measurements",
)
plt.plot(x_data, ridge.predict(x_data), label="Ridge regression")
plt.legend()
plt.xlabel("data")
plt.ylabel("target")
_ = plt.title("Limitation of a linear model such as ridge")
plt.show()
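The straight line clearly cannot track a sine wave. To quantify this rather than just eyeball it, one can score the model on held-out points (a sketch; since this section re-drew 60 training indices, the complement is recomputed here and the test_idx name is mine):
test_idx = np.setdiff1d(np.arange(0, 100), training_sample_indices)
# R^2 on unseen points; a low value means the model misses the periodic structure.
print("Ridge R^2 on held-out data:", ridge.score(x_data[test_idx], y_data[test_idx]))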
Adding a kernel gives linear regression a qualitative leap in capability: the kernel implicitly maps the 1-D input into a feature space where the periodic signal becomes linearly representable:
#%% kernel ridge
from sklearn.gaussian_process.kernels import ExpSineSquared
from sklearn.kernel_ridge import KernelRidge
rng = np.random.RandomState(0)
training_sample_indices = rng.choice(np.arange(0, 100), size=60, replace=False)
training_data = x_data[training_sample_indices]
training_noisy_target = y_data[training_sample_indices] + 0.2 * rng.randn(
len(training_sample_indices)
)
# Kernel ridge with a periodic ExpSineSquared kernel, default hyperparameters.
kernel_ridge = KernelRidge(kernel=ExpSineSquared())
kernel_ridge.fit(training_data, training_noisy_target)
plt.figure("Kernel Ridge Regression")
plt.plot(x_data, y_data, 'bo', label='gt signal')
plt.scatter(
training_data,
training_noisy_target,
color="black",
label="Noisy measurements",
)
plt.plot(
x_data,
kernel_ridge.predict(x_data),
label="Kernel ridge",
linewidth=2,
linestyle="dashdot",
)
The default kernel hyperparameters rarely match the data, so we tune them, together with the ridge penalty alpha, via a randomized search:
from scipy.stats import loguniform
from sklearn.model_selection import RandomizedSearchCV
# Randomized search over alpha and the kernel hyperparameters.
param_distributions = {
"alpha": loguniform(1e0, 1e3),
"kernel__length_scale": loguniform(1e-2, 1e2),
"kernel__periodicity": loguniform(1e0, 1e1),
}
kernel_ridge_tuned = RandomizedSearchCV(
kernel_ridge,
param_distributions=param_distributions,
n_iter=500,
random_state=0,
)
kernel_ridge_tuned.fit(training_data, training_noisy_target)
predictions_kr = kernel_ridge_tuned.predict(x_data)
plt.plot(
x_data,
predictions_kr,
label="Kernel ridge tuned hyperparameters",
linewidth=2,
linestyle="dashdot",
)
plt.legend(loc="lower right")
plt.xlabel("data")
plt.ylabel("target")
_ = plt.title("Kernel ridge regression with an exponential sine squared kernel")
plt.show()
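It is worth checking what the search settled on; RandomizedSearchCV exposes the winning configuration and its cross-validated score:
# Best hyperparameters found by the randomized search,
# plus the corresponding mean cross-validated R^2.
print(kernel_ridge_tuned.best_params_)
print("CV score:", kernel_ridge_tuned.best_score_)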
Nonlinear regression
Gaussian process regression
Gaussian process regression also fits the discretely sampled points, but it treats the target as a distribution over functions defined by a kernel, so besides a mean prediction it returns an uncertainty estimate. Here a WhiteKernel term is added to absorb the observation noise:
#%% gaussian process
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import WhiteKernel
# Periodic kernel plus a white-noise term that models the measurement noise.
kernel = 1.0 * ExpSineSquared(1.0, 5.0, periodicity_bounds=(1e-2, 1e1)) + WhiteKernel(
    1e-1
)
gaussian_process = GaussianProcessRegressor(kernel=kernel)
gaussian_process.fit(training_data, training_noisy_target)
mean_predictions_gpr, std_predictions_gpr = gaussian_process.predict(
x_data,
return_std=True,
)
plt.figure("Gaussian Process Regression")
plt.plot(x_data, y_data, label="True signal", linewidth=2, linestyle="dashed")
plt.scatter(
training_data,
training_noisy_target,
color="black",
label="Noisy measurements",
)
# Plot the predictions of the gaussian process regressor
plt.plot(
x_data,
mean_predictions_gpr,
label="Gaussian process regressor",
linewidth=2,
linestyle="dotted",
)
plt.fill_between(
x_data.ravel(),
mean_predictions_gpr - std_predictions_gpr,
mean_predictions_gpr + std_predictions_gpr,
color="tab:green",
alpha=0.2,
)
plt.legend(loc="lower right")
plt.xlabel("data")
plt.ylabel("target")
_ = plt.title("Gaussian process regression")
plt.show()
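After fitting, the regressor stores a copy of the kernel with hyperparameters tuned by maximizing the log marginal likelihood, which is worth printing:
# Kernel with optimized hyperparameters and the model's log marginal likelihood.
print(gaussian_process.kernel_)
print(
    "Log marginal likelihood:",
    gaussian_process.log_marginal_likelihood(gaussian_process.kernel_.theta),
)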
Neural network
Here we use scikit-learn's multi-layer perceptron regressor (MLPRegressor); the inputs are standardized first because MLPs are sensitive to feature scale:
#%% MLP
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPRegressor
# Standardize the inputs; MLPs converge poorly on unscaled features.
scaler = StandardScaler()
scaler.fit(training_data)
training_data_scaled = scaler.transform(training_data)
# Define the model.
mlp = MLPRegressor(hidden_layer_sizes=(100,),  # one hidden layer with 100 neurons
                   max_iter=1000,              # maximum number of training iterations
                   activation='relu',          # ReLU activation
                   solver='adam',              # Adam optimizer
                   alpha=0.00001,              # L2 regularization strength
                   random_state=42)
# Train.
mlp.fit(training_data_scaled, training_noisy_target)
# Predict on the full grid, scaled with the same scaler.
y_pred = mlp.predict(scaler.transform(x_data))
# Plot the result.
plt.figure("MLP regression")
plt.plot(x_data, y_data, 'bo', label='gt signal')
plt.plot(x_data, y_pred, 'r-', label='MLP prediction')
plt.scatter(
training_data,
training_noisy_target,
color="black",
label="Noisy measurements",
)
plt.legend()
plt.xlabel("data")
plt.ylabel("target")
_ = plt.title("MLP training result")
plt.show()
The resulting fit is shown in the figure above.
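To compare the four models on an equal footing, a quick sketch computing each one's mean squared error on the full grid (the comparison is my addition and assumes all models fitted above are still in scope):
from sklearn.metrics import mean_squared_error
# MSE of each model's prediction against the noisy reference signal.
for name, pred in [
    ("Ridge", ridge.predict(x_data)),
    ("Kernel ridge (tuned)", kernel_ridge_tuned.predict(x_data)),
    ("Gaussian process", mean_predictions_gpr),
    ("MLP", y_pred),
]:
    print(f"{name}: MSE = {mean_squared_error(y_data, pred):.4f}")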