1、什么是机器学习
人和计算机一样,都是一大批互相连接的信息传递和存储元素所组成的系统
应用:百度的图片识别(机器学习的视觉处理系统)、房价预测、股票涨跌
实现机器学习的方法就是算法,分为4到5类
监督学习:给计算机不断的提供数据和数据对应的值,通过指引的方法让其自己学习
非监督学习:只提供图片,不告诉结果,让机器自己观察各数据之间的特性,发现特性背后的规律,判断和分类
半监督学习:利用少量有标签的样本和大量没有标签的样本进行训练和分类
强化学习:把计算机放到一个完全陌生的环境,或者让它完成一项从未接触过的任务,它自己会尝试各种手段,最后让自己成功适应这一个陌生的环境,或者学会完成这件任务的方法和途径
遗传算法(类似强化学习):模拟进化理论,淘汰弱者,适者生存
2、为什么使用 sklearn
利用 Python 的机器学习模块 sklearn(即 scikit-learn)来实现机器学习,它涵盖多种机器学习类型,其中监督学习包括分类(classification)和回归(regression)
3、安装sklearn
前提:python(>=2.6 or >=3.3) numpy (>=1.6.1) scipy(>=0.9)
安装:pip install scikit-learn
or conda install scikit-learn
4、如何选择机器学习方法
分类 回归 非监督学习
5、通用学习模式
# Generic sklearn workflow: load data -> split -> fit -> predict.
import numpy as np
from sklearn import datasets
# NOTE: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
# Fixed typos in the original import ("slearn.neighors").
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt

# Load the iris dataset and split it 70% train / 30% test.
iris = datasets.load_iris()
iris_x = iris.data    # feature matrix
iris_y = iris.target  # class labels
x_train, x_test, y_train, y_test = train_test_split(iris_x, iris_y, test_size=0.3)

# Fit a k-nearest-neighbours classifier on the training split.
knn = KNeighborsClassifier()
knn.fit(x_train, y_train)

# Compare predictions against ground truth on the held-out test split.
print(knn.predict(x_test))  # predicted labels
print(y_test)               # true labels
6、sklearn 的 datasets数据库
# Datasets bundled with sklearn.
datasets.load_iris()
# Fixed typo: the function is load_boston (lowercase); the stray Chinese
# character "等" ("etc.") made the original line a SyntaxError.
# NOTE(review): load_boston was removed in scikit-learn 1.2 — on modern
# versions use fetch_california_housing() instead.
datasets.load_boston()
# Synthetic data generators.
datasets.make_regression()
# Larger `noise` spreads the samples further away from the underlying line.
x, y = datasets.make_regression(n_samples=100, n_features=1, n_targets=1, noise=1)
7、model常用属性和功能
# Common attributes of a fitted linear model.
# Suppose the learned model is: y = 0.1 * x + 0.3
# (the original bare line `y=0.1x+0.3` was a SyntaxError; kept as a comment)
model.coef_       # learned slope(s)  -> 0.1 in the example above
model.intercept_  # learned intercept -> 0.3 in the example above
# Common methods.
model.fit()         # train the model on data
model.predict()     # run inference on new samples
model.get_params()  # hyper-parameters the model was constructed with
model.score()       # accuracy for classifiers, R^2 for regressors
8 、normalization 标准化数据
#导入模块
from __future__ import print_function
from sklearn import preprocessing
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets.samples_generator import make_classification
from sklearn.svm import SVC
import matplotlib.pyplot as plt
#生成数据
X, y = make_classification(n_samples=300, n_features=2 , n_redundant=0, n_informative=2,
random_state=22, n_clusters_per_class=1, scale=100) #生成数据
# normalization step
X = preprocessing.scale(X)
# 划分数据
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.3)
# 模型训练
clf = SVC()
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))
9 、怎样检验模型
将收到的数据分为训练和测试数据7:3,对于神经网络的评价一般基于测试数据给出的结果
评价机器学习的依据:误差(回归)、精确度(分类)、R2 决定系数(回归)
10 、cross validation 交叉验证
交叉验证可以选出好的模型、来判断model好不好,对比不同参数,不同model或者不同x的属性来判断
# Cross-validation: evaluate a model and choose hyper-parameters.
from __future__ import print_function  # py2 compatibility for print()
from sklearn.datasets import load_iris
# NOTE: sklearn.cross_validation was removed in scikit-learn 0.20;
# these helpers now live in sklearn.model_selection.
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

# Load the iris dataset.
iris = load_iris()
X = iris.data
y = iris.target

# Single hold-out split: gives one score, sensitive to how the split fell.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=4)
knn = KNeighborsClassifier(n_neighbors=5)  # use the 5 nearest neighbours
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)               # predictions on the test split
print(knn.score(X_test, y_test))           # single-split accuracy

# cross_val_score: 5-fold CV averages 5 scores -> a more reliable estimate.
from sklearn.model_selection import cross_val_score
knn = KNeighborsClassifier(n_neighbors=5)
scores = cross_val_score(knn, X, y, cv=5, scoring='accuracy')
print(scores.mean())  # mean of the 5 fold accuracies

# Use cross-validation to choose a hyper-parameter (here: k for KNN).
import matplotlib.pyplot as plt
k_range = range(1, 31)
k_scores = []
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    # For regression use a negated error metric instead
    # ('mean_squared_error' was renamed to 'neg_mean_squared_error'):
    # loss = -cross_val_score(knn, X, y, cv=10, scoring='neg_mean_squared_error')
    scores = cross_val_score(knn, X, y, cv=10, scoring='accuracy')  # classification
    k_scores.append(scores.mean())

# Plot mean CV accuracy as a function of k (plt.plot -> line, scatter -> points).
plt.plot(k_range, k_scores)
plt.xlabel('Value of K for KNN')
plt.ylabel('Cross-Validated Accuracy')
plt.show()
过拟合
from __future__ import print_function
from sklearn.learning_curve import learning_curve
from sklearn.datasets import load_digits
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import numpy as np
digits = load_digits()
X = digits.data
y = digits.target
train_sizes, train_loss, test_loss= learning_curve(
SVC(gamma=0.01), X, y, cv=10, scoring='mean_squared_error',
train_sizes=[0.1, 0.25, 0.5, 0.75, 1])
train_loss_mean = -np.mean(train_loss, axis=1)
test_loss_mean = -np.mean(test_loss, axis=1)
plt.plot(train_sizes, train_loss_mean, 'o-', color="r",
label="Training")
plt.plot(train_sizes, test_loss_mean, 'o-', color="g",
label="Cross-validation")
plt.xlabel("Training examples")
plt.ylabel("Loss")
plt.legend(loc="best") #显示
plt.show()
解决方法
from __future__ import print_function
from sklearn.learning_curve import validation_curve
from sklearn.datasets import load_digits
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import numpy as np
digits = load_digits()
X = digits.data
y = digits.target
param_range = np.logspace(-6, -2.3, 5)
train_loss, test_loss = validation_curve(
SVC(), X, y, param_name='gamma', param_range=param_range, cv=10,
scoring='mean_squared_error')
train_loss_mean = -np.mean(train_loss, axis=1)
test_loss_mean = -np.mean(test_loss, axis=1)
plt.plot(param_range, train_loss_mean, 'o-', color="r",
label="Training")
plt.plot(param_range, test_loss_mean, 'o-', color="g",
label="Cross-validation")
plt.xlabel("gamma")
plt.ylabel("Loss")
plt.legend(loc="best")
plt.show()
11 、保存save
from sklearn import svm
from skefrom __future__ import print_function
from sklearn import svm
from sklearn import datasets
clf = svm.SVC()
iris = datasets.load_iris()
X, y = iris.data, iris.target
clf.fit(X, y)
# method 1: pickle python 自带的功能
import pickle
# save #先定义文件夹save
with open('save/clf.pickle', 'wb') as f:
pickle.dump(clf, f)
# restore
with open('save/clf.pickle', 'rb') as f:
clf2 = pickle.load(f)
print(clf2.predict(X[0:1]))
# method 2: joblib
from sklearn.externals import joblib
# Save
joblib.dump(clf, 'save/clf.pkl')
# restore
clf3 = joblib.load('save/clf.pkl')
print(clf3.predict(X[0:1]))