Part 1: Theoretical Foundations
Methods that combine different classifiers are called "ensemble methods" or "meta-algorithms". Ensembles take many forms: different algorithms can be combined, the same algorithm can be combined under different settings, or different parts of a dataset can be assigned to different classifiers whose outputs are then combined.
Bootstrap aggregating (bagging) builds S new datasets by sampling from the original dataset with replacement S times, then applies a learning algorithm to each of the S datasets to obtain S classifiers. To classify new data, all S classifiers are applied and their results are put to a vote; the class with the most votes is the final prediction.
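To make the resampling step concrete, here is a minimal sketch of one bootstrap draw (the helper name bootstrapSample and the use of numpy's randint are our own illustration, not code from the listings below):

from numpy import random

def bootstrapSample(dataArr, labels):
    # one bootstrap replicate: len(dataArr) indices drawn with replacement
    idx = random.randint(0, len(dataArr), len(dataArr))
    return [dataArr[i] for i in idx], [labels[i] for i in idx]

# S such replicates, each of which would train one classifier
S = 5
replicates = [bootstrapSample([[1, 2], [3, 4], [5, 6]], [1, -1, 1]) for _ in range(S)]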
Boosting obtains each new classifier by concentrating on the data that the existing classifiers misclassified. Its final prediction is a weighted sum over all classifiers, where each classifier's weight reflects how well it performed in the round it was trained, so the weights differ across classifiers. (Boosting has many variants; this post only discusses AdaBoost.)
The basic steps of adaptive boosting (AdaBoost) are as follows:
step1: initialize the sample weight vector D;
step2: train a weak classifier on the weighted data and compute its weighted error rate;
step3: compute the classifier's weight alpha from that error rate;
step4: update the sample weights D (raise the weights of misclassified samples, lower the rest);
step5: compute the error rate of the aggregate weighted-vote classifier;
step6: if that error rate meets the requirement (or the iteration limit is reached), stop; otherwise return to step2.
The relevant quantities are computed as follows (these match the code in Part 2). The weighted error rate of a weak classifier is

$$\varepsilon = \frac{\text{number of misclassified samples}}{\text{total number of samples}}$$

(with non-uniform weights, $\varepsilon = \sum_i D_i \,\mathbb{1}[h(x_i) \neq y_i]$). The classifier weight is

$$\alpha = \frac{1}{2}\ln\!\left(\frac{1-\varepsilon}{\varepsilon}\right),$$

and the sample weights are updated as

$$D_i^{(t+1)} = \frac{D_i^{(t)}\, e^{-\alpha\, y_i h_t(x_i)}}{\operatorname{Sum}(D)},$$

i.e. a sample's weight is multiplied by $e^{-\alpha}$ if it was classified correctly and by $e^{\alpha}$ if it was misclassified, then all weights are renormalized to sum to 1.
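As a quick sanity check on these formulas: a weak classifier with ε = 0.2 receives α = 0.5·ln(0.8/0.2) ≈ 0.693, so correctly classified samples have their weights halved and misclassified ones doubled before renormalization:

from numpy import log, exp

error = 0.2
alpha = 0.5 * log((1.0 - error) / error)
print(alpha)        # 0.693...
print(exp(-alpha))  # 0.5: weights of correctly classified samples shrink
print(exp(alpha))   # 2.0: weights of misclassified samples grow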
Part 2: Algorithm Implementation
# 0
# Simple test dataset
from numpy import *   # the listings below use mat, matrix, ones, zeros, multiply, exp, log, sign, inf

def loadSimpData():
    datMat = matrix([[1. , 2.1],
                     [2. , 1.1],
                     [1.3, 1. ],
                     [1. , 1. ],
                     [2. , 1. ]])
    classLabels = [1.0, 1.0, -1.0, -1.0, 1.0]
    return datMat, classLabels
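Note: the listings below call a helper stumpClassify() that this post never defines. Here is a minimal implementation consistent with how buildStump() and adaClassify() call it: it classifies on a single feature against a threshold, where 'lt' sends values at or below the threshold to -1 and 'gt' sends values above it to -1.

def stumpClassify(dataMatrix, dimen, threshVal, threshIneq):
    retArray = ones((shape(dataMatrix)[0], 1))   # default every sample to +1
    if threshIneq == 'lt':
        retArray[dataMatrix[:, dimen] <= threshVal] = -1.0
    else:
        retArray[dataMatrix[:, dimen] > threshVal] = -1.0
    return retArray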
# 1
# Decision-stump generating function
def buildStump(dataArr, classLabels, D):
    dataMatrix = mat(dataArr)
    labelMat = mat(classLabels).T
    m, n = shape(dataMatrix)
    numSteps = 10.0
    bestStump = {}
    bestClasEst = mat(zeros((m, 1)))
    minError = inf
    for i in range(n):   # outer loop: find feature i's value range, from which the step size follows
        rangeMin = dataMatrix[:, i].min()
        rangeMax = dataMatrix[:, i].max()
        stepSize = (rangeMax - rangeMin) / numSteps
        for j in range(-1, int(numSteps) + 1):   # sweep the threshold across (and slightly beyond) the range
            for inequal in ['lt', 'gt']:         # try both inequality directions
                threshVal = rangeMin + float(j) * stepSize   # current threshold
                predictedVals = stumpClassify(dataMatrix, i, threshVal, inequal)
                # classify with stumpClassify()
                errArr = mat(ones((m, 1)))             # start with every sample marked wrong
                errArr[predictedVals == labelMat] = 0  # set the correctly classified ones to 0
                weightedError = float(D.T * errArr)    # weighted error rate
                print("split: dim %d, thresh %.2f, thresh inequal: %s, the weighted error is %.3f" %
                      (i, threshVal, inequal, weightedError))
                if weightedError < minError:   # keep the stump with the lowest weighted error so far
                    minError = weightedError
                    bestClasEst = predictedVals.copy()
                    bestStump['dim'] = i
                    bestStump['thresh'] = threshVal
                    bestStump['ineq'] = inequal
    return bestStump, minError, bestClasEst
# Test
D = mat(ones((5, 1)) / 5)
datMat, classLabels = loadSimpData()
bestStump, minError, bestClasEst = buildStump(datMat, classLabels, D)
print(bestStump, '\n', minError, '\n', bestClasEst)
Running this prints every candidate split and ends with the chosen stump, its minimum weighted error, and its class estimates.
# 2
# AdaBoost training based on decision stumps
def adaBoostTrainDS(dataArr, classLabels, numIt=40):
    weakClassArr = []
    m = shape(dataArr)[0]
    D = mat(ones((m, 1)) / m)          # start with uniform sample weights
    aggClassEst = mat(zeros((m, 1)))
    for i in range(numIt):
        print("=" * 40)
        bestStump, error, classEst = buildStump(dataArr, classLabels, D)
        print("current sample weights D:", D.T)
        print("current class estimates classEst:", classEst.T)
        alpha = float(0.5 * log((1.0 - error) / max(error, 1e-16)))
        # classifier weight; max(error, 1e-16) avoids division by zero
        print("classifier weight alpha derived from error:", alpha)
        bestStump['alpha'] = alpha
        weakClassArr.append(bestStump)
        expon = multiply(-1 * alpha * mat(classLabels).T, classEst)  # exponent -alpha*y*h for the D update
        print("exponent for the D update:", expon.T)
        D = multiply(D, exp(expon))
        D = D / D.sum()                # renormalize so the weights sum to 1
        print("updated sample weights D:", D.T)
        aggClassEst += alpha * classEst   # running weighted vote
        print("aggregate class estimates aggClassEst:", aggClassEst.T)
        aggErrors = multiply(sign(aggClassEst) != mat(classLabels).T, ones((m, 1)))
        print("aggregate predictions sign(aggClassEst):", sign(aggClassEst).T)
        print("error indicator aggErrors:", aggErrors.T)
        errorRate = aggErrors.sum() / m
        print("total error:", errorRate, "\n")
        if errorRate == 0.0:
            break
    return weakClassArr, aggClassEst   # aggClassEst is returned for the ROC plot later; unpack both values when calling
# Test
datMat, classLabels = loadSimpData()
weakClassArr, aggClassEst = adaBoostTrainDS(datMat, classLabels, 9)
print(weakClassArr)
Running this prints the per-iteration trace and, once the total error reaches 0, the list of trained weak classifiers.
# 3
# AdaBoost classification function
def adaClassify(datToClass, classifierArr):
    # datToClass: the data to classify; a single point or a set of points
    # classifierArr: the trained array of weak classifiers
    dataMatrix = mat(datToClass)
    m = shape(dataMatrix)[0]
    aggClassEst = mat(zeros((m, 1)))
    for i in range(len(classifierArr)):   # run every weak classifier on the data
        classEst = stumpClassify(dataMatrix, classifierArr[i]['dim'],
                                 classifierArr[i]['thresh'],
                                 classifierArr[i]['ineq'])
        aggClassEst += classifierArr[i]['alpha'] * classEst
        # accumulate the weighted votes
        print("aggClassEst after %d weak classifier(s):\n" % (i + 1), aggClassEst)
    return sign(aggClassEst)   # final class labels
# Test
datMat, labelArr = loadSimpData()
classifierArr, aggClassEst = adaBoostTrainDS(datMat, labelArr, 30)
print("trained weak classifiers:", classifierArr)
print("=" * 40)
print("training of the weak classifiers ends here\n")
result = adaClassify([0, 0], classifierArr)
print("final classification result:", result)
Running this prints the intermediate aggClassEst after each weak classifier and the final sign as the prediction for [0, 0].
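Since adaClassify() accepts a matrix of points, several can be classified in one call, for example:

print(adaClassify([[5, 5], [0, 0]], classifierArr))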
# 4
# Adaptive data-loading function (infers the number of features from the first line)
def loadDataSet(fileName):
    numFeat = len(open(fileName).readline().split('\t'))
    dataMat = []
    labelMat = []
    fr = open(fileName)
    for line in fr.readlines():
        lineArr = []
        curLine = line.strip().split('\t')
        for i in range(numFeat - 1):
            lineArr.append(float(curLine[i]))
        dataMat.append(lineArr)
        labelMat.append(float(curLine[-1]))
    return dataMat, labelMat
# Test
datArr, labelArr = loadDataSet('horseColicTraining2.txt')
classifierArray, aggClassEst = adaBoostTrainDS(datArr, labelArr, 10)
testArr, testLabelArr = loadDataSet('horseColicTest2.txt')
prediction10 = adaClassify(testArr, classifierArray)
# count the misclassified test samples (use len(testArr) instead of hardcoding the test-set size)
errArr = mat(ones((len(testArr), 1)))
errNum = errArr[prediction10 != mat(testLabelArr).T].sum()
print("number misclassified:", errNum)
print("test error rate: %.3f" % (errNum / float(len(testArr))))
Running this reports the number of misclassified test samples and the corresponding test error rate.
# 5
# ROC curve plotting and AUC calculation
def plotROC(predStrengths, classLabels):
    import matplotlib.pyplot as plt
    cur = (1.0, 1.0)                    # cursor starts at the top-right corner
    ySum = 0.0                          # accumulates heights for the rectangle-sum AUC
    numPosClas = sum(array(classLabels) == 1.0)
    yStep = 1 / float(numPosClas)                     # step down per true positive
    xStep = 1 / float(len(classLabels) - numPosClas)  # step left per false positive
    sortedIndicies = predStrengths.argsort()          # indices in ascending order of prediction strength
    fig = plt.figure()
    fig.clf()
    ax = plt.subplot(111)
    for index in sortedIndicies.tolist()[0]:
        if classLabels[index] == 1.0:
            delX = 0
            delY = yStep
        else:
            delX = xStep
            delY = 0
            ySum += cur[1]              # only x-moves add a rectangle of height cur[1]
        ax.plot([cur[0], cur[0] - delX], [cur[1], cur[1] - delY], c='b')
        cur = (cur[0] - delX, cur[1] - delY)
    ax.plot([0, 1], [0, 1], 'b--')      # the random-guess diagonal
    plt.xlabel('false positive rate')
    plt.ylabel('true positive rate')
    plt.title('ROC curve for AdaBoost Horse Colic Detection System')
    ax.axis([0, 1, 0, 1])
    plt.show()
    print('the area under the curve is:', ySum * xStep)
# Test
datArr, labelArr = loadDataSet('horseColicTraining2.txt')
classifierArray, aggClassEst = adaBoostTrainDS(datArr, labelArr, 10)
plotROC(aggClassEst.T, labelArr)
Running this displays the ROC curve for the horse colic classifier and prints its AUC.
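As an optional sanity check (assuming scikit-learn is available; it is not used anywhere else in this post), the rectangle-sum AUC printed by plotROC() should closely match sklearn's computation on the same prediction strengths:

from sklearn.metrics import roc_auc_score
print(roc_auc_score(labelArr, array(aggClassEst).flatten()))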