Data Mining with Python

2.2 Data Mining

PLA Algorithm

PLA (the Perceptron Learning Algorithm) learns a linear separator by cycling through the training set and correcting each misclassified sample: whenever sign(W·X) disagrees with the label y, the weights are updated by W ← W + y·X, until a full pass makes no mistake.

Python code:

import numpy as np

def pla():
    W = np.ones(4)  # initialize all weights to 1
    count = 0
    dataset = [[1, 0.10723,  0.64385,  0.29556,      1],
               [1, 0.2418,   0.83075,  0.42741,      1],
               [1, 0.23321,  0.81004,  0.98691,      1],
               [1, 0.36163,  0.14351,  0.3153,      -1],
               [1, 0.46984,  0.32142,  0.00042772,  -1],
               [1, 0.25969,  0.87208,  0.075063,    -1],
               ]

    while True:
        count += 1
        iscompleted = True
        for i in range(len(dataset)):
            X = dataset[i][:-1]
            Y = np.dot(W, X)  # dot product of weight and feature vectors
            if np.sign(Y) == np.sign(dataset[i][-1]):
                continue
            else:
                iscompleted = False
                W = W + dataset[i][-1] * np.array(X)
        if iscompleted:
            break
    print("final W is :", W)
    print("count is :", count)
    return W

def main():
    pla()

if __name__ == '__main__':
    main()

Running the program prints:

final W is : [-1.         -1.20451     1.12317     1.50704028]
count is : 9
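
Once training converges, the returned weights classify a new point by the sign of the dot product. A minimal sketch, reusing the imports and pla() defined above (the feature values here are hypothetical, not from the original data):

W = pla()
# Prepend the bias term 1 to the three features, then take the sign.
# These feature values are made up purely for illustration.
x_new = np.array([1, 0.3, 0.5, 0.7])
print("predicted label:", np.sign(np.dot(W, x_new)))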

kNN Algorithm

kNN classifies a test sample by majority vote among the labels of its k nearest training points, here using Euclidean distance.

import numpy as np

# Training data and the corresponding class labels
def createDataSet():
    group = np.array([[1.0, 2.0], [1.2, 0.1], [0.1, 1.4], [0.3, 3.5]])
    labels = ['A', 'A', 'B', 'B']
    return group, labels

# Classify `input` with kNN
def classify(input, dataSet, label, k):
    dataSize = dataSet.shape[0]
    # Compute the Euclidean distance to every training point
    diff = np.tile(input, (dataSize, 1)) - dataSet
    sqdiff = diff ** 2
    squareDist = np.sum(sqdiff, axis=1)  # sum over each row
    dist = squareDist ** 0.5

    # Sort by distance: argsort() returns the indices that would sort
    # the array in ascending order, so the nearest points come first
    sortedDistIndex = np.argsort(dist)

    classCount = {}
    for i in range(k):
        voteLabel = label[sortedDistIndex[i]]
        # Count how many of the k nearest samples fall in each class
        classCount[voteLabel] = classCount.get(voteLabel, 0) + 1
    # Pick the class with the most votes
    maxCount = 0
    for key, value in classCount.items():
        if value > maxCount:
            maxCount = value
            classes = key

    return classes

dataSet, labels = createDataSet()
input = np.array([1.1, 0.3])
K = 3
output = classify(input, dataSet, labels, K)
print("test sample:", input, "classified as:", output)

Decision Tree

An ID3-style decision tree picks, at each node, the feature whose split yields the largest information gain, i.e. the largest drop in the Shannon entropy of the class labels. For the sample labels below (2 'yes', 3 'no'), the starting entropy is -(2/5)log2(2/5) - (3/5)log2(3/5) ≈ 0.971 bits.

from math import log
import operator

def createDataSet():
    dataSet = [[1, 1, 'yes'],
               [1, 1, 'yes'],
               [1, 0, 'no'],
               [0, 1, 'no'],
               [0, 1, 'no']]
    labels = ['no surfacing', 'flippers']
    return dataSet, labels

def calcShannonEnt(dataSet):
    # Calculate the Shannon entropy of the class labels
    numEntries = len(dataSet)
    labelCounts = {}
    for featVec in dataSet:  # count how often each class label occurs
        currentLabel = featVec[-1]
        if currentLabel not in labelCounts.keys():
            labelCounts[currentLabel] = 0
        labelCounts[currentLabel] += 1
    shannonEnt = 0.0
    for key in labelCounts:
        prob = float(labelCounts[key]) / numEntries
        shannonEnt -= prob * log(prob, 2)  # entropy = -sum(p * log2(p))
    return shannonEnt
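
# Note: splitDataSet() is called by chooseBestFeatureToSplit() and
# createTree() below but was missing from the original listing. This is
# the standard helper from the reference implementation (Machine
# Learning in Action), reproduced here so the listing runs.
def splitDataSet(dataSet, axis, value):
    # Return the rows whose feature at position `axis` equals `value`,
    # with that feature column removed
    retDataSet = []
    for featVec in dataSet:
        if featVec[axis] == value:
            reducedFeatVec = featVec[:axis]
            reducedFeatVec.extend(featVec[axis + 1:])
            retDataSet.append(reducedFeatVec)
    return retDataSet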

def chooseBestFeatureToSplit(dataSet):
    numFeatures = len(dataSet[0]) - 1
    baseEntropy = calcShannonEnt(dataSet)
    bestInfoGain = 0.0
    bestFeature = -1
    for i in range(numFeatures):
        featList = [example[i] for example in dataSet]
        uniqueVals = set(featList)
        newEntropy = 0.0
        for value in uniqueVals:
            subDataSet = splitDataSet(dataSet, i, value)
            prob = len(subDataSet) / float(len(dataSet))
            newEntropy += prob * calcShannonEnt(subDataSet)
        # Information gain = entropy before the split minus the
        # weighted entropy after the split
        infoGain = baseEntropy - newEntropy
        if infoGain > bestInfoGain:
            bestInfoGain = infoGain
            bestFeature = i
    return bestFeature

def majorityCnt(classList):
    # Return the most frequent class label
    classCount = {}
    for vote in classList:
        if vote not in classCount.keys():
            classCount[vote] = 0
        classCount[vote] += 1
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]

def createTree(dataSet, labels):
    classList = [example[-1] for example in dataSet]
    # All samples have the same class, so stop splitting
    if classList.count(classList[0]) == len(classList):
        return classList[0]
    # All features have been used up: return the most frequent class
    if len(dataSet[0]) == 1:
        return majorityCnt(classList)
    bestFeat = chooseBestFeatureToSplit(dataSet)
    bestFeatLabel = labels[bestFeat]
    myTree = {bestFeatLabel: {}}
    del(labels[bestFeat])
    # Recurse on every value taken by the chosen feature
    featValues = [example[bestFeat] for example in dataSet]
    uniqueVals = set(featValues)
    for value in uniqueVals:
        subLabels = labels[:]  # copy so recursion does not mutate labels
        myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet, bestFeat, value), subLabels)
    return myTree

myDat, labels = createDataSet()
myTree = createTree(myDat, labels)
print(myTree)
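
With the sample data above, 'no surfacing' wins the first split (information gain ≈ 0.420 bits versus ≈ 0.171 for 'flippers'), so the script should print a nested dictionary of the form:

{'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}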