
Machine Learning: Decision Tree Source Code in Python

2018-01-31 20:11
from math import log
import operator

# Toy data set: two binary features ("no surfacing", "flippers")
# and a class label ('yes'/'no')
def createDataSet():
    dataSet = [[1, 1, 'yes'],
               [1, 1, 'yes'],
               [1, 0, 'no'],
               [0, 1, 'no'],
               [0, 1, 'no']]
    labels = ['no surfacing', 'flippers']
    return dataSet, labels

# Compute the Shannon entropy of a data set (class label is the last column)
def calcShannonEnt(dataSet):
    numEntries = len(dataSet)
    labelcount = {}
    for featVec in dataSet:
        currentLabel = featVec[-1]
        if currentLabel not in labelcount:
            labelcount[currentLabel] = 0
        labelcount[currentLabel] += 1
    shannonEnt = 0.0
    for key in labelcount:
        prob = labelcount[key] / float(numEntries)
        shannonEnt -= prob * log(prob, 2)
    return shannonEnt
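
As a quick sanity check (an illustrative call, not part of the original listing): the toy data set above has three 'no' labels and two 'yes' labels, so its entropy should come out to roughly 0.971 bits.

myDat, labels = createDataSet()
print(calcShannonEnt(myDat))   # approximately 0.9709505944546686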
# Split the data set: keep the rows where feature `axis` equals `value`,
# with that feature column removed
def splitDataSet(dataSet, axis, value):
    retDataSet = []
    for featVec in dataSet:
        if featVec[axis] == value:
            reducedFeatureVec = featVec[:axis]
            reducedFeatureVec.extend(featVec[axis+1:])
            retDataSet.append(reducedFeatureVec)
    return retDataSet
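
For example, splitting the toy data on feature 0 with value 1 keeps the three rows whose first feature is 1 and strips that column (again an illustrative call, not in the original post):

myDat, labels = createDataSet()
print(splitDataSet(myDat, 0, 1))   # [[1, 'yes'], [1, 'yes'], [0, 'no']]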
# Choose the feature with the largest information gain to split on
def chooseBestFeatureToSplit(dataSet):
    numFeature = len(dataSet[0]) - 1
    baseEntropy = calcShannonEnt(dataSet)
    bestInfoGain = 0.0
    bestFeature = -1
    for i in range(numFeature):
        featList = [example[i] for example in dataSet]
        uniqueVals = set(featList)
        newEntropy = 0.0
        for value in uniqueVals:
            subDataSet = splitDataSet(dataSet, i, value)
            prob = len(subDataSet) / float(len(dataSet))
            newEntropy += prob * calcShannonEnt(subDataSet)
        infoGain = baseEntropy - newEntropy
        if infoGain > bestInfoGain:
            bestInfoGain = infoGain
            bestFeature = i
    return bestFeature
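
On the toy data, feature 0 ('no surfacing') gives an information gain of about 0.42 bits versus roughly 0.17 bits for 'flippers', so the function should return index 0. A minimal check, assuming the data set above:

myDat, labels = createDataSet()
print(chooseBestFeatureToSplit(myDat))   # expected: 0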
# Majority vote: return the class that occurs most often in classList
def majorityCnt(classList):
    classcount = {}
    for vote in classList:
        if vote not in classcount:
            classcount[vote] = 0
        classcount[vote] += 1
    sortedClassCount = sorted(classcount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]
# Build the tree recursively as nested dictionaries
def createTree(dataSet, labels):
    classList = [example[-1] for example in dataSet]
    if classList.count(classList[0]) == len(classList):  # all examples share one class
        return classList[0]
    if len(dataSet[0]) == 1:  # no features left: fall back to majority vote
        return majorityCnt(classList)
    bestFeat = chooseBestFeatureToSplit(dataSet)
    bestFeatLabel = labels[bestFeat]
    myTree = {bestFeatLabel: {}}
    del(labels[bestFeat])
    featValues = [example[bestFeat] for example in dataSet]
    uniqueVals = set(featValues)
    for value in uniqueVals:
        subLabels = labels[:]
        myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet, bestFeat, value), subLabels)
    return myTree
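
Running createTree on the toy data should produce the nested-dictionary tree shown below. Note that createTree deletes entries from the labels list it is given, so pass a copy if you still need the original. A sketch of the expected call and result:

myDat, labels = createDataSet()
myTree = createTree(myDat, labels[:])
print(myTree)
# {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}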

# Serialize the tree to disk with pickle (binary mode is required)
def storeTree(inputTree, filename):
    import pickle
    fw = open(filename, 'wb')
    pickle.dump(inputTree, fw)
    fw.close()

# Load a previously stored tree
def grabTree(filename):
    import pickle
    fr = open(filename, 'rb')
    return pickle.load(fr)
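
A simple round-trip test (the filename here is only an example, not something assumed by the code):

myDat, labels = createDataSet()
myTree = createTree(myDat, labels[:])
storeTree(myTree, 'classifierStorage.pkl')
print(grabTree('classifierStorage.pkl'))   # should print the same nested dictionary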