您的位置:首页 > 编程语言 > Python开发

分类算法系列1-----KNN(K近邻)算法思想和python实现

2017-04-05 23:39 716 查看

1.1K近邻算法的简介

K近邻算法用于测量不同特征值之间的距离进行分类

优点:精度高、对异常值不敏感、无数据输入假设

缺点:计算复杂度高、空间复杂度高

1.2 K近邻算法计算流程



1.3 K近邻算法例子

下图为K近邻算法的计算步骤流程,由于K=4,Top4有3个为flag1则A的类别为flag1,下图选用最常用的计算距离的公式两点之间距离(欧式距离)



1.4 Python代码实现

以下代码参考《机器学习实战》

from numpy import *
import matplotlib.pyplot as plt
# Load a tab-separated dataset: 3 feature columns followed by an integer label.
def file2matrix(filename):
    """Parse a tab-separated data file into a feature matrix and label list.

    Each line must contain three numeric feature values followed by an
    integer class label, all separated by tabs.

    Args:
        filename: path to the data file.

    Returns:
        (returnMat, classLabelVector): an (n, 3) float matrix of the
        features and a list of n integer labels.
    """
    # 'with' guarantees the handle is closed even if parsing fails
    # (the original opened the file and never closed it).
    with open(filename) as fr:
        lines = fr.readlines()
    numberOfLines = len(lines)
    returnMat = zeros((numberOfLines, 3))
    classLabelVector = []
    for index, line in enumerate(lines):
        listFromLine = line.strip().split('\t')
        returnMat[index, :] = listFromLine[0:3]
        classLabelVector.append(int(listFromLine[-1]))
    return returnMat, classLabelVector
# Min-max normalization of every dataset column.
def autoNorm(dataSet):
    """Scale each column of dataSet into [0, 1] via min-max normalization.

    Args:
        dataSet: (n, d) numeric matrix.

    Returns:
        (normDataSet, ranges, minVals): the normalized matrix, the
        per-column ranges (max - min), and the per-column minimums.
    """
    minVals = dataSet.min(0)
    maxVals = dataSet.max(0)
    ranges = maxVals - minVals
    # Broadcasting applies the per-column shift and scale to every row,
    # replacing the explicit tile() copies of the original.
    normDataSet = (dataSet - minVals) / ranges
    return normDataSet, ranges, minVals

# k-nearest-neighbor classifier.
def classify0(inX, dataSet, labels, k):
    """Classify inX by majority vote among its k nearest training points.

    Distance metric is the Euclidean distance between inX and each row
    of dataSet.

    Args:
        inX: query feature vector (length d).
        dataSet: (n, d) training feature matrix.
        labels: sequence of n training labels, aligned with dataSet rows.
        k: number of neighbors to vote.

    Returns:
        The label with the most votes among the k nearest neighbors.
    """
    dataSetSize = dataSet.shape[0]  # number of training records
    # Difference between the query and every training row.
    diffMat = tile(inX, (dataSetSize, 1)) - dataSet
    sqDiffMat = diffMat ** 2
    sqDistances = sqDiffMat.sum(axis=1)  # per-row sum of squares
    distances = sqDistances ** 0.5  # Euclidean distances
    sortedDistIndicies = distances.argsort()  # indices sorted by distance
    classCount = {}
    for i in range(k):
        voteIlabel = labels[sortedDistIndicies[i]]
        classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
    # Fix: dict.iteritems() was removed in Python 3 and `operator` was
    # never imported; picking the max-count entry directly needs neither.
    return max(classCount.items(), key=lambda kv: kv[1])[0]

# Classify the first hoRatio fraction of rows against the remainder and
# report the error rate.
def datingClassTest():
    """Evaluate classify0 on the dating dataset with a simple hold-out split.

    Reads 'datingTestSet2.txt', normalizes the features, classifies the
    first hoRatio fraction of rows against the remaining rows (k = 5),
    and prints the test-set size, the error rate, and the error count.
    """
    hoRatio = 0.50  # fraction of rows held out as test queries
    datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')
    # normMat: normalized features; ranges: per-column max-min; minVals: per-column min
    normMat, ranges, minVals = autoNorm(datingDataMat)
    m = normMat.shape[0]  # total number of rows
    numTestVecs = int(m * hoRatio)
    # Fix: Python 2 `print x` statements are a syntax error under Python 3.
    print(numTestVecs)
    errorCount = 0.0
    for i in range(numTestVecs):
        # Train on the rows NOT held out for testing.
        classifierResult = classify0(normMat[i, :], normMat[numTestVecs:m, :],
                                     datingLabels[numTestVecs:m], 5)
        if classifierResult != datingLabels[i]:
            errorCount += 1.0
    print("the total error rate is: %f" % (errorCount / float(numTestVecs)))
    print(errorCount)

def classifyPerson():
    """Interactively classify a person from three user-entered feature values.

    Prompts for the three dating features, normalizes them with the
    training set's min/range, classifies with k = 3, and prints the
    predicted category name and its numeric label.
    """
    resultList = ['not at all', 'in some doses', 'in large doses']
    # Fix: raw_input() was removed in Python 3; input() returns a string there.
    ffmiles = float(input('frequent filer miles earned'))
    precents = float(input('precents filer miles earned'))
    iceCream = float(input('liters of ice'))
    datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')
    normMat, ranges, minVals = autoNorm(datingDataMat)
    inArray = [ffmiles, precents, iceCream]
    # Normalize the query with the training min/range before classifying.
    classifierResult = classify0((inArray - minVals) * 1.0 / ranges,
                                 normMat, datingLabels, 3)
    # Labels are 1-based, so shift by one to index resultList.
    print(resultList[classifierResult - 1])
    print(classifierResult)

if __name__=='__main__':
    # Run the hold-out evaluation first, then the interactive classifier.
    datingClassTest()
    classifyPerson()
下图为数据的分布趋势






下面为用户手动输入数据观测数据所属类别





至此K近邻算法的实现基本完成
内容来自用户分享和网络整理,不保证内容的准确性,如有侵权内容,可联系管理员处理 点击这里给我发消息
标签:  数据挖掘 算法 KNN