
Data Mining: Classification with the Logistic Regression Algorithm

2019-03-27 21:37
Copyright notice: this article may not be reproduced without the original author's permission; unauthorized reproduction will be treated as infringement. https://blog.csdn.net/springhammer/article/details/88856214

【Lab Objectives】
1. Master the principles of the logistic regression algorithm and understand its steps.
2. Master logistic regression under different gradient methods to deepen understanding of the algorithm.

【Lab Type】
Design experiment

【Lab Content】

Use the logistic regression algorithm to implement classification.
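
For reference, the standard formulation that the code below implements (textbook material, not specific to this lab's dataset): the inputs are combined as a weighted sum, the sum is passed through the sigmoid function, and an example is assigned to class 1 when the output is at least 0.5.

$$\sigma(z)=\frac{1}{1+e^{-z}},\qquad h_w(x)=\sigma(w^{\top}x),\qquad \hat{y}=\begin{cases}1, & h_w(x)\ge 0.5\\ 0, & \text{otherwise}\end{cases}$$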

【Lab Environment】
Python 2

Code:

from numpy import *

def loadDataSet():
    dataMat = []; labelMat = []
    fr = open('testSet.txt')
    for line in fr.readlines():
        lineArr = line.strip().split()
        #prepend a constant 1.0 so that weights[0] acts as the intercept
        dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])
        labelMat.append(int(lineArr[2]))
    return dataMat,labelMat

def sigmoid(inX):
    return 1.0/(1+exp(-inX))

def gradAscent(dataMatIn, classLabels):
    dataMatrix = mat(dataMatIn)             #convert to NumPy matrix
    labelMat = mat(classLabels).transpose() #convert to NumPy matrix
    m,n = shape(dataMatrix)
    alpha = 0.001                           #step size
    maxCycles = 500                         #number of full-batch updates
    weights = ones((n,1))
    for k in range(maxCycles):              #heavy on matrix operations
        h = sigmoid(dataMatrix*weights)     #matrix mult
        error = (labelMat - h)              #vector subtraction
        weights = weights + alpha * dataMatrix.transpose() * error #matrix mult
    return weights

def plotBestFit(weights):
    import matplotlib.pyplot as plt
    dataMat,labelMat = loadDataSet()
    dataArr = array(dataMat)
    n = shape(dataArr)[0]
    xcord1 = []; ycord1 = []
    xcord2 = []; ycord2 = []
    for i in range(n):
        if int(labelMat[i]) == 1:
            xcord1.append(dataArr[i,1]); ycord1.append(dataArr[i,2])
        else:
            xcord2.append(dataArr[i,1]); ycord2.append(dataArr[i,2])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
    ax.scatter(xcord2, ycord2, s=30, c='green')
    x = arange(-3.0, 3.0, 0.1)
    #decision boundary: 0 = w0 + w1*x + w2*y, solved for y
    y = (-weights[0]-weights[1]*x)/weights[2]
    ax.plot(x, y)
    plt.xlabel('X1'); plt.ylabel('X2')
    plt.show()

def stocGradAscent0(dataMatrix, classLabels):
    m,n = shape(dataMatrix)
    alpha = 0.01
    weights = ones(n)   #initialize to all ones
    for i in range(m):  #one pass over the data, one sample per update
        h = sigmoid(sum(dataMatrix[i]*weights))
        error = classLabels[i] - h
        weights = weights + alpha * error * dataMatrix[i]
    return weights

def stocGradAscent1(dataMatrix, classLabels, numIter=150):
    m,n = shape(dataMatrix)
    weights = ones(n)   #initialize to all ones
    for j in range(numIter):
        dataIndex = range(m)    #in Python 3, use list(range(m)) so del() works
        for i in range(m):
            #alpha decreases with each iteration but never reaches 0,
            #thanks to the constant term
            alpha = 4/(1.0+j+i)+0.0001
            #pick a remaining sample at random to reduce periodic fluctuations
            randIndex = int(random.uniform(0,len(dataIndex)))
            sampleIndex = dataIndex[randIndex]  #fix: index through dataIndex so each sample is used once per pass
            h = sigmoid(sum(dataMatrix[sampleIndex]*weights))
            error = classLabels[sampleIndex] - h
            weights = weights + alpha * error * dataMatrix[sampleIndex]
            del(dataIndex[randIndex])
    return weights
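
The listing above trains weights and (in plotBestFit) draws the decision boundary, but it never classifies a new point. A minimal helper for that last step could look like the sketch below; classifyVector is not part of the original listing, it reuses sigmoid from above, and inX is assumed to already contain the leading 1.0 intercept entry, like the rows returned by loadDataSet.

def classifyVector(inX, weights):
    #hypothetical helper (not in the original listing);
    #inX must include the leading 1.0 intercept entry
    prob = sigmoid(sum(inX * weights))
    if prob > 0.5:
        return 1.0
    else:
        return 0.0

For example, classifyVector(array([1.0, -0.5, 7.0]), weights) would return the predicted class for the point (-0.5, 7.0), where weights is a 1-D array such as the one returned by stocGradAscent1.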

Test code:

import logRegres
dataArr,labelMat = logRegres.loadDataSet()
a = logRegres.gradAscent(dataArr,labelMat)
print a

from numpy import *
reload(logRegres)
logRegres.plotBestFit(a.getA())    #getA() converts the weight matrix to an array

'''
weights = logRegres.stocGradAscent0(array(dataArr),labelMat)
logRegres.plotBestFit(weights)
'''

weights = logRegres.stocGradAscent1(array(dataArr),labelMat)
logRegres.plotBestFit(weights)
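
The script above assumes Python 2 (print statements and the builtin reload). A rough Python 3 equivalent is sketched below; note that under Python 3, stocGradAscent1 would also need dataIndex = list(range(m)), since del cannot be applied to a range object (see the comment in the listing).

# Python 3 variant of the test driver (a sketch; same module and data file assumed)
from numpy import array
import logRegres

dataArr, labelMat = logRegres.loadDataSet()
a = logRegres.gradAscent(dataArr, labelMat)
print(a)
logRegres.plotBestFit(a.getA())    # getA() converts the weight matrix to an ndarray

weights = logRegres.stocGradAscent1(array(dataArr), labelMat)
logRegres.plotBestFit(weights)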

【Lab Procedure】

Listing 5-1: Logistic regression gradient ascent optimization algorithm — this repeats loadDataSet, sigmoid, and gradAscent from the Code section above.
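
For reference, here is why the update in gradAscent is correct (the standard derivation, not included in the original post). Gradient ascent maximizes the log-likelihood of the labels,

$$\ell(w)=\sum_{i=1}^{m}\Big[y_i\log h_w(x_i)+(1-y_i)\log\big(1-h_w(x_i)\big)\Big],\qquad \nabla_w\,\ell(w)=X^{\top}(y-h),$$

so the step $w \leftarrow w + \alpha\,X^{\top}(y-h)$ is exactly weights = weights + alpha * dataMatrix.transpose() * error with error = labelMat - h.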

Note: for the test data, leave a comment below.
