梯度下降求解逻辑回归(代码)
2017-12-03 16:18
471 查看
import numpy as np import pandas as pd import matplotlib.pyplot as plt %matplotlib inline import os path = 'data' + os.sep + 'LogiReg_data.txt' pdData = pd.read_csv(path, header=None, names=['Exam 1', 'Exam 2', 'Admitted']) pdData.head()#前两列为特征,第三列为录取与否 pdData.shape#100个数据,每个样本有三列特征值 positive = pdData[pdData['Admitted'] == 1]#正例,返回行子集 negative = pdData[pdData['Admitted'] == 0]#负例(进行指定) fig,ax = plt.subplots(figsize=(10,15))#figsize为画图域,长15,宽10 ax.scatter(positive['Exam 1'], positive['Exam 2'], s=30, c='b', marker='o', label='Admitted')#正例和负例,标签与颜色 ax.scatter(negative['Exam 1'], negative['Exam 2'], s=30, c='r', marker='x', label='Not Admitted') ax.legend() ax.set_xlabel('Exam 1 Score') ax.set_ylabel('Exam 2 Score') def sigmoid(z):#定义sigmod函数 return 1 / (1 + np.exp(-z))#np.exp(-z)为e的-z次幂(转数值运算为矩阵运算) nums = np.arange(-10, 10, step=1) #creates a vector containing 20 equally spaced values from -10 to 10 fig, ax = plt.subplots(figsize=(12,4)) ax.plot(nums, sigmoid(nums), 'r') def model(X, theta):#输入X数域,theta为参数(model为预测函数) return sigmoid(np.dot(X, theta.T))#np.dot为矩阵乘法,X与theta相乘,结果传入sigmod函数中 pdData.insert(0, 'Ones', 1) # 新加一列,列名为Ones,值全为1 # set X (training data) and y (target variable) orig_data = pdData.as_matrix() # convert the Pandas representation of the data to an array useful for further computations cols = orig_data.shape[1] X = orig_data[:,0:cols-1] y = orig_data[:,cols-1:cols] # convert to numpy arrays and initalize the parameter array theta #X = np.matrix(X.values) #y = np.matrix(data.iloc[:,3:4].values) #np.array(y.values) theta = np.zeros([1, 3])#构造1行三列的theta参数,占位 X[:5] y[:5] theta X.shape, y.shape, theta.shape def cost(X, y, theta):#定义损失函数,X为数据,y为标签,theta为参数 left = np.multiply(-y, np.log(model(X, theta))) right = np.multiply(1 - y, np.log(1 - model(X, theta))) return np.sum(left - right) / (len(X)) cost(X, y, theta) def gradient(X, y, theta): grad = np.zeros(theta.shape)#定义梯度,与theta一一对应 error = (model(X, theta)- y).ravel() for j in range(len(theta.ravel())): #for each parmeter term = np.multiply(error, X[:,j])#取第j列样本 grad[0, j] = np.sum(term) / len(X) return grad STOP_ITER = 0 STOP_COST = 1 STOP_GRAD = 2 def stopCriterion(type, value, threshold): #设定三种不同的停止策略 if type == STOP_ITER: return value > threshold #指定次数 elif type == STOP_COST: return abs(value[-1]-value[-2]) < threshold #指定阈值 elif type == STOP_GRAD: return np.linalg.norm(value) < threshold #指定阈值 import numpy.random #洗牌 def shuffleData(data): np.random.shuffle(data) cols = data.shape[1] X = data[:, 0:cols-1] y = data[:, cols-1:] return X, y import time#观察时间对结果的影响 def descent(data, theta, batchSize, stopType, thresh, alpha):#数据,参数,梯度下降样式,策略,阈值,学习率 #梯度下降求解 init_time = time.time() i = 0 # 迭代次数 k = 0 # batch X, y = shuffleData(data) grad = np.zeros(theta.shape) # 计算的梯度 costs = [cost(X, y, theta)] # 损失值 while True: grad = gradient(X[k:k+batchSize], y[k:k+batchSize], theta) k += batchSize #取batch数量个数据 if k >= n: k = 0 X, y = shuffleData(data) #重新洗牌 theta = theta - alpha*grad # 参数更新 costs.append(cost(X, y, theta)) # 计算新的损失 i += 1 if stopType == STOP_ITER: value = i elif stopType == STOP_COST: value = costs elif stopType == STOP_GRAD: value = grad if stopCriterion(stopType, value, thresh): break return theta, i-1, costs, grad, time.time() - init_time def runExpe(data, theta, batchSize, stopType, thresh, alpha):#根据参数选择梯度下降方式和策略 #import pdb; pdb.set_trace(); theta, iter, costs, grad, dur = descent(data, theta, batchSize, stopType, thresh, alpha) name = "Original" if (data[:,1]>2).sum() > 1 else "Scaled" name += " data - learning rate: {} - ".format(alpha) if batchSize==n: strDescType = "Gradient" elif batchSize==1: strDescType = "Stochastic" else: strDescType = "Mini-batch ({})".format(batchSize) name += strDescType + " descent - Stop: " if stopType == STOP_ITER: strStop = "{} iterations".format(thresh) elif stopType == STOP_COST: strStop = "costs change < {}".format(thresh) else: strStop = "gradient norm < {}".format(thresh) name += strStop print ("***{}\nTheta: {} - Iter: {} - Last cost: {:03.2f} - Duration: {:03.2f}s".format( name, theta, iter, costs[-1], dur)) fig, ax = plt.subplots(figsize=(12,4)) ax.plot(np.arange(len(costs)), costs, 'r') ax.set_xlabel('Iterations') ax.set_ylabel('Cost') ax.set_title(name.upper() + ' - Error vs. Iteration') return theta #选择的梯度下降方法是基于所有样本的 n=100 runExpe(orig_data, theta, n, STOP_ITER, thresh=5000, alpha=0.000001) runExpe(orig_data, theta, n, STOP_COST, thresh=0.000001, alpha=0.001) runExpe(orig_data, theta, n, STOP_GRAD, thresh=0.05, alpha=0.001) runExpe(orig_data, theta, 1, STOP_ITER, thresh=5000, alpha=0.001) runExpe(orig_data, theta, 1, STOP_ITER, thresh=15000, alpha=0.000002) runExpe(orig_data, theta, 16, STOP_ITER, thresh=15000, alpha=0.001) from sklearn import preprocessing as pp scaled_data = orig_data.copy() scaled_data[:, 1:3] = pp.scale(orig_data[:, 1:3]) runExpe(scaled_data, theta, n, STOP_ITER, thresh=5000, alpha=0.001) runExpe(scaled_data, theta, n, STOP_GRAD, thresh=0.02, alpha=0.001) theta = runExpe(scaled_data, theta, 1, STOP_GRAD, thresh=0.002/5, alpha=0.001) runExpe(scaled_data, theta, 16, STOP_GRAD, thresh=0.002*2, alpha=0.001) #设定阈值 def predict(X, theta): return [1 if x >= 0.5 else 0 for x in model(X, theta)] scaled_X = scaled_data[:, :3] y = scaled_data[:, 3] predictions = predict(scaled_X, theta) correct = [1 if ((a == 1 and b == 1) or (a == 0 and b == 0)) else 0 for (a, b) in zip(predictions, y)] accuracy = (sum(map(int, correct)) % len(correct)) print ('accuracy = {0}%'.format(accuracy))#进行逻辑回归,梯度下降,并对比
相关文章推荐
- 梯度下降求解逻辑回归2(代码编写以及三种梯度下降对比)
- 梯度下降求解逻辑回归
- 梯度下降求解逻辑回归(Python)
- MLiA 逻辑回归 求解回归函数的系数中梯度下降法及其向量化
- LR 逻辑回归代码 (梯度下降)
- 机器学习实战——梯度下降求解逻辑回归(1理论基础)
- 梯度下降求解逻辑回归
- 逻辑回归梯度下降法的推导过程
- 逻辑回归梯度下降法详解
- 逻辑回归与梯度下降
- 线性回归之梯度下降(附代码)
- 逻辑斯蒂回归梯度下降法推导
- 梯度下降和逻辑回归
- Andrew机器学习课程笔记(1)——梯度下降、逻辑回归
- [置顶] windows10 tensorflow(二)原理实战之回归分析,深度学习框架(梯度下降法求解回归参数)
- Java应用梯度下降求解线性SVM模型参考代码
- 逻辑回归python实现(随机增量梯度下降,变步长)
- 逻辑回归与梯度下降详解
- 逻辑回归和梯度下降
- 回归问题总结(梯度下降、线性回归、逻辑回归、源码、正则化)