随机梯度下降解线性回归
2013-10-26 15:36
92 查看
%Stochastic gradient descent for linear regression: compute the gradient from
%one sample at a time, then update theta.
%  x          - m-by-n design matrix, one sample per row
%  y          - m-by-1 target vector
%  max_iter   - maximum number of passes over the data
%  loss_thrd  - stop when the loss change between passes is <= this threshold
%  learn_rate - step size of each per-sample update
%Returns theta, the n-by-1 fitted parameter column vector.
function theta = Stochastic_GD(x, y, max_iter, loss_thrd, learn_rate)
[m, n] = size(x);          %m samples, n features
theta = zeros(n, 1);       %parameters start at zero
loss_pre = Inf;            %Inf (not an arbitrary sentinel like 10000) so the
                           %first convergence test can never fire spuriously
iter = 0;
while iter < max_iter
    [x y] = RandomSort(x, y); %reshuffle samples each pass to simulate random draws
    for i = 1:m
        %residual of sample i under the current parameters
        %(named 'resid' to avoid shadowing the MATLAB built-in 'error')
        resid = y(i) - x(i, :) * theta;
        theta = theta + learn_rate * resid * x(i, :)'; %update all n parameters at once
    end
    loss = sum((y - x * theta).^2); %sum-of-squares loss over the whole set
    if abs(loss_pre - loss) <= loss_thrd
        break; %converged: loss no longer changes meaningfully
    end
    fprintf('loop = %d,\tloss = %0.6f,\ttheta(1) = %0.6f,\ttheta(2) = %f\n', iter, loss, theta(1), theta(2)); %assumes n >= 2
    loss_pre = loss;
    iter = iter + 1;
end
end
%Randomly permute the samples of x1 (rows) and x2 (entries) together,
%simulating randomly drawn samples. One shared permutation keeps each
%x row aligned with its y target.
%BUG FIX: the original only permuted column 2 of x1 (y1(i,2) = x1(r(i),2)),
%leaving every other column in its original row order — correct only when
%the remaining columns are constant (e.g. an all-ones bias column).
%Now all columns are permuted as whole rows.
function [y1 y2] = RandomSort(x1, x2)
m = size(x1, 1);
r = randperm(m);  %one random permutation of 1..m shared by both outputs
y1 = x1(r, :);    %permute entire rows, not just column 2
y2 = x2(r);
end
%Prediction for sample 'index': the inner product of theta with the
%first length(theta) entries of row 'index' of x.
function hx = calcu_hx(theta, x, index)
n = length(theta);
hx = x(index, 1:n) * theta(:); %theta(:) forces a column so the product is scalar
end
%NOTE(review): this is a byte-for-byte duplicate of the Stochastic_GD defined
%earlier in this file; MATLAB does not allow two same-named functions in one
%file — one copy should be removed.
%Stochastic gradient descent: use one sample to compute the gradient, then update theta.
function theta = Stochastic_GD(x, y, max_iter, loss_thrd, learn_rate)
[m, n] = size(x); %m samples (rows) and n features (columns) of x
theta = zeros(n, 1); %n-by-1 all-zero column vector: initial parameter guess; for n=2 this equals theta = [0;0]
loss_pre = 10000; %arbitrary large sentinel so the first convergence test (usually) does not fire
iter = 0;
while iter < max_iter
[x y] = RandomSort(x, y); %reshuffle the samples each pass to simulate random arrival
for i = 1:m
error = y(i)-calcu_hx(theta, x, i); %residual of sample i (shadows the MATLAB built-in 'error')
for j = 1:n
theta(j) = theta(j) + learn_rate * error * x(i, j);
end
end
loss = 0.0;
for i = 1:m
loss = loss + (y(i)-calcu_hx(theta, x, i))^2; %sum-of-squares loss over all samples
end
if abs(loss_pre - loss) <= loss_thrd
break; %converged: loss change below threshold (the final loss is never printed)
end
fprintf('loop = %d,\tloss = %0.6f,\ttheta(1) = %0.6f,\ttheta(2) = %f\n',iter, loss, theta(1), theta(2));
loss_pre = loss;
iter = iter + 1;
end
end
%NOTE(review): duplicate of the RandomSort defined earlier in this file — one copy should be removed.
%Randomly reorder x1, x2 into y1, y2 to simulate randomly drawn samples.
%BUG(review): only column 2 of x1 is permuted (y1(i,2) = x1(r(i),2)); every
%other column keeps its original row order. This is only correct if the other
%columns are constant (e.g. an all-ones bias column) — confirm against the caller.
function [y1 y2] = RandomSort(x1, x2)
y1 = x1;
y2 = x2;
[m, n] = size(x1); %n is unused
r = randperm(m); %one shared permutation keeps the x/y pairing aligned (column 2 only, per the bug above)
for i=1:m
y1(i, 2) = x1(r(i), 2);
y2(i) = x2(r(i));
end
end
%NOTE(review): duplicate of the calcu_hx defined earlier in this file — one copy should be removed.
%Prediction for sample 'index': the dot product of theta with the first
%length(theta) entries of row 'index' of x.
function hx = calcu_hx(theta, x, index)
hx = 0.0;
for i = 1:length(theta)
hx = hx + theta(i)*x(index, i);
end
end
相关文章推荐
- 机器学习中的数学(2)-线性回归,偏差、方差权衡
- 【Stanford Machine Learning Open Course】2. 线性回归问题介绍
- 贝叶斯线性回归(Bayesian Linear Regression)
- Coursera公开课笔记: 斯坦福大学机器学习第二课“单变量线性回归(Linear regression with one variable)”
- Stanford机器学习---第二讲. 多变量线性回归 Linear Regression with multiple variable
- Matlab实现线性回归和逻辑回归: Linear Regression & Logistic Regression
- 批量梯度下降和随机梯度下降matlab 实现
- 线性回归
- 局部权重线性回归(Locally weighted linear regression)
- Coursera公开课笔记: 斯坦福大学机器学习第四课“多变量线性回归(Linear Regression with Multiple Variables)”
- 局部权重线性回归(Locally weighted linear regression)
- 线性回归与梯度下降法
- 线性回归和局部加权线性回归
- 线性回归与复相关系数
- Apache Spark源码走读之22 -- 浅谈mllib中线性回归的算法实现
- 斯坦福大学机器学习第三课“多变量线性回归“
- 线性回归
- 线性回归
- 从一个R语言案例学线性回归
- 一元线性回归(原理)