
pytorch 7 optimizer: speeding up training

import torch
import torch.utils.data as Data
import torch.nn.functional as F
import matplotlib.pyplot as plt

# torch.manual_seed(1)    # reproducible

# hyperparameters

LR = 0.01
BATCH_SIZE = 32
EPOCH = 12
# fake dataset
x = torch.unsqueeze(torch.linspace(-1, 1, 1000), dim=1)
y = x.pow(2) + 0.1*torch.normal(torch.zeros(*x.size()))
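
In other words, reading the two code lines above as math, each sample follows a noisy parabola:

    y_i = x_i^2 + 0.1\,\varepsilon_i, \qquad \varepsilon_i \sim \mathcal{N}(0, 1), \qquad x_i \in [-1, 1]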

# plot dataset
plt.scatter(x.numpy(), y.numpy())
plt.show()

# put dataset into a torch Dataset
torch_dataset = Data.TensorDataset(x, y)
loader = Data.DataLoader(dataset=torch_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2,)
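
The DataLoader simply shuffles the 1000 samples and hands them out in mini-batches. A quick standalone sanity check (illustrative only; check_loader, check_x, check_y are throwaway names, and num_workers=0 is used so it also runs outside the __main__ guard) is to pull one batch and inspect its shape:

# standalone sketch: peek at one mini-batch to confirm the loader's output shapes
check_loader = Data.DataLoader(torch_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=0)
check_x, check_y = next(iter(check_loader))
print(check_x.shape, check_y.shape)   # expected: torch.Size([32, 1]) torch.Size([32, 1])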

# default network
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(1, 20)    # hidden layer
        self.predict = torch.nn.Linear(20, 1)   # output layer

    def forward(self, x):
        x = F.relu(self.hidden(x))      # activation function for hidden layer
        x = self.predict(x)             # linear output
        return x
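
Before training, a throwaway forward pass (demo_net and demo_out are purely illustrative names, not part of the script) confirms that the 1 → 20 → 1 network maps a batch of inputs to a batch of predictions:

# illustrative sketch: one forward pass through an untrained Net
demo_net = Net()
demo_out = demo_net(torch.ones(4, 1))   # a batch of 4 samples, 1 feature each
print(demo_out.shape)                   # expected: torch.Size([4, 1])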

if __name__ == '__main__':
    # different nets: build four networks with identical structure
    net_SGD      = Net()
    net_Momentum = Net()
    net_RMSprop  = Net()
    net_Adam     = Net()
    nets = [net_SGD, net_Momentum, net_RMSprop, net_Adam]

    # different optimizers: one optimizer per net, so they can be compared on the same task
    opt_SGD      = torch.optim.SGD(net_SGD.parameters(), lr=LR)
    opt_Momentum = torch.optim.SGD(net_Momentum.parameters(), lr=LR, momentum=0.8)
    opt_RMSprop  = torch.optim.RMSprop(net_RMSprop.parameters(), lr=LR, alpha=0.9)
    opt_Adam     = torch.optim.Adam(net_Adam.parameters(), lr=LR, betas=(0.9, 0.99))
    optimizers = [opt_SGD, opt_Momentum, opt_RMSprop, opt_Adam]
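    # Roughly, the four update rules being compared (g: gradient, lr: learning rate;
    # pseudo-math in comments, not the exact PyTorch implementation):
    #   SGD:       theta <- theta - lr * g
    #   Momentum:  v <- momentum * v + g;                theta <- theta - lr * v
    #   RMSprop:   s <- alpha * s + (1 - alpha) * g^2;   theta <- theta - lr * g / (sqrt(s) + eps)
    #   Adam:      m <- beta1 * m + (1 - beta1) * g;     v <- beta2 * v + (1 - beta2) * g^2;
    #              theta <- theta - lr * m_hat / (sqrt(v_hat) + eps)   (m_hat, v_hat: bias-corrected)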

    loss_func = torch.nn.MSELoss()
    losses_his = [[], [], [], []]   # four lists, one per optimizer, to record the loss history

    # training
    for epoch in range(EPOCH):
        print('Epoch: ', epoch)
        for step, (b_x, b_y) in enumerate(loader):          # for each training step
            for net, opt, l_his in zip(nets, optimizers, losses_his):  # train each net with its own optimizer
                output = net(b_x)              # get output for every net
                loss = loss_func(output, b_y)  # compute loss for every net
                opt.zero_grad()                # clear gradients for next train
                loss.backward()                # backpropagation, compute gradients
                opt.step()                     # apply gradients
                l_his.append(loss.data.numpy())     # record the current loss value

    labels = ['SGD', 'Momentum', 'RMSprop', 'Adam']
    for i, l_his in enumerate(losses_his):
        plt.plot(l_his, label=labels[i])
    plt.legend(loc='best')
    plt.xlabel('Steps')
    plt.ylabel('Loss')
    plt.ylim((0, 0.2))
    plt.show()
  • Figure 1: the generated dataset (scatter plot of x against y)

  • Figure 2: loss curves of the four optimizers during training; the faster the loss drops, the better

END
