Code Fixes for Part 3 of Mofan's (莫烦) PyTorch Neural Network Tutorial
2021/6/20 23:28:37
This article collects the code changes needed to get Part 3 of Mofan's PyTorch neural network tutorial running on newer PyTorch versions. If the original tutorial code throws errors for you, the fixes below should serve as a useful reference.
3.1 Regression
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt

""" Create the data """
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)
y = x.pow(2) + 0.2*torch.rand(x.size())    # add noise
x, y = Variable(x), Variable(y)
# plt.scatter(x.data.numpy(), y.data.numpy())    # plot the raw data
# plt.show()

""" Build the network """
class Net(torch.nn.Module):
    def __init__(self, n_features, n_hidden, n_out):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_features, n_hidden)
        self.predict = torch.nn.Linear(n_hidden, n_out)

    def forward(self, x):
        x = F.relu(self.hidden(x))
        x = self.predict(x)
        return x

net = Net(1, 10, 1)
# print(net)

plt.ion()    # interactive mode for live plotting
plt.show()

""" Optimize the network """
optimizer = torch.optim.SGD(net.parameters(), lr=0.5)
loss_func = torch.nn.MSELoss()    # MSELoss is the loss used for regression

# training and plotting loop
for t in range(100):
    prediction = net(x)
    loss = loss_func(prediction, y)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if t % 5 == 0:
        plt.cla()
        plt.scatter(x.data.numpy(), y.data.numpy())
        plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
        # note: Mofan's loss.data[0] must be replaced with loss.item() on newer PyTorch versions
        plt.text(0.5, 0, 'Loss=%.4f' % loss.item(), fontdict={'size': 20, 'color': 'red'})
        plt.pause(0.1)

plt.ioff()
plt.show()
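One extra note beyond the fixes above: since PyTorch 0.4, Variable has been merged into Tensor, so the Variable(x) wrapping is only kept for compatibility with the tutorial. A minimal sketch of the same data setup on a current PyTorch, with no Variable at all:

import torch

# plain tensors already carry autograd information, so no Variable wrapper is needed
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)
y = x.pow(2) + 0.2 * torch.rand(x.size())
# net(x), loss.backward(), x.data.numpy() all behave exactly as in the code above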
3.2 Classification
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt

""" Create the data """
n_data = torch.ones(100, 2)
x0 = torch.normal(2*n_data, 1)
y0 = torch.zeros(100)
x1 = torch.normal(-2*n_data, 1)
y1 = torch.ones(100)
x = torch.cat((x0, x1), 0).type(torch.FloatTensor)
y = torch.cat((y0, y1), ).type(torch.LongTensor)
x, y = Variable(x), Variable(y)
# plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=y.data.numpy(), s=100, lw=0, cmap='RdYlGn')
# plt.show()

""" Build the network """
class Net(torch.nn.Module):
    def __init__(self, n_features, n_hidden, n_out):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_features, n_hidden)
        self.predict = torch.nn.Linear(n_hidden, n_out)

    def forward(self, x):
        x = F.relu(self.hidden(x))
        x = self.predict(x)
        return x

net = Net(2, 10, 2)
# print(net)

plt.ion()    # interactive mode for live plotting
plt.show()

optimizer = torch.optim.SGD(net.parameters(), lr=0.02)
loss_func = torch.nn.CrossEntropyLoss()    # CrossEntropyLoss is the loss used for classification

""" Train and plot the results """
for t in range(100):
    out = net(x)
    loss = loss_func(out, y)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if t % 2 == 0:
        plt.cla()
        # torch.max returns (values, indices); [1] picks the indices, i.e. the predicted class.
        # For example, a softmax output of [0, 1] has its maximum at index 1, so the predicted class is 1.
        prediction = torch.max(F.softmax(out), 1)[1]
        pred_y = prediction.data.numpy().squeeze()
        target_y = y.data.numpy()
        plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=pred_y, s=100, lw=0, cmap='RdYlGn')
        accuracy = sum(pred_y == target_y) / 200
        plt.text(1.5, -4, 'Accuracy=%.2f' % accuracy, fontdict={'size': 20, 'color': 'red'})
        plt.pause(0.1)

plt.ioff()
plt.show()
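A small follow-up the tutorial does not cover: on newer PyTorch versions, F.softmax(out) without an explicit dim prints a deprecation warning. Passing dim=1 (softmax over the class dimension) fixes it, and because softmax is monotonic, the predicted class can equally be read straight from the raw logits. A self-contained sketch with made-up example logits:

import torch
import torch.nn.functional as F

out = torch.tensor([[0.2, 1.3], [2.0, -0.5]])        # hypothetical logits for two samples
prediction = torch.max(F.softmax(out, dim=1), 1)[1]  # explicit dim silences the warning
same = torch.max(out, 1)[1]                          # equivalent: argmax of the raw logits
print(prediction, same)                              # both print tensor([1, 0])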
3.3 Quick Network Building
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt

""" Create the data """
n_data = torch.ones(100, 2)
x0 = torch.normal(2*n_data, 1)
y0 = torch.zeros(100)
x1 = torch.normal(-2*n_data, 1)
y1 = torch.ones(100)
x = torch.cat((x0, x1), 0).type(torch.FloatTensor)
y = torch.cat((y0, y1), ).type(torch.LongTensor)
x, y = Variable(x), Variable(y)
# plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=y.data.numpy(), s=100, lw=0, cmap='RdYlGn')
# plt.show()

""" Regular way to build the network """
class Net(torch.nn.Module):
    def __init__(self, n_features, n_hidden, n_out):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_features, n_hidden)
        self.predict = torch.nn.Linear(n_hidden, n_out)

    def forward(self, x):
        x = F.relu(self.hidden(x))
        x = self.predict(x)
        return x

net1 = Net(2, 10, 2)

""" Quick way to build the network """
net2 = torch.nn.Sequential(
    torch.nn.Linear(2, 10),
    torch.nn.ReLU(),
    torch.nn.Linear(10, 2)
)

print(net1)
print(net2)
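The two networks are functionally equivalent; they only print differently because net2's layers are numbered rather than named. If you want the quick-build version to keep readable layer names, nn.Sequential also accepts an OrderedDict. This is a sketch of that variant, not something from the tutorial:

import torch
from collections import OrderedDict

net2_named = torch.nn.Sequential(OrderedDict([
    ('hidden', torch.nn.Linear(2, 10)),    # same layers as net2, but with explicit names
    ('relu', torch.nn.ReLU()),
    ('predict', torch.nn.Linear(10, 2)),
]))
print(net2_named)    # layers now show up as hidden / relu / predict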
3.4 Saving and Restoring
import torch
from torch.autograd import Variable
import matplotlib.pyplot as plt

# fake data
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)
y = x.pow(2) + 0.2*torch.rand(x.size())
x, y = Variable(x, requires_grad=False), Variable(y, requires_grad=False)

""" Save """
def save():
    net1 = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 1)
    )
    optimizer = torch.optim.SGD(net1.parameters(), lr=0.5)
    loss_func = torch.nn.MSELoss()

    for t in range(100):
        prediction = net1(x)
        loss = loss_func(prediction, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    torch.save(net1, 'net.pkl')                      # save the whole network
    torch.save(net1.state_dict(), 'net_params.pkl')  # save only the parameters

    # plot
    plt.figure(1, figsize=(10, 3))
    plt.subplot(131)
    plt.title('Net1')
    plt.scatter(x.data.numpy(), y.data.numpy())
    plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)

""" Restore the whole network """
def restore_net():
    net2 = torch.load('net.pkl')
    prediction = net2(x)

    plt.subplot(132)
    plt.title('Net2')
    plt.scatter(x.data.numpy(), y.data.numpy())
    plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)

""" Restore only the parameters """
def restore_params():
    net3 = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 1)
    )
    net3.load_state_dict(torch.load('net_params.pkl'))
    prediction = net3(x)

    plt.subplot(133)
    plt.title('Net3')
    plt.scatter(x.data.numpy(), y.data.numpy())
    plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
    plt.show()

save()
restore_net()
restore_params()
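Two practical notes beyond what the tutorial shows. First, the PyTorch documentation recommends saving only the state_dict, because torch.save(net1, ...) pickles the whole object and requires the exact class definition to be importable at load time. Second, on recent PyTorch releases torch.load may default to weights_only=True, in which case loading the whole pickled network needs torch.load('net.pkl', weights_only=False). A minimal sketch of the parameter-only round trip:

import torch

# build, save parameters only, then rebuild the same architecture and load them back
net = torch.nn.Sequential(torch.nn.Linear(1, 10), torch.nn.ReLU(), torch.nn.Linear(10, 1))
torch.save(net.state_dict(), 'net_params.pkl')          # parameters only (recommended)

net_restored = torch.nn.Sequential(torch.nn.Linear(1, 10), torch.nn.ReLU(), torch.nn.Linear(10, 1))
net_restored.load_state_dict(torch.load('net_params.pkl'))
net_restored.eval()                                      # switch to inference mode before predicting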
3.5 Mini-Batch Training
import torch
import torch.utils.data as Data

BATCH_SIZE = 5

x = torch.linspace(1, 10, 10)
y = torch.linspace(10, 1, 10)

# torch_dataset = Data.TensorDataset(data_tensor=x, target_tensor=y)
# Mofan's call above raises an error on newer versions; the positional form below fixes it
torch_dataset = Data.TensorDataset(x, y)

loader = Data.DataLoader(
    dataset=torch_dataset,
    batch_size=BATCH_SIZE,
    shuffle=True,
    # num_workers=2,    # number of worker processes; Windows users should remove this, because Windows has no fork() and spawning workers from module-level code raises an error
)

for epoch in range(3):
    for step, (batch_x, batch_y) in enumerate(loader):
        # training would go here
        print('Epoch:', epoch, '|Step:', step,
              '|batch x:', batch_x.numpy(), '|batch y:', batch_y.numpy())
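If you would rather keep num_workers on Windows than delete it, the usual fix is to move the DataLoader iteration under an if __name__ == '__main__': guard so that the spawned worker processes can re-import the script safely. A sketch of that variant (not part of the tutorial):

import torch
import torch.utils.data as Data

BATCH_SIZE = 5

def main():
    x = torch.linspace(1, 10, 10)
    y = torch.linspace(10, 1, 10)
    torch_dataset = Data.TensorDataset(x, y)
    loader = Data.DataLoader(dataset=torch_dataset, batch_size=BATCH_SIZE,
                             shuffle=True, num_workers=2)   # workers are fine inside the guard
    for epoch in range(3):
        for step, (batch_x, batch_y) in enumerate(loader):
            print('Epoch:', epoch, '|Step:', step,
                  '|batch x:', batch_x.numpy(), '|batch y:', batch_y.numpy())

if __name__ == '__main__':   # keeps worker processes from re-running the loop when they import the module
    main()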
3.6 Optimizers
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt
import torch.utils.data as Data

# hyper parameters
LR = 0.01
BATCH_SIZE = 32
EPOCH = 12

x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)
y = x.pow(2) + 0.2*torch.rand(x.size())
# plt.scatter(x.numpy(), y.numpy())
# plt.show()

torch_dataset = Data.TensorDataset(x, y)
loader = Data.DataLoader(dataset=torch_dataset, batch_size=BATCH_SIZE, shuffle=True)

class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(1, 20)
        self.predict = torch.nn.Linear(20, 1)

    def forward(self, x):
        x = F.relu(self.hidden(x))
        x = self.predict(x)
        return x

net_SGD = Net()
net_Momentum = Net()
net_RMSprop = Net()
net_Adam = Net()
nets = [net_SGD, net_Momentum, net_RMSprop, net_Adam]

opt_SGD = torch.optim.SGD(net_SGD.parameters(), lr=LR)
opt_Momentum = torch.optim.SGD(net_Momentum.parameters(), lr=LR, momentum=0.8)
opt_RMSprop = torch.optim.RMSprop(net_RMSprop.parameters(), lr=LR, alpha=0.9)
opt_Adam = torch.optim.Adam(net_Adam.parameters(), lr=LR, betas=(0.9, 0.99))
optimizers = [opt_SGD, opt_Momentum, opt_RMSprop, opt_Adam]

loss_func = torch.nn.MSELoss()
losses_his = [[], [], [], []]    # record each optimizer's loss history

for epoch in range(EPOCH):
    print(epoch)
    for step, (batch_x, batch_y) in enumerate(loader):
        # b_x = Variable(batch_x)    # no longer needed on newer PyTorch versions
        # b_y = Variable(batch_y)
        for net, opt, l_his in zip(nets, optimizers, losses_his):
            output = net(batch_x)
            loss = loss_func(output, batch_y)
            opt.zero_grad()
            loss.backward()
            opt.step()
            l_his.append(loss.item())

labels = ['SGD', 'Momentum', 'RMSprop', 'Adam']
for i, l_his in enumerate(losses_his):
    plt.plot(l_his, label=labels[i])
plt.legend(loc='best')
plt.xlabel('Steps')
plt.ylabel('Loss')
plt.show()
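The comparison loop is easy to extend: each extra optimizer only needs its own fresh Net(), an optimizer object, a loss list, and a matching label. A sketch of adding Adagrad, placed before the training loop and reusing the names defined in the block above (so it is not standalone code):

# assumes Net, LR, nets, optimizers and losses_his from the block above
net_Adagrad = Net()
opt_Adagrad = torch.optim.Adagrad(net_Adagrad.parameters(), lr=LR)

nets.append(net_Adagrad)
optimizers.append(opt_Adagrad)
losses_his.append([])

# and at the plotting stage, keep the labels in step with the lists:
labels = ['SGD', 'Momentum', 'RMSprop', 'Adam', 'Adagrad']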
That wraps up the code fixes for Part 3 of Mofan's PyTorch neural network tutorial. I hope the article is helpful, and thanks for your continued support of 为之网!