# Tags: loss, self, torch, batch, dataset, pytorch, CNN, size
# Environment: PyTorch 1.1.0 + torchvision 0.3.0 + CUDA 10.0.130 + NVIDIA driver 470.103.01
import torch
from torchvision import transforms #将图像转化为张量
from torchvision import datasets #对数据集相关处理
from torch.utils.data import DataLoader #下载数据集
import torch.nn.functional as F #使用relu()函数
import torch.optim as optim #优化器
# 1. Data preparation.
batch_size = 64

# Convert each image to a tensor (28x28 -> 1x28x28, CxHxW) scaled to [0, 1],
# then normalize with the MNIST statistics (mean 0.1307, std 0.3081).
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
])

train_dataset = datasets.MNIST(root='../dataset/mnist', train=True,
                               transform=transform, download=True)
test_dataset = datasets.MNIST(root='../dataset/mnist', train=False,
                              transform=transform, download=True)

# Shuffle only the training data; evaluation order does not matter.
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)
# 2. Model design.
# Input:  N x 1 x 28 x 28 image batches (flattened to N x 320 before the FC layer).
# Output: N x 10 class scores.
class Net(torch.nn.Module):
    """Small CNN for MNIST: two conv+max-pool stages, then a linear classifier.

    Input:  (N, 1, 28, 28) image batch.
    Output: (N, 10) raw class scores (logits; the loss applies softmax).
    """

    def __init__(self):
        super(Net, self).__init__()
        # (N,1,28,28) -conv 5x5-> (N,10,24,24) -pool 2x2-> (N,10,12,12)
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        # (N,10,12,12) -conv 5x5-> (N,20,8,8) -pool 2x2-> (N,20,4,4)
        self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)
        self.pooling = torch.nn.MaxPool2d(2)
        # 20 * 4 * 4 = 320 flattened features per sample.
        self.fc = torch.nn.Linear(320, 10)

    def forward(self, x):
        n = x.size(0)
        x = F.relu(self.pooling(self.conv1(x)))
        x = F.relu(self.pooling(self.conv2(x)))
        # Flatten (n, 20, 4, 4) -> (n, 320) for the fully connected layer.
        x = x.view(n, -1)
        return self.fc(x)
# 3. Model, loss, and optimizer.
model = Net()

# Use the GPU when one is available; this line is a no-op on CPU-only machines.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model.to(device)

# CrossEntropyLoss combines log-softmax with NLL loss, so the network emits
# raw logits.
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
# 4. Training and testing.
def train(epoch):
    """Run one training epoch, printing the mean loss every 300 batches.

    epoch: zero-based epoch index, used only for the progress printout.
    """
    running_loss = 0.0
    for i, (inputs, target) in enumerate(train_loader):
        # Move the batch to the model's device (no-op on CPU-only machines).
        inputs, target = inputs.to(device), target.to(device)

        optimizer.zero_grad()
        loss = criterion(model(inputs), target)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        if i % 300 == 299:  # report once every 300 batches
            print('[%d,%5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 300))
            running_loss = 0.0
def test():
    """Evaluate the model on the test set and print accuracy as an integer percent."""
    correct = 0
    total = 0
    with torch.no_grad():  # inference only: no gradients needed
        for images, labels in test_loader:
            # Keep data on the same device as the model (no-op on CPU).
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            # The predicted class is the index of the largest score per row.
            predicted = outputs.argmax(dim=1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print("Accuracy on test set: %d %%" % (100 * correct / total))
if __name__ == '__main__':
    # Train for 10 epochs, evaluating on the test set after each one.
    for epoch in range(10):
        train(epoch)
        test()
# Source: https://blog.csdn.net/qq_43545095/article/details/122765951
# (Aggregator-site disclaimer from the original blog post omitted; the content
# above is shared for technical learning purposes only.)