
Training the CIFAR-10 Dataset with ResNet18 in PyTorch



A quick record of my own learning process.
train.py:

import torch

from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms
from torch import nn, optim
# from lenet5 import Lenet5  # optional LeNet-5 baseline (lenet5.py is not included in this post)
from resnet import ResNet18

def main():
    batch_size = 32
    # datasets.CIFAR10 yields one sample at a time; batching is handled by the DataLoader below
    cifar_train = datasets.CIFAR10('cifar', True, transform=transforms.Compose([
        transforms.Resize((32, 32)),  # resize to 32x32 (a no-op for CIFAR, kept for safety)
        transforms.ToTensor(),  # convert to a Tensor, the basic unit of computation
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225]
        )  # normalize with fixed per-channel statistics (the ImageNet values)
    ]), download=True)
    # data loader; shuffle=True randomizes the sample order each epoch
    cifar_train = DataLoader(cifar_train, batch_size=batch_size, shuffle=True)

    cifar_test = datasets.CIFAR10('cifar', False, transform=transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225]
        )  # same normalization as the training set
    ]), download=True)
    cifar_test = DataLoader(cifar_test, batch_size=batch_size, shuffle=False)  # no need to shuffle for evaluation

    x, label = next(iter(cifar_train))  # Python 3: use next(iter(...)), not iter(...).next()
    print("x: ", x.shape, "  label: ", label.shape)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # use the GPU if one is available
    # model = Lenet5().to(device)
    model = ResNet18().to(device)  # move the model to the device
    criteon = nn.CrossEntropyLoss().to(device)
    print(model)
    optimizer = optim.Adam(model.parameters(), lr=1e-3)  # define the optimizer

    for epoch in range(1000):
        model.train()  # switch to training mode
        for batch_index, (x, label) in enumerate(cifar_train):
            # [b, 3, 32, 32], [b]
            x, label = x.to(device), label.to(device)

            logits = model(x)
            # logits:   [b, 10]
            # label:    [b]
            # loss:     tensor scalar
            loss = criteon(logits, label)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print(epoch, loss.item())  # loss of the last batch in this epoch

        model.eval()  # switch to evaluation mode
        with torch.no_grad():  # no gradients needed for evaluation
            # test
            total_correct = 0
            total_num = 0
            for x, label in cifar_test:
                # [b, 3, 32, 32], [b]
                x, label = x.to(device), label.to(device)

                # [b, 10]
                logits = model(x)
                # [b]
                pred = logits.argmax(dim=1)
                # [b] vs [b] => scalar tensor
                total_correct += torch.eq(pred, label).float().sum().item()
                total_num += x.size(0)

            acc = total_correct / total_num
            print("test", epoch, acc)

if __name__ == '__main__':
    main()
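
A note on the Normalize values above: the mean/std pairs are the standard ImageNet statistics. They work here, but CIFAR-10 has its own per-channel statistics, roughly mean (0.4914, 0.4822, 0.4465) and std (0.2470, 0.2435, 0.2616). A minimal sketch for computing them yourself, assuming the dataset sits in the same 'cifar' directory used above:

import torch
from torchvision import datasets, transforms

# Load the training split as raw tensors, without any normalization.
dataset = datasets.CIFAR10('cifar', train=True, download=True,
                           transform=transforms.ToTensor())
# Stack all 50,000 images into one [N, 3, 32, 32] tensor (roughly 600 MB of RAM).
data = torch.stack([img for img, _ in dataset])
# Per-channel mean and std over the N, H, W dimensions.
print("mean:", data.mean(dim=(0, 2, 3)))  # ~ (0.4914, 0.4822, 0.4465)
print("std: ", data.std(dim=(0, 2, 3)))   # ~ (0.2470, 0.2435, 0.2616)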

resnet.py, in the same directory:

import torch
from torch import nn
from torch.nn import functional as F

class ResBlk(nn.Module):
    """
    resnet block
    """

    def __init__(self, ch_in, ch_out, stride=1):
        """
        :param ch_in: number of input channels
        :param ch_out: number of output channels
        :param stride: stride of the first conv (downsamples the feature map when > 1)
        """
        super(ResBlk, self).__init__()

        self.conv1 = nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=stride, padding=1)
        self.bn1 = nn.BatchNorm2d(ch_out)
        self.conv2 = nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(ch_out)

        # reshape the shortcut branch so it matches the main path; checking
        # stride != 1 matters too: without it, a same-channel block with stride 2
        # (blk4 below) would keep an identity shortcut whose spatial size no
        # longer matches, and the add would only "work" by accidental broadcasting
        self.extra = nn.Sequential()
        if ch_out != ch_in or stride != 1:
            # [b, ch_in, h, w] => [b, ch_out, h/stride, w/stride]
            self.extra = nn.Sequential(
                nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=stride),
                nn.BatchNorm2d(ch_out)
            )

    def forward(self, x):
        """

        :param x: [b, ch, h, w]
        :return:
        """
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # shortcut connection
        # element-wise add: [b, ch_out, h', w'] with [b, ch_out, h', w']
        out = self.extra(x) + out
        out = F.relu(out)
        return out


class ResNet18(nn.Module):
    def __init__(self):
        super(ResNet18, self).__init__()

        # stem (preprocessing) layer
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=3, padding=0),
            nn.BatchNorm2d(64)
        )
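        # with 32x32 CIFAR inputs the stem outputs (32 - 3) // 3 + 1 = 10,
        # i.e. [b, 64, 10, 10], before the four residual blocks below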
        # followed by 4 residual blocks, each halving the spatial size
        # [b, 64, h, w] => [b, 128, h/2, w/2]
        self.blk1 = ResBlk(64, 128, stride=2)
        # [b, 128, h, w] => [b, 256, h/2, w/2]
        self.blk2 = ResBlk(128, 256, stride=2)
        # [b, 256, h, w] => [b, 512, h/2, w/2]
        self.blk3 = ResBlk(256, 512, stride=2)
        # [b, 512, h, w] => [b, 512, h/2, w/2]
        self.blk4 = ResBlk(512, 512, stride=2)

        self.outlayer = nn.Linear(512*1*1, 10)

    def forward(self, x):
        """

        :param x:
        :return:
        """
        x = F.relu(self.conv1(x))

        # [b, 64, 10, 10] => [b, 512, 1, 1] through the four blocks
        x = self.blk1(x)
        x = self.blk2(x)
        x = self.blk3(x)
        x = self.blk4(x)

        #print("after conv:", x.shape) # [b, 512, 2, 2]
        # [b, 512, 2, 2] => [b, 512, 1, 1]
        x = F.adaptive_avg_pool2d(x, [1, 1])
        #print("after pool:", x.shape) # [b, 512, 1, 1]
        # [b, 512, 1, 1] => [b, 512]
        x = x.view(x.size(0), -1)
        x = self.outlayer(x)

        return x

def main():
    blk = ResBlk(64, 128, stride=2)
    tmp = torch.randn(2, 64, 32, 32)
    out = blk(tmp)
    print("block: ", out.shape)

    x = torch.randn(2, 3, 32, 32)
    model = ResNet18()
    out = model(x)
    print("resnet:", out.shape)

if __name__ == '__main__':
    main()
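
Why the stride != 1 check in ResBlk's shortcut matters: with only the channel check, blk4 (512 -> 512, stride 2) would keep an identity shortcut whose spatial size no longer matches the main path, and the element-wise add would only succeed through PyTorch broadcasting [b, 512, 1, 1] against [b, 512, 2, 2], silently producing the wrong result. A quick sanity check of the same-channel, strided case, run from the same directory as resnet.py:

import torch
from resnet import ResBlk

# Equal channel counts but stride 2: the shortcut must downsample as well.
blk = ResBlk(512, 512, stride=2)
x = torch.randn(2, 512, 2, 2)
out = blk(x)
print(out.shape)  # torch.Size([2, 512, 1, 1]) -- both paths halve the spatial size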

Source: https://blog.csdn.net/qq_39502099/article/details/122161671