Implementation of Deep Learning Code

2022-04-18 01:32:14



Line_Model
import torch
import torch.nn as nn
import math

# Compute one SGD update for a linear regression model pred = k * x (+ bias)
def Cal_SGD_Linear(x, pred, label, lr, k, bias=0):
    g_k = 0  # gradient w.r.t. the weight k
    g_b = 0  # gradient w.r.t. the bias
    for (idx, item) in enumerate(pred):
        g_k += (item - label[idx]) * x[idx]
        g_b += item - label[idx]
    # The gradient is the derivative of the loss with respect to each model
    # parameter, evaluated at the current parameter values
    g_k = (2 * g_k) / len(x)
    g_b = (2 * g_b) / len(x)
    print(k - lr * g_k)
    return {'k': k - lr * g_k, 'bias': 0 if bias == 0 else bias - lr * g_b}
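# Sanity check (illustrative data, not from the original post): the manual
# gradient inside Cal_SGD_Linear should match what autograd computes for the
# same MSE loss on the same one-parameter model
xs = torch.tensor([1.0, 2.0, 3.0])
ys = 2 * xs
k0 = torch.tensor(0.5, requires_grad=True)
loss0 = ((k0 * xs - ys) ** 2).mean()
loss0.backward()
print(k0.grad)  # autograd: 2/n * sum((k*x - y) * x) = -14.0
# Cal_SGD_Linear uses the same gradient: it prints k - lr*g = 0.5 - 0.01*(-14) = 0.64
print(Cal_SGD_Linear(xs, 0.5 * xs, ys, lr=0.01, k=0.5))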

def Cal_MSE(pred, label):
    loss = 0
    for (idx, item) in enumerate(pred):
        loss += math.pow(item - label[idx], 2)
    # math.sqrt(loss / len(pred)) would give the RMSE (root mean squared error)
    return loss / len(pred)  # MSE (mean squared error)
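# Quick check (illustrative values): Cal_MSE should agree with torch.nn.MSELoss
pred_t = torch.tensor([1.0, 2.0, 3.0])
label_t = torch.tensor([1.5, 2.0, 2.5])
print(Cal_MSE(pred_t, label_t))                    # 0.1666...
print(torch.nn.MSELoss()(pred_t, label_t).item())  # 0.1666...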

def gen_line_data(len_data):
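    # evenly spaced x in [10, 110]; y = 2x plus uniform noise in [0, 1)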
    x = torch.linspace(10, 110, len_data)
    x = torch.unsqueeze(x, dim=1)
    y = 2 * x + torch.rand(x.size())
    return {'x': x, 'y': y}

class LineRegressionNet(nn.Module):
    def __init__(self):
        super().__init__()
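        # a single learnable weight and no bias: the model computes pred = k * x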
        self.liner = nn.Linear(1, 1, bias=False)

    def forward(self, x):
        out = self.liner(x)
        return out

class line_model():
    def __init__(self, lr, epoches):
        self.lr = lr
        self.epoches = epoches
        self.init_model()

    def init_model(self):
        self.model = LineRegressionNet()
        self.optimiser = torch.optim.SGD(self.model.parameters(), lr=self.lr)
        self.loss_fn = torch.nn.MSELoss()

    def train_model(self, data, model_save_path="model.ck"):
        x = data['x']
        y = data['y']
        model = self.model
        for th in range(self.epoches):
            # shuffle x and y with one shared permutation so the (x, y)
            # pairs stay aligned across the two tensors
            perm = torch.randperm(x.size(0))
            x, y = x[perm], y[perm]
            model.zero_grad()
            outputs = model(x)
            loss = self.loss_fn(outputs, y)
            loss.backward()
            self.optimiser.step()
        self.model_save_path = model_save_path
        torch.save(model.state_dict(), model_save_path)

    def test_model(self, data):
        x = data['x']
        y = data['y']
        self.model.load_state_dict(torch.load(self.model_save_path))
        with torch.no_grad():  # no gradients needed at test time
            pred = self.model(x)
        print(x, pred)

train_data = gen_line_data(10)
test_data = gen_line_data(5)
learning_rate = 0.0001
liner_model = line_model(learning_rate, 100)
liner_model.train_model(train_data)
liner_model.test_model(test_data)
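Because the generated data follows y = 2x plus small uniform noise, the learned weight of the bias-free linear layer should converge to roughly 2; printing liner_model.model.state_dict() after training is a quick way to confirm this.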

'''
Commented-out alternative: a manual training loop with interchangeable
optimizers (here liner_model would have to be the LineRegressionNet module
itself, since the line_model wrapper above is not callable):

loss_function = torch.nn.MSELoss()
optimizer = torch.optim.Adam(liner_model.parameters(), lr=learning_rate)
optimizer = torch.optim.Adagrad(liner_model.parameters(), lr=learning_rate)
optimizer = torch.optim.SGD(liner_model.parameters(), lr=learning_rate)  # stochastic gradient descent
x = data['x']
y = data['y']
for i in range(10):
    optimizer.zero_grad()  # clear gradients from the previous step
    outputs = liner_model(x)
    # Cal_MSE(outputs, y)
    loss = loss_function(outputs, y)  # loss on the forward outputs
    pp = liner_model.state_dict()
    print('liner.weight', pp['liner.weight'])
    Cal_SGD_Linear(x, outputs, y, learning_rate, pp['liner.weight'][0])
    loss.backward()  # backpropagation
    optimizer.step()  # optimizer updates the parameters

pp = liner_model.state_dict()
# test_data = torch.unsqueeze(torch.linspace(100, 200, 10), dim=1)
# print(test_data, liner_model(test_data))
'''

CNN_Model
import torch
import torch.nn as nn

# Conv2d expects image-shaped input (N, C, H, W), so the 1-D linear data used
# above cannot be fed to this network; random 1x28x28 "images" with integer
# class labels are generated here purely so the example runs end to end
def gen_image_data(len_data):
    x = torch.rand(len_data, 1, 28, 28)
    y = torch.randint(0, 10, (len_data,))
    return {'x': x, 'y': y}

class CnnNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2),  # (N, 1, 28, 28) -> (N, 16, 28, 28)
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2)                  # (N, 16, 28, 28) -> (N, 16, 14, 14)
        )
        self.fc = nn.Linear(16 * 14 * 14, 10)

    def forward(self, x):
        out = self.layer1(x)
        out = out.view(out.size(0), -1)  # flatten to (N, 16*14*14) for the linear layer
        out = self.fc(out)
        return out

data = gen_image_data(10)
cnn_model = CnnNet()
learning_rate = 0.02
# cross-entropy rather than MSE, since the 10-way output is a classification
loss_function = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(cnn_model.parameters(), lr=learning_rate)

x = data['x']
y = data['y']
# forward pass
outputs = cnn_model(x)
loss = loss_function(outputs, y)
# backpropagation and parameter update
optimizer.zero_grad()  # clear gradients from the previous step
loss.backward()  # backpropagation
optimizer.step()  # optimizer updates the parameters

test_data = torch.rand(5, 1, 28, 28)
print(cnn_model(test_data))
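With 28×28 inputs, the 5×5 convolution with padding 2 preserves the spatial size and the 2×2 max-pool halves it, so the flattened feature vector has 16 × 14 × 14 = 3136 elements; that number is what fixes the input width of the fully connected layer.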

Source: https://www.cnblogs.com/lhx9527/p/16158043.html
