TorchServe GPU Deployment




TorchServe installation

  • The TorchServe CPU version needs little introduction: it can be deployed directly from the official TorchServe Docker image. This post focuses on the GPU version.
  • For the GPU version, the official images currently only ship with CUDA 10, so I built my own CUDA 11 Docker image (a quick sanity check for GPU visibility is sketched below).
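
Whether you use the official image or a self-built CUDA 11 one, it is worth confirming that the PyTorch build inside the container actually sees the GPUs. A minimal sanity check (an assumed snippet, run inside the container, e.g. via docker exec -it ts4 python):

# Confirm GPU visibility inside the TorchServe container
import torch

print(torch.__version__)          # PyTorch build shipped in the image
print(torch.version.cuda)         # CUDA version it was compiled against
print(torch.cuda.is_available())  # should be True when --gpus is passed
print(torch.cuda.device_count())  # number of GPUs exposed to the container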

Starting the Docker container

cpu

docker run -it -p 18080:8080 -p 18081:8081 --name torch \
-v /home/user/torch_model:/home/model-server/model-store pytorch/torchserve:latest

gpu

--gpus: selects which GPU devices the container can see; omit it to expose all GPUs.
-p: the first number is the port mapped out to the host machine (change it as you like); the second is TorchServe's port inside the container (leave it unchanged).

docker run -d -p 18080:8080 -p 18081:8081 -u root --name ts4 --gpus='"device=4"' \
-v /home/user/torch_model:/home/model-server/model-store lebrongg/torchserve-gpu
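
Once the container is running, TorchServe exposes a /ping health endpoint on the inference port; a minimal check against the port mapping above (sketch):

# Health check against the mapped inference port (18080 as above)
import requests

resp = requests.get("http://localhost:18080/ping", timeout=2)
print(resp.status_code, resp.json())  # expect 200 and {"status": "Healthy"}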

Building the MAR package

This step requires torch-model-archiver; its installation is not covered here.

main_models.py (GPU)

Only the GPU version is shown here; the CPU version differs mainly in whether the data is moved to CUDA, as anyone who has used torch will recognize (a CPU-only sketch follows the code below).

# -*- coding=utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
import torch
import torch.nn as nn
import os, sys, base64
from torchvision import transforms, models
from torch.autograd import Variable
from PIL import Image
import numpy as np
from io import BytesIO

# inside the container the selected GPU shows up as device 0
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

sys.path.append(os.path.abspath("."))

logging.basicConfig(level=logging.INFO,
                    format="%(asctime)s - %(levelname)s - %(message)s")

class ModelPredictor(torch.nn.Module):
    def __init__(self, model_path,
                 inference_speed=0.5,
                 threads_num=2,
                 use_jit=False):
        super(ModelPredictor, self).__init__()

        self.tfms = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor(),
                                        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
        # Limit the number of threads, otherwise CPU usage will blow up; 2 is usually enough
        torch.set_num_threads(threads_num)
        self.model = self.init_model(model_path)

    def init_model(self, model_path):
        print(model_path)
        model_ft = models.resnet50(pretrained=False)
        num_ftrs = model_ft.fc.in_features
        
        # model_ft.fc = nn.Linear(num_ftrs, 2)

        model_ft.fc = nn.Sequential(
                nn.Linear(num_ftrs, 1024),
                nn.ReLU(),
                nn.Dropout(0.4),
                nn.Linear(1024, 2))

        model_ft.load_state_dict(torch.load(model_path))
        model_ft.eval()
        model_ft = nn.DataParallel(model_ft.cuda())
        logging.info("Initialize Model Done".center(60, "="))
        return model_ft

    def forward(self, data):
        logging.info("input type:%s,input data type:%s"%(type(data),type(data)))
        data = data[0]['body']['data']
        imgdata = base64.b64decode(data.encode('utf8'))
        img = Image.open(BytesIO(imgdata)).convert('RGB')
        img = Variable(self.tfms(img).unsqueeze(0)).cuda()
        logits = self.model(img)
        index = torch.argmax(logits.data, -1).data.cpu().item()
        prob = round(torch.softmax(logits, -1).cpu().tolist()[0][1], 5)
        return [index, prob]

    def __call__(self, *args):
        return self.forward(*args)

    def predict(self, image):
        res = []
        prob = self.forward(image)
        res.append(prob)
        print(res)
        return res

if __name__ == "__main__":
    model_dir = '/home/user/model_manager/porndet'
    model_path = os.path.join(model_dir,'model.pth')
    model = ModelPredictor(model_path)
    img = Image.open('1.jpg').convert('RGB')
    output_buffer = BytesIO()
    img.save(output_buffer, format='PNG')
    byte_data = output_buffer.getvalue()
    img = base64.b64encode(byte_data).decode('utf8')
    data = [{'body': {'data': img}}]
    print(model(data))
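
As noted above, the CPU version changes only where the weights and input tensors live. A hypothetical CPU-only init_model (illustrative, not part of the archived files) would look like:

# CPU-only variant (sketch): weights loaded onto the CPU, no DataParallel/.cuda()
import torch
import torch.nn as nn
from torchvision import models

def init_cpu_model(model_path):
    model_ft = models.resnet50(pretrained=False)
    num_ftrs = model_ft.fc.in_features
    model_ft.fc = nn.Sequential(
        nn.Linear(num_ftrs, 1024),
        nn.ReLU(),
        nn.Dropout(0.4),
        nn.Linear(1024, 2))
    # map_location='cpu' replaces the .cuda()/DataParallel step of the GPU version
    model_ft.load_state_dict(torch.load(model_path, map_location='cpu'))
    model_ft.eval()
    return model_ft

In forward(), the only other change is to keep the input tensor on the CPU instead of calling .cuda() on it.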

model_handler.py

The handler differs very little between the CPU and GPU versions. You could also write the model code directly inside the handler; it is a matter of personal preference.

# -*- coding=utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
import sys
import os
import torch
import torch.nn as nn
from torchvision import transforms, models
from torch.autograd import Variable
from PIL import Image
import numpy as np
from main_models import *

sys.path.append(os.path.abspath("."))

logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

os.environ['CUDA_VISIBLE_DEVICES'] = '0'

class ModelHandler(object):
    """
    A base Model handler implementation.
    """

    def __init__(self):
        self.error = None
        self._context = None
        self.initialized = False
        self.model_predictor = None

    def initialize(self, context):
        """
        Initialize model. This will be called during model loading time
        :param context: Initial context contains model server system properties.
        :return:
        """
        #self.manifest = context.manifest
        self._context = context
        properties = context.system_properties
        model_dir = properties.get("model_dir")
        print('model dir is ', model_dir)
        for file in os.listdir(model_dir):
            if file.split('.')[-1] == "pth":
                model_file = os.path.join(model_dir,file)
        # load the model (the GPU ModelPredictor defined in main_models.py)
        self.model_predictor = ModelPredictor(model_file)

        self.initialized = True

    def preprocess(self, batch):
        """
        Transform raw input into model input data.
        :param batch: list of raw requests, should match batch size
        :return: list of preprocessed model input data
        """
        # Take the input data and pre-process it make it inference ready
        return batch

    def inference(self, model_input):
        """
        Internal inference methods
        :param model_input: transformed model input data
        :return: list of inference output in NDArray
        """
        # Do some inference call to engine here and return output
        with torch.no_grad():
            return self.model_predictor.predict(model_input)

    def postprocess(self, inference_output):
        """
        Return predict result in batch.
        :param inference_output: list of inference output
        :return: list of predict results
        """
        # Take output from network and post-process to desired format
        return inference_output

    def handle(self, data, context):
        model_input = self.preprocess(data)
        model_out = self.inference(model_input)
        return self.postprocess(model_out)
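
Depending on the TorchServe version, the serving runtime may look for a module-level handle function rather than instantiating the class itself. If that is the case for your setup, a common entry-point pattern (a sketch appended to model_handler.py) is:

# Module-level entry point so TorchServe can drive the handler class above
_service = ModelHandler()

def handle(data, context):
    if not _service.initialized:
        _service.initialize(context)
    if data is None:
        return None
    return _service.handle(data, context)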

Generating the MAR package

torch-model-archiver \
--model-name servemodel \
--version 2021091500 \
--model-file ./serve/main_models.py \
--serialized-file ./serve/model.pth \
--handler ./serve/model_handler.py \
--export-path /home/user/torch_model \
--runtime python3 --force

Model deployment

When the container was created, a local directory was mounted: the path before the colon is on the host, the path after is the model-store directory inside the container.

-v /home/user/torch_model:/home/model-server/model-store

The generated .mar file can simply be dropped into the mounted host directory, which makes it visible inside the container.

Registering the model

curl -v -X POST "http://localhost:18081/models?initial_workers=4&url=servemodel.mar"
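
After registration, you can confirm that the workers came up by describing the model on the management port (a sketch using requests; 18081 as mapped above):

# Describe the registered model and its workers via the management API
import requests

resp = requests.get("http://localhost:18081/models/servemodel", timeout=5)
print(resp.json())  # includes the worker list and their status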

Adjusting the configuration

curl -v -X PUT "http://localhost:18081/models/servemodel?min_worker=10"

Listing models

curl "http://localhost:18081/models"

Unregistering the model

curl -X DELETE "http://localhost:18081/models/servemodel"

Calling the inference API

import requests
import cv2
import numpy as np
from PIL import Image
from io import BytesIO
import base64
import time
import json
import concurrent.futures

sess = requests.Session()

file = '1.png'
onnx = 'http://127.0.0.1:18888/predict'  # a separate ONNX service, used for comparison
gpu = 'http://127.0.0.1:18080/predictions/servemodel'  # TorchServe inference endpoint

def test_onnx(data):
    res = sess.post(onnx, data=data, timeout=2).text
    return res

def test_gpu(data):
    res = sess.post(gpu, json=data, timeout=2).text
    return res

if __name__ == '__main__':
    # 30 concurrent workers; ThreadPoolExecutor can be swapped in as well
    p = concurrent.futures.ProcessPoolExecutor(max_workers=30)
    image = cv2.imread(file)
    image = cv2.resize(image, (224, 224))
    image = cv2.imencode('.png', image)[1]
    img_b64encode = base64.b64encode(image).decode('utf8')

    data = {"data": img_b64encode}

    t1 = time.time()

    files = [data for i in range(2000)]

    # for i in range(5):
    #     t2 = time.time()
    #     res = test_gpu(data)
    #     print(time.time() - t2, res)
    
    # res = list(p.map(test_onnx, files))
    res = list(p.map(test_gpu, files))

    print(res)
    print(time.time() - t1)

