
YOLOv5: Training on Your Own Data and Deploying with libtorch

2021-02-27



Environment: Ubuntu 18.04 (training platform); Windows 10 / VS2017 (deployment platform); OpenCV 3.4.7 (built ahead of time); CUDA 10.1; PyTorch 1.6

YOLOv5 project: https://github.com/ultralytics/yolov5

YOLOv5 v2.0 model download link: https://github.com/ultralytics/yolov5/releases

Training stage: omitted. (Note: before the C++ deployment below, the trained .pt weights must be exported to TorchScript, e.g. with the repo's models/export.py, to produce the .jit file that torch::jit::load reads in main().)

libtorch deployment on Win10 with VS2017, OpenCV 3.4.7, libtorch 1.6:

#include <opencv2/opencv.hpp>
#include <torch/script.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <time.h>

using namespace std;

std::vector<torch::Tensor> non_max_suppression(torch::Tensor preds, float score_thresh = 0.5, float iou_thresh = 0.5)
{
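    // preds: [batch, num_boxes, 5 + num_classes] raw network output; each row is
    // (center_x, center_y, w, h, objectness, class_0 score, class_1 score, ...).
    // Returns one [kept, 6] tensor per image: (left, top, right, bottom, score, class_id).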
    std::vector<torch::Tensor> output;
    for (size_t i = 0; i < preds.sizes()[0]; ++i)
    {
        torch::Tensor pred = preds.select(0, i);

        // Filter by scores
        torch::Tensor scores = pred.select(1, 4) * std::get<0>(torch::max(pred.slice(1, 5, pred.sizes()[1]), 1));
        pred = torch::index_select(pred, 0, torch::nonzero(scores > score_thresh).select(1, 0));
        if (pred.sizes()[0] == 0) continue;

        // (center_x, center_y, w, h) to (left, top, right, bottom)
        pred.select(1, 0) = pred.select(1, 0) - pred.select(1, 2) / 2;
        pred.select(1, 1) = pred.select(1, 1) - pred.select(1, 3) / 2;
        pred.select(1, 2) = pred.select(1, 0) + pred.select(1, 2);
        pred.select(1, 3) = pred.select(1, 1) + pred.select(1, 3);
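        // Order matters here: columns 0 and 1 were overwritten first, so
        // column 2 becomes left + w and column 3 becomes top + h.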

        // Computing scores and classes
        std::tuple<torch::Tensor, torch::Tensor> max_tuple = torch::max(pred.slice(1, 5, pred.sizes()[1]), 1);
        pred.select(1, 4) = pred.select(1, 4) * std::get<0>(max_tuple);
        pred.select(1, 5) = std::get<1>(max_tuple);

        torch::Tensor  dets = pred.slice(1, 0, 6);

        torch::Tensor keep = torch::empty({ dets.sizes()[0] });
        torch::Tensor areas = (dets.select(1, 3) - dets.select(1, 1)) * (dets.select(1, 2) - dets.select(1, 0));
        std::tuple<torch::Tensor, torch::Tensor> indexes_tuple = torch::sort(dets.select(1, 4), 0, 1);
        torch::Tensor v = std::get<0>(indexes_tuple);
        torch::Tensor indexes = std::get<1>(indexes_tuple);
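        // Descending sort (third argument = 1), so indexes[0] always points at
        // the highest-scoring box still under consideration.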
        int count = 0;
        while (indexes.sizes()[0] > 0)
        {
            keep[count] = (indexes[0].item().toInt());
            count += 1;

            // Computing overlaps
            torch::Tensor lefts = torch::empty(indexes.sizes()[0] - 1);
            torch::Tensor tops = torch::empty(indexes.sizes()[0] - 1);
            torch::Tensor rights = torch::empty(indexes.sizes()[0] - 1);
            torch::Tensor bottoms = torch::empty(indexes.sizes()[0] - 1);
            torch::Tensor widths = torch::empty(indexes.sizes()[0] - 1);
            torch::Tensor heights = torch::empty(indexes.sizes()[0] - 1);
            for (size_t j = 0; j < indexes.sizes()[0] - 1; ++j)
            {
                lefts[j] = std::max(dets[indexes[0]][0].item().toFloat(), dets[indexes[j + 1]][0].item().toFloat());
                tops[j] = std::max(dets[indexes[0]][1].item().toFloat(), dets[indexes[j + 1]][1].item().toFloat());
                rights[j] = std::min(dets[indexes[0]][2].item().toFloat(), dets[indexes[j + 1]][2].item().toFloat());
                bottoms[j] = std::min(dets[indexes[0]][3].item().toFloat(), dets[indexes[j + 1]][3].item().toFloat());
                widths[j] = std::max(float(0), rights[j].item().toFloat() - lefts[j].item().toFloat());
                heights[j] = std::max(float(0), bottoms[j].item().toFloat() - tops[j].item().toFloat());
            }
            torch::Tensor overlaps = widths * heights;

            // Filter by IoUs
            torch::Tensor ious = overlaps / (areas.select(0, indexes[0].item().toInt()) + torch::index_select(areas, 0, indexes.slice(0, 1, indexes.sizes()[0])) - overlaps);
            indexes = torch::index_select(indexes, 0, torch::nonzero(ious <= iou_thresh).select(1, 0) + 1);
        }
        keep = keep.toType(torch::kInt64);
        output.push_back(torch::index_select(dets, 0, keep.slice(0, 0, count)));
    }
    return output;
}
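// Note: this NMS reads box coordinates one scalar at a time via .item(), which
// is easy to follow but slow when many boxes survive the score threshold. It
// also pushes nothing for an image whose boxes all score below score_thresh,
// so callers must check the returned vector's size before indexing it.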


int main()
{
    // Load the exported TorchScript module
    torch::jit::script::Module module = torch::jit::load("libtorchYolov5\\libtorchYolov5\\my_best.jit");
    module.to(at::kCPU);
    std::vector<std::string> classnames;
    std::ifstream f("libtorchYolov5\\libtorchYolov5\\shoes.names");
    std::string name = "";
    while (std::getline(f, name))
    {
        classnames.push_back(name);
    }
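    // shoes.names is expected to hold one class name per line, in the same
    // order as the class indices the model was trained with.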
    /*cv::VideoCapture cap = cv::VideoCapture(0);
    cap.set(cv::CAP_PROP_FRAME_WIDTH, 1920);
    cap.set(cv::CAP_PROP_FRAME_HEIGHT, 1080);*/
    cv::Mat frame, img;
    frame = cv::imread("libtorchYolov5\\x64\\Release\\04.jpg");
    clock_t start = clock();
    // Method 1: convert the byte image to a tensor manually
    cv::resize(frame, img, cv::Size(640, 640)); //384
    cv::cvtColor(img, img, cv::COLOR_BGR2RGB);
    torch::Tensor imgTensor = torch::from_blob(img.data, { img.rows, img.cols,3 }, torch::kByte);
    imgTensor = imgTensor.permute({ 2,0,1 });
    imgTensor = imgTensor.toType(torch::kFloat);
    imgTensor = imgTensor.div(255);
    imgTensor = imgTensor.unsqueeze(0);
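    // imgTensor is now [1, 3, 640, 640], float32 scaled to [0, 1], the input
    // layout the exported TorchScript module expects.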
    torch::Tensor preds = module.forward({ imgTensor }).toTuple()->elements()[0].toTensor();

    // Method 2 (alternative preprocessing, kept commented out): normalize with OpenCV first
    // preds: [?, 15120, 9]
    //cv::resize(frame, img, cv::Size(640, 640)); //384
    //torch::DeviceType device_type = at::kCPU;
    //cv::cvtColor(img, img, cv::COLOR_BGR2RGB);  // BGR -> RGB
    //img.convertTo(img, CV_32FC3, 1.0f / 255.0f);  // normalization 1/255
    //auto imgTensor = torch::from_blob(img.data, { 1, img.rows, img.cols, img.channels() }).to(device_type);
    //imgTensor = imgTensor.permute({ 0, 3, 1, 2 }).contiguous();  // BHWC -> BCHW (Batch, Channel, Height, Width)
    //std::vector<torch::jit::IValue> inputs;
    //inputs.emplace_back(imgTensor);
    //torch::jit::IValue output = module.forward(inputs); // CPUFloatType{1,3,12,20,85}
    //auto preds = output.toTuple()->elements()[0].toTensor();
    
    std::vector<torch::Tensor> dets = non_max_suppression(preds, 0.4, 0.5);
    cout << "det:" << dets.size() << endl;
    if (dets.size() > 0)
    {
        cout << "det:" << dets[0] << endl; // safe to index only after the size check
        // Visualize result
        for (size_t i = 0; i < dets[0].sizes()[0]; ++i)
        {
            float left = dets[0][i][0].item().toFloat() * frame.cols / 640;
            float top = dets[0][i][1].item().toFloat() * frame.rows / 640; // 384
            float right = dets[0][i][2].item().toFloat() * frame.cols / 640;
            float bottom = dets[0][i][3].item().toFloat() * frame.rows / 640; //384
            float score = dets[0][i][4].item().toFloat();
            int classID = dets[0][i][5].item().toInt();
            
            cv::rectangle(frame, cv::Rect(left, top, (right - left), (bottom - top)), cv::Scalar(0, 255, 0), 2);

            cv::putText(frame,
                classnames[classID] + ": " + cv::format("%.2f", score),
                cv::Point(left, top),
                cv::FONT_HERSHEY_SIMPLEX, (right - left) / 200, cv::Scalar(0, 255, 0), 2);
        }
    }
    cv::putText(frame, "FPS: " + std::to_string(int(1e7 / (clock() - start))),
        cv::Point(50, 50),
        cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 0), 2);
    cv::imwrite("libtorchYolov5\\x64\\Release\\00.jpg", frame);
    cv::imshow("", frame);
    cv::waitKey(0);
        //if (cv::waitKey(1) == 27) break;
    //}
    return 0;
}
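The environment at the top lists CUDA 10.1, yet the program above pins everything to the CPU (module.to(at::kCPU)). Below is a minimal sketch of a GPU-capable variant, assuming a CUDA build of libtorch; forward_on_best_device is a hypothetical helper, not part of the original post. Predictions are returned on the CPU because the non_max_suppression above reads tensor elements one at a time.

#include <torch/script.h>
#include <torch/cuda.h>

torch::Tensor forward_on_best_device(torch::jit::script::Module& module,
                                     torch::Tensor imgTensor)
{
    // Pick the GPU when the libtorch build and machine support it.
    torch::Device device = torch::cuda::is_available() ? torch::Device(at::kCUDA)
                                                       : torch::Device(at::kCPU);
    module.to(device);
    torch::Tensor preds = module.forward({ imgTensor.to(device) })
                              .toTuple()->elements()[0].toTensor();
    // Bring the result back for the scalar-indexing NMS.
    return preds.to(at::kCPU);
}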

Result:

(result screenshots from the original post omitted)

Source: https://blog.csdn.net/xingtianyao/article/details/114178278