
C++: Calling a FasterRCNN Model with onnxruntime

2021-12-16


Background:

        Recently, for project reasons, I needed to do some object detection work in C++, so I started by tinkering with YOLOv5 and found deployment quite convenient: after converting the YOLOv5 model to ONNX, it can be loaded with OpenCV's dnn.readNetFromONNX, and the rest is just input preprocessing and output parsing.
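For reference, the OpenCV route mentioned above looks roughly like the sketch below. This is only a minimal sketch: the model path, image path and input size are placeholders (not from this project), and the model-specific output decoding and NMS are omitted.

//opencv_dnn_sketch.cpp
#include <opencv2/opencv.hpp>

int main()
{
	// placeholder model and image paths
	cv::dnn::Net net = cv::dnn::readNetFromONNX("yolov5s.onnx");
	cv::Mat img = cv::imread("test.jpg");
	// scale to [0,1], resize to the network input size, swap BGR->RGB
	cv::Mat blob = cv::dnn::blobFromImage(img, 1.0 / 255.0, cv::Size(640, 640), cv::Scalar(), true, false);
	net.setInput(blob);
	cv::Mat out = net.forward();	// decoding the output tensor and applying NMS are model-specific
	return 0;
}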

       However, when I took a FasterRCNN model trained with tf15 and successfully converted it to ONNX with tf2onnx, OpenCV could not read it and reported the error below, while onnxruntime loaded the same model without problems.

cv2.error: OpenCV(4.5.4) D:\a\opencv-python\opencv-python\opencv\modules\dnn\src\onnx\onnx_graph_simplifier.cpp:692: 
error: (-210:Unsupported format or combination of formats) 
Unsupported data type: BOOL in function 'cv::dnn::dnn4_v20211004::getMatFromTensor'

Roughly speaking, this means the operation is unsupported because the BOOL tensor data type is not supported by OpenCV's ONNX importer.

Code:

       So I fell back on the onnxruntime C++ API to run the model. Without further ado, here is the code:

//FRCNN.h
#pragma once

#include<iostream>
#include<fstream>
#include<numeric>
#include<opencv.hpp>
#include"../commonStruct.h"
#include"../BaseShipDetectionModel.h"
#include <onnxruntime_cxx_api.h>
class FRCNN:public BaseShipDetectionModel
{
public:
	FRCNN();
	~FRCNN();

	bool readModel(std::string &netPath, bool isCuda=false);
	bool DetectShip(cv::Mat &SrcImg, std::vector<Output> &output);
	void drawPredShip(cv::Mat &img, std::vector<Output>& result);

	
private:
	enum OutputFlag
	{
		//NOTHING,
		BOXES,
		SCORES,
		CLSIDS
	};


	Ort::Env *OnnxEnv;
	Ort::SessionOptions OnnxSessionOp;
	Ort::Session* OnnxSession;
	Ort::AllocatorWithDefaultOptions allocator;
	Ort::MemoryInfo *memory_info;
	cv::Size2f factor;
	const int netWidth = 1067;
	const int netHeight = 600;
	float nmsThreshold = 0.45;
	float boxThreshold = 0.31;
	float classThreshold = 0.25;
	size_t num_input_nodes, num_output_nodes;
	std::vector<const char*> input_node_names, output_node_names;
	std::vector<OutputFlag> output_node_namesFlag;
	
	//Ort::Value *input_tensor;
	std::vector<int64_t> input_node_dims = { netHeight, netWidth,3 };
	size_t input_tensor_size = 3 * netHeight * netWidth;

	void parseOnnxOutput(std::vector<Ort::Value>&inputTensors, std::vector<Output> &results);
	
};
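// Note: commonStruct.h and BaseShipDetectionModel.h are the author's own headers and are
// not included in this post. Judging only from the members accessed in parseOnnxOutput()
// below, the Output struct presumably looks roughly like this (an assumption, not the
// original definition):
//
// struct Output {
//     int ClsId;           // class id (background already subtracted)
//     float confidence;    // detection score
//     cv::Rect box;        // bounding box in original-image coordinates
// };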
//FRCNN.cpp
#include "FRCNN.h"

using namespace std;
using namespace cv;
using namespace dnn;

#if 1
FRCNN::FRCNN()
{
	num_input_nodes = 0;
	num_output_nodes = 0;
	OnnxSession = nullptr;	// created later in readModel()
	//Ort::Env env(ORT_LOGGING_LEVEL_VERBOSE, "test");
	
	OnnxEnv = new Ort::Env(ORT_LOGGING_LEVEL_WARNING, "test");
	//Ort::MemoryInfo memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
	memory_info=new Ort::MemoryInfo(Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault));
	//memory_info =new Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
	if (OnnxEnv == nullptr) {
		std::cout << "new Error for " << VNAME(OnnxEnv) << std::endl;
		throw OnnxEnv;
	}
	if (memory_info == nullptr) {
		std::cout << "new Error for " << VNAME(memory_info) << std::endl;
		throw memory_info;
	}
	OnnxSessionOp.SetIntraOpNumThreads(5);
	OnnxSessionOp.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_ALL);
	flag_onnx = true;
	/*cout << OnnxEnv << '\t' << *OnnxEnv << endl;
	cout << &env << '\t' << env << endl;*/
	//test(*OnnxEnv, env, OnnxEnv);
	//std::cout<<env.operator const OrtEnv *
}
FRCNN::~FRCNN()
{
	if (OnnxSession != nullptr)
		delete OnnxSession;
	if (memory_info != nullptr)
		delete memory_info;
	if (OnnxEnv != nullptr)
		delete OnnxEnv;
}

bool FRCNN::readModel(string &netPath, bool isCuda) {
	try 
	{		
		std::ifstream f(netPath.c_str());
		std::cout << f.good() << std::endl;
		
		std::wstring wnetPath = std::wstring(netPath.begin(), netPath.end());
		
		OnnxSession=new Ort::Session((*OnnxEnv), wnetPath.c_str(), OnnxSessionOp);
		if (OnnxSession == nullptr) {
			std::cout << "new Error for " << VNAME(OnnxSession) << std::endl;
			throw OnnxSession;
		}
		
		// print model input layer (node names, types, shape etc.)
		// print number of model input nodes
		num_input_nodes = OnnxSession->GetInputCount();
		num_output_nodes = OnnxSession->GetOutputCount();
		
		for (int i = 0; i < num_input_nodes; ++i) {
			input_node_names.push_back(OnnxSession->GetInputName(i, allocator));
			//= { "image:0"};
			std::cout << input_node_names[i] << std::endl;
		}
		for (int i = 0; i < num_output_nodes; ++i) {
			char* name = OnnxSession->GetOutputName(i, allocator);
			output_node_names.push_back(name);
			if (strstr(name, "boxes") != nullptr) {
				output_node_namesFlag.push_back(BOXES);
			}
			else if (strstr(name, "scores") != nullptr) {
				output_node_namesFlag.push_back(SCORES);
			}
			else if (strstr(name, "labels") != nullptr) {
				output_node_namesFlag.push_back(CLSIDS);
			}
			else {
				//output_node_namesFlag.push_back(NOTHING);
				throw(name);
			}
			//= { "output/boxes:0", "output/scores:0","output/labels:0"};
			std::cout << output_node_names[i] << std::endl;
		}
		
	}
	catch (const std::exception& e) {
		return false;
	}

	return true;
}


bool FRCNN::DetectShip(cv::Mat &SrcImg,  std::vector<Output> &results) {

	if (SrcImg.empty()) {
		std::cout << "empty image error!" << std::endl;
		return false;
	}
	int col = SrcImg.cols;
	int row = SrcImg.rows;
	int i, j;
	results.clear();

	Mat netInputImg,Img;
	
	std::vector<int> indices;
	Output result;
	//netInputImg.create(SrcImg.size,SrcImg.depth());
	
	//SrcImg.copyTo(netInputImg);
	cv::resize(SrcImg, Img, cv::Size(netWidth, netHeight), 0.0, 0.0, cv::INTER_LINEAR);
	factor= cv::Size2f((float)SrcImg.cols / netWidth, (float)SrcImg.rows / netHeight);
	try {
		netInputImg.create(cv::Size(netWidth, netHeight), CV_32FC3);//allocate the continuous Mat
		Img.convertTo(netInputImg, CV_32F);
		assert(netInputImg.isContinuous());//
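		// Note: this tf2onnx-exported graph takes a single HWC float image
		// (input_node_dims = {netHeight, netWidth, 3} in FRCNN.h), so the continuous Mat
		// buffer can be handed to CreateTensor directly, with no HWC->CHW transpose.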
		
		Ort::Value input_tensor = Ort::Value::CreateTensor<float>(*memory_info, (float*)netInputImg.data, input_tensor_size, input_node_dims.data(), 3);
		assert(input_tensor.IsTensor());
		std::vector<Ort::Value> ort_inputs;
		ort_inputs.push_back(std::move(input_tensor));
							
		//Run the Detection
		std::vector<Ort::Value> output_tensors = OnnxSession->Run(Ort::RunOptions{ nullptr }, input_node_names.data(), ort_inputs.data(), ort_inputs.size(), output_node_names.data(), output_node_names.size());
		
		parseOnnxOutput(output_tensors,  results);
	}
	catch (...) {
		std::cout << "prediction error!" << std::endl;
		return false;
	}

	if (results.size())
		return true;
	else
		return false;

}

void FRCNN::drawPredShip(cv::Mat & img, std::vector<Output>& result)
{
	BaseShipDetectionModel::drawPredShip(img, result);
}

void FRCNN::parseOnnxOutput(std::vector<Ort::Value>& inputTensors, std::vector<Output>& results)
{
	std::vector<int64_t> classIds;
	std::vector<float> confidences;
	std::vector<cv::Rect> boxes;
	int i, j;

	std::vector<int64_t> shape;
	size_t eleCount;
	size_t DimCount;
	int xmin, xmax, ymin, ymax;
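	// Each output tensor is identified by the OutputFlag recorded in readModel() (matched by
	// node name there), so the parsing below does not depend on the outputs appearing in a
	// particular order in the graph.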
	for (i = 0; i < num_output_nodes; ++i) {
		Ort::TensorTypeAndShapeInfo Info = inputTensors[i].GetTensorTypeAndShapeInfo();
		//std::cout << ":GetDimensionsCount:" << Info.GetDimensionsCount() << '\t';
		shape = Info.GetShape();
		DimCount = shape.size();
		//std::cout << i << "shape:";
		//for (int j = 0; j < shape.size(); ++j) {
		//	std::cout << shape[j] << '\t';
		//}
		eleCount = Info.GetElementCount();
		//std::cout << ":GetElementCount:" << eleCount << '\t';

		ONNXTensorElementDataType onnxType = Info.GetElementType();
		void* ptr = nullptr;
		//std::cout << "GetElementType:" << onnxType << '\t' << std::endl;
		switch (onnxType) {
		case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT:// maps to c type float
		{
			ptr = inputTensors[i].GetTensorMutableData<float>();
		}
		break;
		case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64:// maps to c type int64_t
		{
			ptr = inputTensors[i].GetTensorMutableData<int64_t>();
		}
		break;
		default:
			throw("Unknown DataType!");
			break;
		}

		//std::cout << sizeof(ptr) << sizeof((float*)ptr) << sizeof((int64_t*)ptr)<<sizeof((uint8_t*)ptr)<< sizeof((uint16_t*)ptr)<< sizeof((int32_t*)ptr) << std::endl;
		//= { "output/boxes:0", "output/scores:0","output/labels:0"};
		/*
	output[0]//(44->(11,4))
	output[1]//(11->(11))
	output[2]//(11->(11))
	*/
		switch (output_node_namesFlag[i]) {
		case BOXES://xmin,ymin,xmax,ymax
		{
			float* p_boxes = (float*)ptr;
			for (j = 0; j < eleCount; j += 4) {
				xmin = p_boxes[j] *factor.width;
				ymin = p_boxes[j + 1] * factor.height;
				xmax = p_boxes[j + 2] *factor.width;
				ymax = p_boxes[j + 3] *factor.height;

				boxes.push_back(cv::Rect(xmin, ymin, xmax - xmin, ymax - ymin));
			}
			break;
		}
		case SCORES:
		{
			float* p_scores = (float*)ptr;
			for (j = 0; j < eleCount; j++) {
				confidences.push_back(p_scores[j]);
			}
			break;
		}
		case CLSIDS:
		{
			int64_t* p_clsids = (int64_t*)ptr;
			for (j = 0; j < eleCount; j++) {
				classIds.push_back(p_clsids[j]);
			}
			break;
		}	
		}

	}
	Output result;
	assert((boxes.size() == classIds.size())&&(boxes.size()==confidences.size()));
	for (i = 0; i < boxes.size(); ++i) {
		//j = indices[i];
		if (confidences[i] > boxThreshold) {
			result.ClsId = classIds[i]-1;//except background
			result.confidence = confidences[i];
			result.box = boxes[i];
			results.push_back(result);
		}
	}
		
}

#endif

Call order (a usage sketch follows the list):

1. readModel // load the model

2. DetectShip // run detection (rename the function as needed)

3. drawPredShip // draw the results (rename the function as needed)
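A minimal usage sketch following the call order above (the model and image paths are placeholders, and error handling is kept to a minimum):

//main.cpp
#include "FRCNN.h"

int main()
{
	FRCNN detector;
	std::string modelPath = "fasterRCNN.onnx";	// placeholder path

	if (!detector.readModel(modelPath)) {		// 1. load the model
		std::cout << "failed to load model" << std::endl;
		return -1;
	}

	cv::Mat img = cv::imread("ship.jpg");		// placeholder image
	std::vector<Output> results;
	if (detector.DetectShip(img, results)) {	// 2. run detection
		detector.drawPredShip(img, results);	// 3. draw the results
		cv::imshow("result", img);
		cv::waitKey(0);
	}
	return 0;
}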

The FasterRCNN model used by this code can be downloaded here: fasterRCNN.model (deep-learning resource on the CSDN library)

Afterword:

This post is just a personal note on calling onnxruntime from C++, in particular the input preparation and output parsing parts. If anything is unclear or wrong, feel free to point it out!

Source: https://blog.csdn.net/u014426939/article/details/121984500
