C++ 配置 YOLOv7 时报错：Failed to parse ONNX model from file: /workspace/models/yolov7/v1.0/tensorrt_inference/yolov7.onnx
我的环境是 Windows 10，cuDNN、CUDA、TensorRT、yaml 等均已严格正确安装。（注意：报错中的 /workspace/... 是 Linux 风格路径，Windows 上通常不存在该目录——"Could not open file" 很可能是 onnx_file 路径配置错误导致的，请先确认该路径在本机确实存在。）
Could not open file /workspace/models/yolov7/v1.0/tensorrt_inference/yolov7.onnx
Could not open file /workspace/models/yolov7/v1.0/tensorrt_inference/yolov7.onnx
Failed to parse ONNX model from file: /workspace/models/yolov7/v1.0/tensorrt_inference/yolov7.onnx
void Model::onnxToTRTModel() {
    // Build a TensorRT engine from the ONNX model at `onnx_file` and serialize
    // it to `engine_file`. On parse failure the function logs and returns early
    // instead of building from an empty network (which previously crashed later
    // inside buildEngineWithConfig).
    nvinfer1::IBuilder *builder = nvinfer1::createInferBuilder(gLogger.getTRTLogger());
    assert(builder != nullptr);

    // ONNX models must be imported into an explicit-batch network definition.
    const auto explicitBatch = 1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
    auto network = builder->createNetworkV2(explicitBatch);
    auto config = builder->createBuilderConfig();
    auto parser = nvonnxparser::createParser(*network, gLogger.getTRTLogger());

    if (!parser->parseFromFile(onnx_file.c_str(), static_cast<int>(gLogger.getReportableSeverity()))) {
        gLogError << "Failure while parsing ONNX file" << std::endl;
        // Bail out: continuing with an empty network produces an invalid engine.
        parser->destroy();
        network->destroy();
        config->destroy();
        builder->destroy();
        return;
    }

    // Build the engine: FP16 enabled, workspace capped at 1 GiB.
    builder->setMaxBatchSize(BATCH_SIZE);
    config->setMaxWorkspaceSize(1_GiB);
    config->setFlag(nvinfer1::BuilderFlag::kFP16);
    std::cout << "start building engine" << std::endl;
    engine = builder->buildEngineWithConfig(*network, *config);
    std::cout << "build engine done" << std::endl;
    assert(engine);

    // The parser is only needed during build.
    parser->destroy();

    // Serialize the engine and persist it to disk.
    nvinfer1::IHostMemory *data = engine->serialize();
    std::ofstream file(engine_file, std::ios::binary | std::ios::out);
    if (!file) {
        // Previously an open failure was silent and an empty/absent engine
        // file would surface only on the next load.
        gLogError << "Could not open engine file for writing: " << engine_file << std::endl;
    } else {
        std::cout << "writing engine file..." << std::endl;
        file.write(static_cast<const char *>(data->data()),
                   static_cast<std::streamsize>(data->size()));
        std::cout << "save engine file done" << std::endl;
        file.close();
    }
    data->destroy();  // fix leak: serialized blob was never released

    // Tear down builder objects (config was previously leaked).
    network->destroy();
    config->destroy();
    builder->destroy();
}
我的 ONNX 转 TensorRT 引擎的代码如下：
打开这个文件夹