From f23ea01058617bd55643098357b60159784e71ff Mon Sep 17 00:00:00 2001
From: dusty-nv
Date: Wed, 5 Dec 2018 17:37:24 +0000
Subject: [PATCH] fix for TensorRT 3.0

---
 calibration/randInt8Calibrator.cpp |  4 ++++
 calibration/randInt8Calibrator.h   |  3 +++
 tensorNet.cpp                      | 13 +++++++++++++
 3 files changed, 20 insertions(+)

diff --git a/calibration/randInt8Calibrator.cpp b/calibration/randInt8Calibrator.cpp
index 48758c7f2..a54f4ca8b 100644
--- a/calibration/randInt8Calibrator.cpp
+++ b/calibration/randInt8Calibrator.cpp
@@ -28,6 +28,8 @@
 #include
 
+#if NV_TENSORRT_MAJOR >= 4
+
 //-------------------------------------------------------------------------------------------------
 static inline int volume(nvinfer1::Dims dims)
 {
@@ -108,3 +110,5 @@ void randInt8Calibrator::writeCalibrationCache( const void*, size_t )
 {
 }
 
+#endif
+
diff --git a/calibration/randInt8Calibrator.h b/calibration/randInt8Calibrator.h
index 8f24c494d..9a6a81a33 100644
--- a/calibration/randInt8Calibrator.h
+++ b/calibration/randInt8Calibrator.h
@@ -30,6 +30,8 @@
 #include
 
+#if NV_TENSORRT_MAJOR >= 4
+
 /**
  * Random INT8 Calibrator.
  * This calibrator is for testing performance without needing
@@ -80,4 +82,5 @@ class randInt8Calibrator : public nvinfer1::IInt8EntropyCalibrator
 };
 
 #endif
+#endif
diff --git a/tensorNet.cpp b/tensorNet.cpp
index 04cb5c5b2..a7518b0da 100644
--- a/tensorNet.cpp
+++ b/tensorNet.cpp
@@ -215,9 +215,11 @@ std::vector<precisionType> tensorNet::DetectNativePrecisions( deviceType device
 	if( builder->platformHasFastFp16() )
 		types.push_back(TYPE_FP16);
 
+#if NV_TENSORRT_MAJOR >= 4
 	// detect fast (native) INT8
 	if( builder->platformHasFastInt8() )
 		types.push_back(TYPE_INT8);
+#endif
 
 	// print out supported precisions (optional)
 	const uint32_t numTypes = types.size();
@@ -314,6 +316,7 @@ bool tensorNet::ProfileModel(const std::string& deployFile,	// name for caf
 	}
 
 	// extract the dimensions of the network input blobs
+#if NV_TENSORRT_MAJOR >= 4
 	std::map<std::string, nvinfer1::Dims3> inputDimensions;
 
 	for( int i=0, n=network->getNbInputs(); i < n; i++ )
@@ -322,6 +325,7 @@ bool tensorNet::ProfileModel(const std::string& deployFile,	// name for caf
 		inputDimensions.insert(std::make_pair(network->getInput(i)->getName(), dims));
 		std::cout << LOG_TRT << "retrieved Input tensor \"" << network->getInput(i)->getName() << "\": " << dims.d[0] << "x" << dims.d[1] << "x" << dims.d[2] << std::endl;
 	}
+#endif
 
 	// the caffe file has no notion of outputs, so we need to manually say which tensors the engine should generate
 	const size_t num_outputs = outputs.size();
@@ -334,8 +338,10 @@ bool tensorNet::ProfileModel(const std::string& deployFile,	// name for caf
 			printf(LOG_GIE "failed to retrieve tensor for Output \"%s\"\n", outputs[n].c_str());
 		else
 		{
+		#if NV_TENSORRT_MAJOR >= 4
 			nvinfer1::Dims3 dims = static_cast<nvinfer1::Dims3&&>(tensor->getDimensions());
 			printf(LOG_GIE "retrieved Output tensor \"%s\": %ix%ix%i\n", tensor->getName(), dims.d[0], dims.d[1], dims.d[2]);
+		#endif
 		}
 
 		network->markOutput(*tensor);
@@ -351,6 +357,7 @@ bool tensorNet::ProfileModel(const std::string& deployFile,	// name for caf
 	// set up the builder for the desired precision
 	if( precision == TYPE_INT8 )
 	{
+	#if NV_TENSORRT_MAJOR >= 4
 		builder->setInt8Mode(true);
 		//builder->setFp16Mode(true);	// TODO: experiment for benefits of both INT8/FP16
 
@@ -361,6 +368,12 @@ bool tensorNet::ProfileModel(const std::string& deployFile,	// name for caf
 		}
 
 		builder->setInt8Calibrator(calibrator);
+	#else
+		printf(LOG_TRT "INT8 precision requested, but TensorRT %u.%u doesn't meet the minimum version for INT8\n", NV_TENSORRT_MAJOR, NV_TENSORRT_MINOR);
+		printf(LOG_TRT "please use TensorRT version 4.0 or newer for INT8 support\n");
+
+		return false;
+	#endif
 	}
 	else if( precision == TYPE_FP16 )
 	{