From 2eb840375dcd470d9811268be162cf8c998784c0 Mon Sep 17 00:00:00 2001
From: wenbin
Date: Mon, 1 Nov 2021 06:21:37 +0000
Subject: [PATCH 1/2] disable int8

---
 paddle/fluid/inference/tensorrt/engine.cc | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/paddle/fluid/inference/tensorrt/engine.cc b/paddle/fluid/inference/tensorrt/engine.cc
index 575c0185863617..f3ea7ede1537bb 100644
--- a/paddle/fluid/inference/tensorrt/engine.cc
+++ b/paddle/fluid/inference/tensorrt/engine.cc
@@ -148,12 +148,21 @@ void TensorRTEngine::FreezeNetwork() {
       // and outputs have scales,
       // this layer's precision and output type are set to float32.
       // This step has no effect if this layer is fused during TRT optimization.
+      size_t layers_no_int8 = 0;
       for (int i = 0; i < network()->getNbLayers(); i++) {
         auto layer = network()->getLayer(i);
         if (!is_layer_int8(layer)) {
           layer->setPrecision(nvinfer1::DataType::kFLOAT);
+          ++layers_no_int8;
         }
       }
+      // Disable int8 if no layer runs in int8; otherwise the engine build fails.
+      if (layers_no_int8 == network()->getNbLayers()) {
+        nvinfer1::BuilderFlags flags = infer_builder_config_->getFlags();
+        flags = flags & ~(1U << static_cast<int>(nvinfer1::BuilderFlag::kINT8));
+        // write the modified flags back to the builder config
+        infer_builder_config_->setFlags(flags);
+      }
 #else
       LOG(WARNING) << "If your TensorRT version is lower than 5.1.2.2, you "
                       "must provide quantization scales for all tensors using "

From a971c1f2d35c422b703dd98ba29ffb01f7d77b41 Mon Sep 17 00:00:00 2001
From: wenbin
Date: Mon, 1 Nov 2021 06:30:07 +0000
Subject: [PATCH 2/2] size_t to int

---
 paddle/fluid/inference/tensorrt/engine.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/paddle/fluid/inference/tensorrt/engine.cc b/paddle/fluid/inference/tensorrt/engine.cc
index f3ea7ede1537bb..64116b7973e710 100644
--- a/paddle/fluid/inference/tensorrt/engine.cc
+++ b/paddle/fluid/inference/tensorrt/engine.cc
@@ -148,7 +148,7 @@ void TensorRTEngine::FreezeNetwork() {
       // and outputs have scales,
       // this layer's precision and output type are set to float32.
       // This step has no effect if this layer is fused during TRT optimization.
-      size_t layers_no_int8 = 0;
+      int layers_no_int8 = 0;
       for (int i = 0; i < network()->getNbLayers(); i++) {
         auto layer = network()->getLayer(i);
         if (!is_layer_int8(layer)) {
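
Reviewer note: the series amounts to counting how many layers fall back to
fp32 and, when every layer does, clearing the kINT8 bit in the TensorRT
builder-config flag bitmask so the builder uses fp32/fp16 kernels instead of
failing the engine build. Below is a minimal standalone sketch of that
technique; DisableInt8 and the config parameter are illustrative names, not
part of the patch:

#include <NvInfer.h>

// Clear the kINT8 bit from a builder config. nvinfer1::BuilderFlags is a
// bitmask indexed by the nvinfer1::BuilderFlag enum values, so a single AND
// with the inverted bit removes int8 while leaving other flags (e.g. kFP16)
// untouched.
void DisableInt8(nvinfer1::IBuilderConfig* config) {
  nvinfer1::BuilderFlags flags = config->getFlags();
  flags &= ~(1U << static_cast<int>(nvinfer1::BuilderFlag::kINT8));
  config->setFlags(flags);
}

Recent TensorRT releases also expose IBuilderConfig::clearFlag(), so the same
effect could likely be had with config->clearFlag(nvinfer1::BuilderFlag::kINT8);
the manual mask above matches the style the patch uses in engine.cc. The
follow-up patch changes layers_no_int8 from size_t to int because
getNbLayers() returns int, avoiding a signed/unsigned comparison in the
equality check.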