diff --git a/paddle/fluid/inference/tensorrt/convert/activation_op.cc b/paddle/fluid/inference/tensorrt/convert/activation_op.cc
index 44349bab0372ad27e85c47a5e15af4f283f8ec46..18de448690534656cdfe851c74a2b390264b1b6b 100644
--- a/paddle/fluid/inference/tensorrt/convert/activation_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/activation_op.cc
@@ -49,10 +49,9 @@ class ActivationOpConverter : public OpConverter {
       layer->setAlpha(0.);
       layer->setBeta(6.);
     }
-    g
 #endif
-    auto output_name = op_desc.Output("Out")[0];
+    auto output_name = op_desc.Output("Out")[0];
     RreplenishLayerAndOutput(layer, op_type_, {output_name}, test_mode);
     if (op_desc.HasAttr("out_scale")) {
diff --git a/paddle/fluid/inference/tensorrt/convert/batch_norm_op.cc b/paddle/fluid/inference/tensorrt/convert/batch_norm_op.cc
index 25f0d866dcdc7b0ab1074fe132fa037e78908622..99b9f5746b8f2bb3205330decce813ca0ad44cbf 100644
--- a/paddle/fluid/inference/tensorrt/convert/batch_norm_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/batch_norm_op.cc
@@ -26,13 +26,37 @@ class BatchNormOpConverter : public OpConverter {
     VLOG(3) << "convert a fluid batch norm op to tensorrt batch_norm";

     framework::OpDesc op_desc(op, nullptr);
-    PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1);
-    PADDLE_ENFORCE_EQ(op_desc.Input("Bias").size(), 1);  // Bias is a weight
-    PADDLE_ENFORCE_EQ(op_desc.Input("Mean").size(), 1);  // Mean is a weight
-    PADDLE_ENFORCE_EQ(op_desc.Input("Scale").size(), 1);  // Scale is a weight
-    PADDLE_ENFORCE_EQ(op_desc.Input("Variance").size(),
-                      1);  // Variance is a weight
-    PADDLE_ENFORCE_EQ(op_desc.Output("Y").size(), 1);
+    PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1,
+                      platform::errors::InvalidArgument(
+                          "Invalid input X's size of batch_norm TRT converter. "
+                          "Expected 1, received %d.",
+                          op_desc.Input("X").size()));
+    PADDLE_ENFORCE_EQ(op_desc.Input("Bias").size(), 1,
+                      platform::errors::InvalidArgument(
+                          "Invalid input Bias's size of batch_norm TRT "
+                          "converter. Expected 1, received %d.",
+                          op_desc.Input("Bias").size()));  // Bias is a weight
+    PADDLE_ENFORCE_EQ(op_desc.Input("Mean").size(), 1,
+                      platform::errors::InvalidArgument(
+                          "Invalid input Mean's size of batch_norm TRT "
+                          "converter. Expected 1, received %d.",
+                          op_desc.Input("Mean").size()));  // Mean is a weight
+    PADDLE_ENFORCE_EQ(op_desc.Input("Scale").size(), 1,
+                      platform::errors::InvalidArgument(
+                          "Invalid input Scale's size of batch_norm TRT "
+                          "converter. Expected 1, received %d.",
+                          op_desc.Input("Scale").size()));  // Scale is a weight
+    PADDLE_ENFORCE_EQ(
+        op_desc.Input("Variance").size(), 1,
+        platform::errors::InvalidArgument(
+            "Invalid input Variance's size of batch_norm TRT converter. "
+            "Expected 1, received %d.",
+            op_desc.Input("Variance").size()));  // Variance is a weight
+    PADDLE_ENFORCE_EQ(op_desc.Output("Y").size(), 1,
+                      platform::errors::InvalidArgument(
+                          "Invalid output Y's size of batch_norm TRT "
+                          "converter. Expected 1, received %d.",
+                          op_desc.Output("Y").size()));

     auto* X = engine_->GetITensor(op_desc.Input("X").front());
     // Declare weights
@@ -42,10 +66,22 @@ class BatchNormOpConverter : public OpConverter {
     auto* Variance_v = scope.FindVar(op_desc.Input("Variance").front());
     const float eps = boost::get<float>(op_desc.GetAttr("epsilon"));

-    PADDLE_ENFORCE_NOT_NULL(Bias_v);
-    PADDLE_ENFORCE_NOT_NULL(Mean_v);
-    PADDLE_ENFORCE_NOT_NULL(Scale_v);
-    PADDLE_ENFORCE_NOT_NULL(Variance_v);
+    PADDLE_ENFORCE_NOT_NULL(
+        Bias_v,
+        platform::errors::NotFound(
+            "Variable of Bias of batch_norm TRT converter is not found."));
+    PADDLE_ENFORCE_NOT_NULL(
+        Mean_v,
+        platform::errors::NotFound(
+            "Variable of Mean of batch_norm TRT converter is not found."));
+    PADDLE_ENFORCE_NOT_NULL(
+        Scale_v,
+        platform::errors::NotFound(
+            "Variable of Scale of batch_norm TRT converter is not found."));
+    PADDLE_ENFORCE_NOT_NULL(
+        Variance_v,
+        platform::errors::NotFound(
+            "Variable of Variance of batch_norm TRT converter is not found."));

     // get tensor
     auto* Bias_t = Bias_v->GetMutable<framework::LoDTensor>();