diff --git a/paddle/fluid/inference/tensorrt/convert/leaky_relu_op.cc b/paddle/fluid/inference/tensorrt/convert/leaky_relu_op.cc
index a5581737eefb66b8d0c0d8dc90300f297a4d1ac4..0b26496a94e9f86a1f4d4602c284acd74dfac86d 100644
--- a/paddle/fluid/inference/tensorrt/convert/leaky_relu_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/leaky_relu_op.cc
@@ -27,12 +27,20 @@ class LeakyReluOpConverter : public OpConverter {
     framework::OpDesc op_desc(op, nullptr);
     // Declare inputs
-    int input_num = op_desc.Input("X").size();
-    PADDLE_ENFORCE(input_num == 1);
+    size_t input_num = op_desc.Input("X").size();
+    PADDLE_ENFORCE_EQ(input_num, 1UL,
+                      platform::errors::InvalidArgument(
+                          "Invalid number of TRT leaky_relu op converter "
+                          "inputs. Expected 1, but received %d",
+                          input_num));
     auto* input = engine_->GetITensor(op_desc.Input("X")[0]);
     // Get output
     size_t output_num = op_desc.Output("Out").size();
-    PADDLE_ENFORCE(output_num == 1);
+    PADDLE_ENFORCE_EQ(output_num, 1UL,
+                      platform::errors::InvalidArgument(
+                          "Invalid number of TRT leaky_relu op converter "
+                          "outputs. Expected 1, but received %d",
+                          output_num));
     // Get attrs
     float alpha = boost::get<float>(op_desc.GetAttr("alpha"));
     nvinfer1::ILayer* output_layer = nullptr;
@@ -66,11 +74,17 @@ class LeakyReluOpConverter : public OpConverter {
     auto* scale_layer = TRT_ENGINE_ADD_LAYER(
         engine_, Scale, *input, nvinfer1::ScaleMode::kUNIFORM, shift.get(),
         scale.get(), power.get());
-    PADDLE_ENFORCE(nullptr != scale_layer);
+    PADDLE_ENFORCE_NOT_NULL(
+        scale_layer, platform::errors::InvalidArgument(
+                         "Invalid scale layer in leaky_relu TRT op converter. "
+                         "The scale layer should not be null."));
     // y_relu = (x > 0) : x : 0
     auto* relu_layer = TRT_ENGINE_ADD_LAYER(engine_, Activation, *input,
                                             nvinfer1::ActivationType::kRELU);
-    PADDLE_ENFORCE(nullptr != relu_layer);
+    PADDLE_ENFORCE_NOT_NULL(
+        relu_layer, platform::errors::InvalidArgument(
+                        "Invalid relu layer in leaky_relu TRT op converter. "
+                        "The relu layer should not be null."));
     //
     TensorRTEngine::Weight sub_scale{nvinfer1::DataType::kFLOAT, &alpha_data[1],
                                      1};
@@ -78,16 +92,29 @@ class LeakyReluOpConverter : public OpConverter {
         TRT_ENGINE_ADD_LAYER(engine_, Scale, *(relu_layer->getOutput(0)),
                              nvinfer1::ScaleMode::kUNIFORM, shift.get(),
                              sub_scale.get(), power.get());
-    PADDLE_ENFORCE(nullptr != scale_relu_layer);
+    PADDLE_ENFORCE_NOT_NULL(
+        scale_relu_layer,
+        platform::errors::InvalidArgument(
+            "Invalid scale_relu layer in leaky_relu TRT op converter. The "
+            "scale_relu layer should not be null."));
     output_layer =
         TRT_ENGINE_ADD_LAYER(engine_, ElementWise, *(scale_layer->getOutput(0)),
                              *(scale_relu_layer->getOutput(0)),
                              nvinfer1::ElementWiseOperation::kSUM);
-    PADDLE_ENFORCE(nullptr != output_layer);
+    PADDLE_ENFORCE_NOT_NULL(
+        output_layer, platform::errors::InvalidArgument(
+                          "Invalid output layer in leaky_relu TRT op "
+                          "converter. The output layer should not be null."));
     // keep alpha tensor to avoid release it's memory
     std::string alpha_name = op_desc.Output("Out")[0] + "_alpha";
-    PADDLE_ENFORCE(engine_->weight_map.find(alpha_name) ==
-                   engine_->weight_map.end());
+    bool alpha_not_in_weight_map =
+        (engine_->weight_map.find(alpha_name) == engine_->weight_map.end());
+    PADDLE_ENFORCE_EQ(alpha_not_in_weight_map, true,
+                      platform::errors::InvalidArgument(
+                          "The name of parameter alpha in leaky_relu TRT op "
+                          "converter is already "
+                          "found in the weight map. The same weight cannot be "
+                          "set twice. Please check if it is already set."));
     engine_->SetWeights(alpha_name, std::move(alpha_tensor));
 #endif
     auto output_name = op_desc.Output("Out")[0];
diff --git a/paddle/fluid/inference/tensorrt/convert/prelu_op.cc b/paddle/fluid/inference/tensorrt/convert/prelu_op.cc
index 88dd1e0b5247a51d393ed334c5f9f7e7b944bc40..5c0aa07192dd7d34fdf4b4fa1adef1b62d5e8374 100644
--- a/paddle/fluid/inference/tensorrt/convert/prelu_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/prelu_op.cc
@@ -30,17 +30,27 @@ class PReluOpConverter : public OpConverter {
     framework::OpDesc op_desc(op, nullptr);
     // Declare inputs
-    int input_num = op_desc.Input("X").size();
-    PADDLE_ENFORCE(input_num == 1);
+    size_t input_num = op_desc.Input("X").size();
+    PADDLE_ENFORCE_EQ(input_num, 1UL,
+                      platform::errors::InvalidArgument(
+                          "Invalid input X's size of prelu TRT converter. "
+                          "Expected 1, received %d.",
+                          input_num));
     auto* input = engine_->GetITensor(op_desc.Input("X")[0]);
     // Get output
     size_t output_num = op_desc.Output("Out").size();
-    PADDLE_ENFORCE(output_num == 1);
+    PADDLE_ENFORCE_EQ(output_num, 1UL,
+                      platform::errors::InvalidArgument(
+                          "Invalid output Out's size of prelu TRT converter. "
+                          "Expected 1, received %d.",
+                          output_num));
     // Get attrs
     std::string mode = boost::get<std::string>(op_desc.GetAttr("mode"));
     //
     auto* alpha_var = scope.FindVar(op_desc.Input("Alpha")[0]);
-    PADDLE_ENFORCE_NOT_NULL(alpha_var);
+    PADDLE_ENFORCE_NOT_NULL(
+        alpha_var, platform::errors::NotFound(
+                       "Variable Alpha of prelu TRT converter is not found."));
     auto* alpha_tensor = alpha_var->GetMutable<framework::LoDTensor>();
     platform::CPUPlace cpu_place;
diff --git a/paddle/fluid/inference/tensorrt/convert/split_op.cc b/paddle/fluid/inference/tensorrt/convert/split_op.cc
index cf3f4c007adec971622678c9c3203a09106dcc77..90136c7d5db697b577ffa138ed7ff90b19ca9997 100644
--- a/paddle/fluid/inference/tensorrt/convert/split_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/split_op.cc
@@ -29,14 +29,21 @@ class SplitOpConverter : public OpConverter {
     // Declare inputs
     auto* input = engine_->GetITensor(op_desc.Input("X")[0]);
     auto input_dims = input->getDimensions();
-    int input_num = op_desc.Input("X").size();
+    size_t input_num = op_desc.Input("X").size();
     size_t output_num = op_desc.Output("Out").size();
     // Get Attrs
-    PADDLE_ENFORCE(input_num == 1);
+    PADDLE_ENFORCE_EQ(input_num, 1UL,
+                      platform::errors::InvalidArgument(
+                          "Invalid input X's size of split TRT converter. "
+                          "Expected 1, received %d.",
+                          input_num));
     int axis = boost::get<int>(op_desc.GetAttr("axis"));
     // split on batch is not supported in TensorRT
-    PADDLE_ENFORCE(axis != 0);
+    PADDLE_ENFORCE_NE(
+        axis, 0,
+        platform::errors::InvalidArgument(
+            "Invalid split axis. Split on batch is not supported in TensorRT"));
     std::vector<int> output_lengths =
         boost::get<std::vector<int>>(op_desc.GetAttr("sections"));
@@ -58,9 +65,13 @@ class SplitOpConverter : public OpConverter {
                           "The (%d) dim of input should not be -1", axis));
     if (num > 0) {
       int64_t in_axis_dim = input_dims.d[axis];
-      PADDLE_ENFORCE_EQ(in_axis_dim % num, 0,
-                        "Tensor split does not result"
-                        " in an equal division");
+      PADDLE_ENFORCE_EQ(
+          in_axis_dim % num, 0,
+          platform::errors::InvalidArgument(
+              "Invalid number to split. Tensor split does not result"
+              " in an equal division of dimensions. Axis dim = %d %% num = %d "
+              "!= 0",
+              in_axis_dim, num));
       size_t out_axis_dim = in_axis_dim / num;
       for (int i = 0; i < num; ++i) {
         output_lengths.push_back(out_axis_dim);
diff --git a/paddle/fluid/inference/tensorrt/plugin/split_op_plugin.cu b/paddle/fluid/inference/tensorrt/plugin/split_op_plugin.cu
index 132176220d38813bee0f114c9a906554d39382f1..7a032acef676bfb360d00378a3db8b21c509197a 100644
--- a/paddle/fluid/inference/tensorrt/plugin/split_op_plugin.cu
+++ b/paddle/fluid/inference/tensorrt/plugin/split_op_plugin.cu
@@ -45,8 +45,17 @@ __device__ int upper_bound(T const* vals, int n, T const& key) {
 nvinfer1::Dims SplitPlugin::getOutputDimensions(
     int index, const nvinfer1::Dims* input_dims, int num_inputs) {
-  PADDLE_ENFORCE_EQ(num_inputs, 1);
-  PADDLE_ENFORCE_LT(index, this->getNbOutputs());
+  PADDLE_ENFORCE_EQ(num_inputs, 1,
+                    platform::errors::InvalidArgument(
+                        "Invalid number of inputs of split TRT plugin. "
+                        "Expected 1, received %d.",
+                        num_inputs));
+  PADDLE_ENFORCE_LT(
+      index, this->getNbOutputs(),
+      platform::errors::InvalidArgument(
+          "Index of output should be less than the total number of outputs in "
+          "split TensorRT plugin. Received index = %d >= total outputs = %d",
+          index, this->getNbOutputs()));
   nvinfer1::Dims output_dims = input_dims[0];
   output_dims.d[axis_] = output_length_.at(index);
@@ -54,7 +63,11 @@
 }
 
 int SplitPlugin::initialize() {
-  PADDLE_ENFORCE_LE(axis_, nvinfer1::Dims::MAX_DIMS);
+  PADDLE_ENFORCE_LE(axis_, nvinfer1::Dims::MAX_DIMS,
+                    platform::errors::InvalidArgument(
+                        "Axis dimension exceeds max dimension in TensorRT. "
+                        "Received axis = %d > MAX_DIMS = %d",
+                        axis_, nvinfer1::Dims::MAX_DIMS));
   // notice input dims is [C, H, W]
   nvinfer1::Dims dims = this->getInputDims(0);
   outer_rows_ = 1;
@@ -111,9 +124,12 @@ int SplitPlugin::enqueue(int batchSize, const void* const* inputs,
   float const* input_ptr = reinterpret_cast<float const*>(inputs[0]);
   float* const* h_odatas = reinterpret_cast<float* const*>(outputs);
   float** output_ptrs = thrust::raw_pointer_cast(&d_output_ptrs_[0]);
-  PADDLE_ENFORCE_CUDA_SUCCESS(cudaMemcpyAsync(
-      output_ptrs, h_odatas, d_output_ptrs_.size() * sizeof(float*),
-      cudaMemcpyHostToDevice, stream));
+  PADDLE_ENFORCE_CUDA_SUCCESS(
+      cudaMemcpyAsync(output_ptrs, h_odatas,
+                      d_output_ptrs_.size() * sizeof(float*),
+                      cudaMemcpyHostToDevice, stream),
+      platform::errors::External(
+          "CUDA Memcpy failed during split plugin run."));
 
   int outer_rows = outer_rows_ * batchSize;
@@ -159,7 +175,7 @@ bool SplitPluginDynamic::supportsFormatCombination(
     int nb_outputs) {
   PADDLE_ENFORCE_NOT_NULL(
       in_out, platform::errors::InvalidArgument(
-                  "The input of swish plugin shoule not be nullptr."));
+                  "The input of split plugin should not be nullptr."));
   PADDLE_ENFORCE_LT(
       pos, nb_inputs + nb_outputs,
diff --git a/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h b/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h
index 088dd6d4721becde0ffef64cb31eeb52ac759246..30a554ca40c024d49bbc5336a697c2c3ae5b7e6d 100644
--- a/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h
+++ b/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h
@@ -232,8 +232,14 @@ class TensorRTEngineOp : public framework::OperatorBase {
       auto t_shape = framework::vectorize(t.dims());
      runtime_batch = t_shape[0];
       const int bind_index = engine->engine()->getBindingIndex(x.c_str());
-      PADDLE_ENFORCE(bind_index < num_bindings,
-                     "The bind index should be less than num_bindings");
+      PADDLE_ENFORCE_LT(
+          bind_index, num_bindings,
+          platform::errors::InvalidArgument(
"Wrong TRT engine input binding index. Expected The " + "binding index of TRT engine input to be less than " + "the number of inputs and outputs. Received binding " + "index=%d >= total inputs and outputs=%d", + bind_index, num_bindings)); if (!engine->with_dynamic_shape()) { // check if the input shapes are consistent with model. if (HasAttr(x + "_shape")) {