Unverified commit ed5766ff authored by Zhaolong Xing, committed by GitHub

refine act conv2d pool2d trt converter log (#23605)

* refine act conv2d pool2d fc, trt converter log
test=develop

* fix comments
test=develop
Parent 70f7c23f
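
This commit applies one idiom throughout the activation, conv2d, fc, and pool2d converters: bare PADDLE_ENFORCE_* / PADDLE_THROW calls gain a typed platform::errors payload and a message that names the offending op or input. Below is a minimal sketch of that idiom, not code from the change itself; the CheckTrtConverterInput helper and its arguments are hypothetical and only illustrate the macro usage, assuming the usual PaddlePaddle enforce headers.

#include <string>

#include "paddle/fluid/platform/enforce.h"

namespace paddle {

// Hypothetical helper: validates a converter's input count and op type the
// same way the converters touched by this commit do.
void CheckTrtConverterInput(size_t input_num, const std::string& op_type,
                            bool op_supported) {
  // Structured check: a typed InvalidArgument error carrying the actual
  // value, instead of a bare PADDLE_ENFORCE_EQ(input_num, 1UL).
  PADDLE_ENFORCE_EQ(input_num, 1UL,
                    platform::errors::InvalidArgument(
                        "TRT %s converter expects 1 input, but got %d.",
                        op_type, input_num));

  // Unsupported op types raise a Fatal error that names the op, instead of
  // a bare PADDLE_THROW("Wrong op type!").
  if (!op_supported) {
    PADDLE_THROW(platform::errors::Fatal(
        "TRT does not support the %s op type.", op_type));
  }
}

}  // namespace paddle
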
@@ -36,7 +36,9 @@ class ActivationOpConverter : public OpConverter {
auto op_pair = ops.find(op_type_);
if (op_pair == ops.end()) {
PADDLE_THROW("Wrong activation op type!");
PADDLE_THROW(platform::errors::Fatal(
"Wrong activation op type, the trt do not support the %s act type.",
op_type_));
}
nvinfer1::IActivationLayer* layer = TRT_ENGINE_ADD_LAYER(
@@ -26,14 +26,25 @@ void ConvertConv2d(TensorRTEngine* engine, const framework::proto::OpDesc& op,
VLOG(3) << "convert a fluid " << name << " op to tensorrt layer without bias";
framework::OpDesc op_desc(op, nullptr);
PADDLE_ENFORCE_EQ(op_desc.Input("Input").size(), 1);
PADDLE_ENFORCE_EQ(op_desc.Input("Filter").size(), 1); // Y is a weight
PADDLE_ENFORCE_EQ(op_desc.Output("Output").size(), 1);
PADDLE_ENFORCE_EQ(op_desc.Input("Input").size(), 1UL,
platform::errors::InvalidArgument(
"TRT Conv2d expect 1 input, but got %d input.",
op_desc.Input("Input").size()));
PADDLE_ENFORCE_EQ(op_desc.Input("Filter").size(), 1UL,
platform::errors::InvalidArgument(
"TRT Conv2d expect 1 filter, but got %d filter.",
op_desc.Input("Filter").size()));
PADDLE_ENFORCE_EQ(op_desc.Output("Output").size(), 1UL,
platform::errors::InvalidArgument(
"TRT Conv2d expect 1 output, but got %d output.",
op_desc.Output("Output").size()));
PADDLE_ENFORCE(engine != nullptr);
auto* X = engine->GetITensor(op_desc.Input("Input").front());
auto* Y_v = scope.FindVar(op_desc.Input("Filter").front());
PADDLE_ENFORCE_NOT_NULL(Y_v);
std::string filter_var_name = op_desc.Input("Filter").front();
auto* Y_v = scope.FindVar(filter_var_name);
PADDLE_ENFORCE_NOT_NULL(
Y_v, platform::errors::NotFound(
"Can not find %s presistale var in scope.", filter_var_name));
auto* Y_t = Y_v->GetMutable<framework::LoDTensor>();
float* weight_data = nullptr;
bool enable_int8 = boost::get<bool>(op_desc.HasAttr("enable_int8"));
@@ -53,7 +64,11 @@ void ConvertConv2d(TensorRTEngine* engine, const framework::proto::OpDesc& op,
engine->GetWeightCPUData(op_desc.Input("Filter").front(), Y_t, false);
}
PADDLE_ENFORCE_EQ(Y_t->dims().size(), 4UL);
PADDLE_ENFORCE_EQ(Y_t->dims().size(), 4UL,
platform::errors::InvalidArgument(
"The conv2d filter's dims size should be 4, but got %d",
Y_t->dims().size()));
const int n_output = Y_t->dims()[0];
const int n_input = Y_t->dims()[1];
const int filter_h = Y_t->dims()[2];
@@ -133,10 +148,14 @@ class Deconv2dOpConverter : public OpConverter {
return layer;
},
[](nvinfer1::IDeconvolutionLayer* layer, nvinfer1::DimsHW& dilations) {
PADDLE_ENFORCE(
dilations.d[0] == 1 && dilations.d[1] == 1,
"Dilations must be (1, 1) for tensorRT, but given (%d, %d)",
dilations.d[0], dilations.d[1]);
// In TRT Deconv, dilations should be (1, 1); other values are not
// supported.
bool condition = (dilations.d[0] == 1 && dilations.d[1] == 1);
PADDLE_ENFORCE_EQ(condition, true,
platform::errors::InvalidArgument(
"In Deconv, Dilations must be (1, 1) for "
"tensorRT, but given (%d, %d)",
dilations.d[0], dilations.d[1]));
},
"conv2d_transpose");
}
@@ -40,7 +40,9 @@ class FcOpConverter : public OpConverter {
auto* X = engine_->GetITensor(op_desc.Input(i_name).front());
// Declare weights
auto* Y_v = scope.FindVar(op_desc.Input(w_name).front());
PADDLE_ENFORCE_NOT_NULL(Y_v);
PADDLE_ENFORCE_NOT_NULL(
Y_v, platform::errors::NotFound(
"Can not find %s presistale var of fc in scope.", w_name));
auto* Y_t = Y_v->GetMutable<framework::LoDTensor>();
const int x_num_col_dims =
op_desc.HasAttr("x_num_col_dims")
@@ -71,7 +73,11 @@ class FcOpConverter : public OpConverter {
engine_->GetWeightCPUData(op_desc.Input(w_name).front(), Y_t, false);
}
PADDLE_ENFORCE_EQ(Y_t->dims().size(), 2UL); // a matrix
PADDLE_ENFORCE_EQ(Y_t->dims().size(), 2UL,
platform::errors::InvalidArgument(
"The fc's weight should be a matrix with 2 dims, but "
"it's %d-dimensional.",
Y_t->dims().size())); // a matrix
size_t n_output = Y_t->dims()[1];
int m = Y_t->dims()[0];
@@ -56,15 +56,19 @@ class Pool2dOpConverter : public OpConverter {
VLOG(4)
<< "convert a fluid pool2d op to tensorrt pool2d layer without bias";
framework::OpDesc op_desc(op, nullptr);
// Declare inputs
PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1);
PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1);
PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1UL,
platform::errors::InvalidArgument(
"TRT Pool2d expect 1 input, but got %d input.",
op_desc.Input("X").size()));
PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1UL,
platform::errors::InvalidArgument(
"TRT Pool2d expect 1 Output, but got %d output.",
op_desc.Output("Out").size()));
auto *input1 = engine_->GetITensor(op_desc.Input("X")[0]);
nvinfer1::Dims input_shape = input1->getDimensions();
int input_dims = input_shape.nbDims;
PADDLE_ENFORCE_EQ(input_dims, 3UL);
bool global_pooling = boost::get<bool>(op_desc.GetAttr("global_pooling"));
std::string pool_type =
boost::get<std::string>(op_desc.GetAttr("pooling_type"));
@@ -89,7 +93,9 @@ class Pool2dOpConverter : public OpConverter {
nv_pool_type = nvinfer1::PoolingType::kAVERAGE;
plugin_pool_type = plugin::PoolPlugin::PoolType::avg;
} else {
PADDLE_THROW("TensorRT unsupported pooling type!");
PADDLE_THROW(platform::errors::Fatal(
"Wrong pool op type, the trt do not support the %s pool type.",
pool_type));
}
nvinfer1::DimsHW nv_ksize(ksize[0], ksize[1]);
@@ -112,7 +118,9 @@ class Pool2dOpConverter : public OpConverter {
auto *layer = TRT_ENGINE_ADD_LAYER(
engine_, Pooling, *const_cast<nvinfer1::ITensor *>(input1),
nv_pool_type, nv_ksize);
PADDLE_ENFORCE_NOT_NULL(layer, "pool layer could not be created.");
PADDLE_ENFORCE_NOT_NULL(
layer, platform::errors::Fatal(
"trt pool layer in converter could not be created."));
auto output_name = op_desc.Output("Out")[0];
layer->setName(("pool2d (Output: " + output_name + ")").c_str());
layer->getOutput(0)->setName(output_name.c_str());
@@ -138,13 +146,17 @@ class Pool2dOpConverter : public OpConverter {
engine_, Padding, *const_cast<nvinfer1::ITensor *>(input1), pre_pad,
post_pad);
PADDLE_ENFORCE_NOT_NULL(
pad_layer, "pad layer in poolOp converter could not be created.");
pad_layer,
platform::errors::Fatal(
"pad layer in poolOp converter could not be created."));
input1 = pad_layer->getOutput(0);
}
auto *pool_layer = TRT_ENGINE_ADD_LAYER(
engine_, Pooling, *const_cast<nvinfer1::ITensor *>(input1),
nv_pool_type, nv_ksize);
PADDLE_ENFORCE_NOT_NULL(pool_layer, "pool layer could not be created.");
PADDLE_ENFORCE_NOT_NULL(
pool_layer, platform::errors::Fatal(
"trt pool layer in converter could not be created."));
pool_layer->setStride(nv_strides);
pool_layer->setPadding(nv_paddings);
layer = pool_layer;
@@ -159,9 +171,11 @@ class Pool2dOpConverter : public OpConverter {
plugin::PoolPlugin *plugin =
new plugin::PoolPlugin(ceil_mode, plugin_pool_type, adaptive, ksize,
strides, paddings, input_shape_v);
PADDLE_ENFORCE_NOT_NULL(plugin->getPluginType(),
"The plugin used must not be null");
auto *pool_layer = engine_->AddPlugin(&input1, 1, plugin);
PADDLE_ENFORCE_NOT_NULL(
pool_layer,
platform::errors::Fatal(
"trt pool plugin layer in converter could not be created."));
layer = pool_layer;
}