Unverified commit 7766721a, authored by W wenbin, committed by GitHub

Disable conv plugin in old TRT versions (#33198)

Parent d7d3090f
......@@ -52,11 +52,6 @@ class ActivationOpConverter : public OpConverter {
engine_->GetITensor(op_desc.Input("X")[0]);
auto op_pair = ops.find(op_type_);
if (op_pair == ops.end()) {
PADDLE_THROW(platform::errors::Fatal(
    "Wrong activation op type, TRT does not support the %s act type.",
    op_type_));
}
nvinfer1::IActivationLayer* layer = TRT_ENGINE_ADD_LAYER(
engine_, Activation, *const_cast<nvinfer1::ITensor*>(input_tensor),
......
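For context, the ops map consulted above pairs Paddle activation op names with TensorRT activation enums; op types missing from it were rejected by the PADDLE_THROW removed here. A minimal sketch of such a table (assumed contents; the actual map in the converter may list more entries):

#include <string>
#include <unordered_map>
#include <NvInfer.h>

// Assumed sketch: maps a Paddle activation op type to the TRT enum that
// TRT_ENGINE_ADD_LAYER(engine_, Activation, ...) receives.
static const std::unordered_map<std::string, nvinfer1::ActivationType> ops = {
    {"relu", nvinfer1::ActivationType::kRELU},
    {"sigmoid", nvinfer1::ActivationType::kSIGMOID},
    {"tanh", nvinfer1::ActivationType::kTANH},
};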
......@@ -55,16 +55,6 @@ class AffineChannelOpConverter : public OpConverter {
auto* bias_t = bias_v->GetMutable<framework::LoDTensor>();
float* bias_ptr = engine_->GetWeightCPUData(bias_name, bias_t, false);
auto data_layout = framework::StringToDataLayout(
BOOST_GET_CONST(std::string, op_desc.GetAttr("data_layout")));
PADDLE_ENFORCE_EQ(
data_layout, framework::DataLayout::kNCHW,
platform::errors::InvalidArgument(
"TensorRT affine channel converter can only convert NCHW format. "
"Other format should be run in fluid mode. Report a bug on github "
"issue if you see this line."));
// the TensorRT scale_nd layer only supports spatial dims >= 2,
// so NHWC is not available (spatial dims == 0)
const int channel_axis = engine_->with_dynamic_shape();
......
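A note on the last line above: with_dynamic_shape() returns a bool, so channel_axis becomes 0 or 1. In static-shape mode the TRT network omits the batch dimension (CHW input, channels at axis 0); with dynamic shape the batch dimension is explicit (NCHW, channels at axis 1). Spelled out, the conversion is equivalent to:

// Equivalent, more explicit form of the bool-to-int conversion above.
const int channel_axis = engine_->with_dynamic_shape() ? 1 /*NCHW*/ : 0 /*CHW*/;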
......@@ -25,10 +25,6 @@ static bool CheckDims(const nvinfer1::Dims& dims_x,
return false;
}
for (int i = 0; i < dims_x.nbDims; i++) {
// conservative check: dynamic (-1) dims may resolve to different runtime values
if (dims_x.d[i] == -1 || dims_y.d[i] == -1) {
return false;
}
if (dims_x.d[i] != dims_y.d[i]) {
return false;
}
......
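The -1 guard above is the conservative part: under dynamic shape a dimension is recorded as -1, and two -1 entries may resolve to different values at runtime, so equality of the visible dims proves nothing. A hypothetical call showing the rejection:

// Hypothetical example: both tensors report [-1, 64, 64]; CheckDims
// returns false because the dynamic (-1) dims may differ at runtime.
nvinfer1::Dims dims_x, dims_y;
dims_x.nbDims = dims_y.nbDims = 3;
dims_x.d[0] = -1; dims_x.d[1] = 64; dims_x.d[2] = 64;
dims_y.d[0] = -1; dims_y.d[1] = 64; dims_y.d[2] = 64;
bool same = CheckDims(dims_x, dims_y);  // false: conservative rejection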
......@@ -225,6 +225,27 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8,
<< desc.Output("Output").size() << " output.";
return false;
}
    // strides > 1 with 'SAME' padding is only supported by TRT 7.0 and above
#if !IS_TRT_VERSION_GE(7000)
    if (op_type == "conv2d" || op_type == "conv2d_fusion" ||
        op_type == "depthwise_conv2d") {
      if (desc.HasAttr("padding_algorithm") && with_dynamic_shape) {
        auto padding_algorithm =
            BOOST_GET_CONST(std::string, desc.GetAttr("padding_algorithm"));
        if (padding_algorithm == "SAME" && desc.HasAttr("strides")) {
          const std::vector<int> strides =
              BOOST_GET_CONST(std::vector<int>, desc.GetAttr("strides"));
          // there is no issue if strides.size() is less than 2
          if (strides.size() > 1) {
            for (size_t i = 0; i < strides.size(); i++) {
              if (strides[i] > 1) return false;
            }
          }
        }
      }
    }
#endif
}
if (op_type == "matmul") {
......
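The new guard is resolved at compile time: IS_TRT_VERSION_GE(7000) folds TensorRT's version macros from NvInfer.h into one comparable number, so the conv restriction is compiled in only for builds against TRT older than 7.0. A sketch of how such a gate is commonly defined (Paddle's actual macro lives in its TRT helper header and may differ in detail):

#include <NvInfer.h>  // provides NV_TENSORRT_MAJOR / MINOR / PATCH / BUILD

// Under this encoding, 7000 corresponds to TRT 7.0.0.0.
#define IS_TRT_VERSION_GE(version)                        \
  ((NV_TENSORRT_MAJOR * 1000 + NV_TENSORRT_MINOR * 100 +  \
    NV_TENSORRT_PATCH * 10 + NV_TENSORRT_BUILD) >= version)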
......@@ -161,5 +161,70 @@ class TensorRTSubgraphPassDepthwiseConvTransposeTest(
        self.use_cudnn = False


class DynamicShapeTensorRTSubgraphPassConvTest(InferencePassTest):
    def setUp(self):
        self.set_params()
        with fluid.program_guard(self.main_program, self.startup_program):
            data = fluid.data(
                name="data", shape=[-1, 6, -1, -1], dtype="float32")
            conv_out = fluid.layers.conv2d(
                input=data,
                num_filters=self.conv_num_filters,
                filter_size=self.conv_filter_size,
                groups=self.conv_groups,
                padding=self.conv_padding,
                bias_attr=False,
                use_cudnn=self.use_cudnn,
                stride=self.stride,
                act=None)
        self.feeds = {
            "data": np.random.random([32, 6, 64, 64]).astype("float32"),
        }
        self.enable_trt = True
        self.trt_parameters = DynamicShapeTensorRTSubgraphPassConvTest.TensorRTParam(
            1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False)
        self.dynamic_shape_params = DynamicShapeTensorRTSubgraphPassConvTest.DynamicShapeParam(
            {
                "conv2d_0.tmp_0": [1, 6, 8, 8],
                "data": [1, 6, 8, 8],
                "depthwise_conv2d_0.tmp_0": [1, 6, 8, 8]
            }, {
                "conv2d_0.tmp_0": [32, 6, 64, 64],
                "data": [32, 6, 64, 64],
                "depthwise_conv2d_0.tmp_0": [32, 6, 64, 64]
            }, {
                "conv2d_0.tmp_0": [16, 6, 16, 16],
                "data": [16, 6, 16, 16],
                "depthwise_conv2d_0.tmp_0": [32, 6, 64, 64]
            }, False)
        self.fetch_list = [conv_out]

    def set_params(self):
        self.conv_num_filters = 6
        self.conv_filter_size = 6
        self.conv_groups = 6
        self.conv_padding = 'SAME'
        self.use_cudnn = True
        self.stride = [2, 2]

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            use_gpu = True
            self.check_output_with_option(use_gpu)
            self.assertTrue(
                PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))


class DynamicShapeTensorRTSubgraphPassDepthwiseConvTransposeTest(
        DynamicShapeTensorRTSubgraphPassConvTest):
    def set_params(self):
        self.conv_num_filters = 6
        self.conv_filter_size = 6
        self.conv_groups = 6
        self.conv_padding = 'SAME'
        self.use_cudnn = False
        self.stride = [2, 2]


if __name__ == "__main__":
    unittest.main()
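For reference, the setup this test drives through TensorRTParam and DynamicShapeParam corresponds roughly to the C++ inference API below (a sketch: only the graph input "data" is shown, the shapes are copied from the test, and the exact EnableTensorRtEngine signature can vary across Paddle releases):

#include <map>
#include <string>
#include <vector>
#include "paddle/fluid/inference/api/paddle_inference_api.h"

void ConfigureDynamicShapeTRT(paddle::AnalysisConfig* config) {
  // 1 GB workspace, max batch 32, min subgraph size 0, FP32 precision,
  // no serialized engine, no INT8 calibration (mirrors TensorRTParam).
  config->EnableTensorRtEngine(1 << 30, 32, 0,
                               paddle::AnalysisConfig::Precision::kFloat32,
                               false, false);
  std::map<std::string, std::vector<int>> min_shape{{"data", {1, 6, 8, 8}}};
  std::map<std::string, std::vector<int>> max_shape{{"data", {32, 6, 64, 64}}};
  std::map<std::string, std::vector<int>> opt_shape{{"data", {16, 6, 16, 16}}};
  // The trailing false mirrors the test's last DynamicShapeParam argument
  // (disable_trt_plugin_fp16).
  config->SetTRTDynamicShapeInfo(min_shape, max_shape, opt_shape, false);
}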