diff --git a/paddle/fluid/inference/tensorrt/convert/roll_op.cc b/paddle/fluid/inference/tensorrt/convert/roll_op.cc
index df320c0abcdb65f5352de0187d2bc9842c8f4514..a94007999156a2af53c31a4a5fd2bc41a88452e9 100644
--- a/paddle/fluid/inference/tensorrt/convert/roll_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/roll_op.cc
@@ -28,7 +28,7 @@ namespace paddle {
 namespace inference {
 namespace tensorrt {
 /*
- * Stack converter from fluid to tensorRT.
+ * Roll converter from fluid to tensorRT.
  */
 class RollOpConverter : public OpConverter {
  public:
@@ -53,7 +53,8 @@ class RollOpConverter : public OpConverter {
     }
     int axis_size = axis.size();
     for (int i = 0; i < axis_size; i++) {
-      start.d[axis[i]] = (-shifts[i]) % input_dims.d[axis[i]];
+      start.d[axis[i]] =
+          (input_dims.d[axis[i]] - shifts[i]) % input_dims.d[axis[i]];
     }
 
     nvinfer1::Dims stride;
@@ -70,11 +71,9 @@ class RollOpConverter : public OpConverter {
 
     auto output_name = op_desc.Output("Out")[0];
 
-    auto shape_layer = TRT_ENGINE_ADD_LAYER(engine_, Shape, *input);
-
     auto* layer =
         TRT_ENGINE_ADD_LAYER(engine_, Slice, *input, start, size, stride);
-    layer->setInput(2, *shape_layer->getOutput(0));
+    layer->setInput(2, *Shape(input));
 #if IS_TRT_VERSION_GE(7000)
     layer->setMode(nvinfer1::SliceMode::kWRAP);
 #endif
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_roll.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_roll.py
index 8217b3e8d8506e602abcb0f77eb96e4962dcf5c5..a85cafbb5cce0f5b5d29841f8283762c77899dcf 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_roll.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_roll.py
@@ -32,7 +32,7 @@ class TrtConvertRollTest(TrtLayerAutoScanTest):
 
     def sample_program_configs(self):
         def generate_input1(attrs: List[Dict[str, Any]]):
-            return np.ones([1, 56, 56, 192]).astype(np.float32)
+            return np.random.random([1, 56, 56, 192]).astype(np.float32)
 
         for axis in [[1, 2]]:
             for shifts in [[-1, -1], [-3, -3]]:
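
Reviewer note, not part of the patch above: the substantive fix is that the slice start along each rolled axis is now computed as (dim - shift) % dim instead of (-shift) % dim. In C++ the % operator takes the sign of its left operand, so for a positive shift the old expression produced a negative start index; the new form stays non-negative whenever |shift| < dim, which holds for the shifts exercised by the updated test. A minimal standalone sketch of the arithmetic, with illustrative values only:

#include <cstdio>

int main() {
  const int dim = 56;                     // size of the rolled axis, as in the test input
  for (int shift : {3, -3}) {
    int old_start = (-shift) % dim;       // shift=3  -> -3 (negative slice start)
    int new_start = (dim - shift) % dim;  // shift=3  -> 53, shift=-3 -> 3
    std::printf("shift=%d old=%d new=%d\n", shift, old_start, new_start);
  }
  return 0;
}

For negative shifts both expressions agree (e.g. shift=-3 gives 3 either way), so the change only affects the positive-shift case. The test change from np.ones to np.random.random makes the rolled output actually differ from the input, so an incorrect start index can no longer pass unnoticed.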