Unverified commit 80753755, authored by Zhang Jun, committed by GitHub

[inference] update trt convert reduce op&ut,test=develop (#39088)

* [inference] update convert reduce op&ut,test=develop

* update

* update

* update

* add int32 support

* add int32 support

* add comments

* trt < 7.0 does not support int32

* test=develop

* update

* test=develop
Parent commit: 6e871dbc
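
For reference, the integer dtype codes that appear in the updated unit tests below (the out_dtype / in_dtype attribute values -1, 2, and 5) follow Paddle's VarType proto enum. The snippet below is an illustrative, hypothetical lookup table spelling out that mapping, assuming the standard framework.proto ordering; it is not part of the commit.

# Hypothetical reference table for the dtype codes used by the reduce op's
# out_dtype / in_dtype attributes in the tests below, assuming Paddle's
# framework.proto VarType enum ordering.
REDUCE_DTYPE_CODES = {
    -1: "unset (keep the input dtype)",
    2: "INT32",
    5: "FP32",
}

for code, meaning in sorted(REDUCE_DTYPE_CODES.items()):
    print(f"out_dtype={code}: {meaning}")
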
@@ -83,6 +83,8 @@ class ReduceOpConverter : public OpConverter {
     }
     auto output_name = op_desc.Output("Out")[0];
+    // Ensure that the output type and input type are consistent.
+    layer->getOutput(0)->setType(layer->getInput(0)->getType());
     RreplenishLayerAndOutput(layer, op_type, {output_name}, test_mode);
   }
......
@@ -1464,30 +1464,48 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8,
         VLOG(3) << "the " << op_type
                 << " does not have attr (keep_dim or dim or "
                    "reduce_all)";
-        std::cout << "attr " << desc.HasAttr("keep_dim") << " "
-                  << desc.HasAttr("dim") << " " << desc.HasAttr("reduce_all");
         return false;
       }
+      auto* block = desc.Block();
+      if (block == nullptr) {
+        VLOG(3) << "The block desc is nullptr, we can't continue to analyze. "
+                   "Developers need to check whether block_desc is passed in "
+                   "the pass.";
+        return false;
+      }
       // The batch size dimension cannot be reduced if it's not dynamic shape.
+      auto* x_var_desc = block->FindVar(desc.Input("X")[0]);
       if (!with_dynamic_shape) {
         if (BOOST_GET_CONST(bool, desc.GetAttr("reduce_all"))) return false;
         std::vector<int32_t> dim =
             BOOST_GET_CONST(std::vector<int32_t>, desc.GetAttr("dim"));
+        const auto input_shape = x_var_desc->GetShape();
         for (auto x : dim) {
-          if (!x) return false;
+          if (x == 0 || (x + input_shape.size() == 0)) return false;
         }
       } else {
         if (BOOST_GET_CONST(bool, desc.GetAttr("reduce_all")) &&
             !BOOST_GET_CONST(bool, desc.GetAttr("keep_dim")))
           return false;
       }
-      if (desc.HasAttr("out_dtype")) {
-        int out_dtype = BOOST_GET_CONST(int32_t, desc.GetAttr("out_dtype"));
-        if (out_dtype != -1) {
-          return false;
-        }
-      }
+      auto dtype = x_var_desc->GetDataType();
+#if IS_TRT_VERSION_GE(7000)
+      if (dtype != framework::proto::VarType::INT32 &&
+          dtype != framework::proto::VarType::FP32) {
+        VLOG(3) << "reduce op input data type must be int32 or float32";
+        return false;
+      }
+#else
+      if (dtype != framework::proto::VarType::FP32) {
+        VLOG(3)
+            << "reduce op input data type must be float32 using TensorRT < 7.0";
+        return false;
+      }
+#endif
     }
 #if IS_TRT_VERSION_GE(7000)
     if (op_type == "tile") {
......
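
The revised static-shape branch above rejects reducing over the batch dimension whether the axis is written as 0 or as its negative alias -rank (for example -4 on a 4-D input). A minimal Python sketch of the same rule, using a hypothetical helper name that simply mirrors the teller logic:

# Hypothetical helper mirroring the static-shape check added in op_teller.cc:
# with a fixed batch dimension, reducing over axis 0 (or its negative alias
# -rank) cannot be offloaded to TensorRT.
def reduces_batch_dim(dim_attr, input_rank):
    # dim_attr: the reduce op's "dim" attribute; input_rank: len(X.shape)
    return any(x == 0 or x + input_rank == 0 for x in dim_attr)

print(reduces_batch_dim([1, 2], 4))   # False: convertible without dynamic shape
print(reduces_batch_dim([-4, 1], 4))  # True: -4 aliases the batch dimension
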
@@ -22,4 +22,6 @@ REGISTER_OP_CUDA_KERNEL(
     ops::ReduceCudaKernel<paddle::platform::float16, kps::AddFunctor,
                           kps::DivideFunctor>,
     ops::ReduceCudaKernel<float, kps::AddFunctor, kps::DivideFunctor>,
-    ops::ReduceCudaKernel<double, kps::AddFunctor, kps::DivideFunctor>);
+    ops::ReduceCudaKernel<double, kps::AddFunctor, kps::DivideFunctor>,
+    ops::ReduceCudaKernel<int, kps::AddFunctor, kps::DivideFunctor>,
+    ops::ReduceCudaKernel<int64_t, kps::AddFunctor, kps::DivideFunctor>);
@@ -36,26 +36,32 @@ class TrtConvertReduceMeanTest(TrtLayerAutoScanTest):
                 return False
         if len(attrs[0]["dim"]) == 0:
             return False
-        ## skip not use
-        if attrs[0]["out_dtype"] != -1:
-            return False
+        ver = paddle_infer.get_trt_compile_version()
+        if ver[0] * 1000 + ver[1] * 100 + ver[0] * 10 < 7000:
+            if attrs[0]['out_dtype'] == 2:
+                return False
         return True

     def sample_program_configs(self):
-        def generate_input1(attrs: List[Dict[str, Any]]):
-            return np.random.random([1, 3, 64, 64]).astype(np.float32)
+        def generate_input1(dtype, attrs: List[Dict[str, Any]]):
+            if dtype == -1 or dtype == 5:
+                return np.random.random([1, 3, 64, 64]).astype(np.float32)
+            elif dtype == 2:
+                return np.random.random([1, 3, 64, 64]).astype(np.int32)

-        for keep_dim in [False, True]:
+        for keep_dim in [True, False]:
             for dim in [[], [1], [0], [0, 1], [1, 2, 3], [-2, 0, 3], [-3],
                         [-4, 1], [3, 4, 5]]:
-                for reduce_all in [False, True]:
-                    for out_dtype in [-1, 0, 1]:
+                for reduce_all in [True, False]:
+                    for out_dtype in [-1, 2, 5]:
                         dics = [{
                             "keep_dim": keep_dim,
                             "dim": dim,
                             "reduce_all": reduce_all,
-                            "out_dtype": out_dtype
+                            "out_dtype": out_dtype,
+                            "in_dtype": out_dtype,
                         }, {}]

                         ops_config = [{
@@ -75,7 +81,7 @@ class TrtConvertReduceMeanTest(TrtLayerAutoScanTest):
                             weights={},
                             inputs={
                                 "input_data": TensorConfig(data_gen=partial(
-                                    generate_input1, dics))
+                                    generate_input1, out_dtype, dics))
                             },
                             outputs=["reduce_output_data"])
@@ -134,16 +140,6 @@ class TrtConvertReduceMeanTest(TrtLayerAutoScanTest):
         pass

     def add_skip_trt_case(self):
-        def teller1(program_config, predictor_config):
-            if program_config.ops[0].attrs['out_dtype'] != -1:
-                return True
-            return False
-
-        self.add_skip_case(
-            teller1, SkipReasons.TRT_NOT_IMPLEMENTED,
-            "NOT Implemented: we will add out_dtype not equal to -1 in the future"
-        )
         pass

     def test(self):
......
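
Note that `is_program_valid` in the updated tests now keys off the TensorRT build version instead of unconditionally skipping non-default out_dtype values: it folds the compile-time (major, minor, patch) tuple into a single number and drops INT32 (out_dtype == 2) cases below 7.0. Below is a standalone sketch of that gate, assuming a TensorRT-enabled Paddle build; how the patch digit is folded in is illustrative rather than copied from the test.

# Illustrative sketch of the version gate used by the updated tests.
# Assumes Paddle was built with TensorRT, so get_trt_compile_version()
# returns the (major, minor, patch) tuple of the linked TensorRT.
import paddle.inference as paddle_infer

major, minor, patch = paddle_infer.get_trt_compile_version()
trt_version = major * 1000 + minor * 100 + patch * 10
if trt_version < 7000:
    print("TensorRT < 7.0: skip INT32 (out_dtype == 2) reduce cases")
else:
    print("TensorRT >= 7.0: INT32 reduce cases are exercised")
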
@@ -37,26 +37,27 @@ class TrtConvertReduceSumTest(TrtLayerAutoScanTest):
                 return False
         if len(attrs[0]["dim"]) == 0:
             return False
-        ## skip not use
-        if attrs[0]["out_dtype"] != -1:
-            return False
         return True

     def sample_program_configs(self):
-        def generate_input1(attrs: List[Dict[str, Any]]):
-            return np.random.random([1, 3, 64, 64]).astype(np.float32)
+        def generate_input1(dtype, attrs: List[Dict[str, Any]]):
+            if dtype == -1 or dtype == 5:
+                return np.random.random([1, 3, 64, 64]).astype(np.float32)
+            elif dtype == 2:
+                return np.random.random([1, 3, 64, 64]).astype(np.int32)

-        for keep_dim in [False, True]:
+        for keep_dim in [True, False]:
             for dim in [[], [1], [0], [0, 1], [1, 2, 3], [-2, 0, 3], [-3],
                         [-4, 1], [3, 4, 5]]:
-                for reduce_all in [False, True]:
-                    for out_dtype in [-1, 0, 1]:
+                for reduce_all in [True, False]:
+                    for out_dtype in [-1, 2, 5]:
                         dics = [{
                             "keep_dim": keep_dim,
                             "dim": dim,
                             "reduce_all": reduce_all,
-                            "out_dtype": out_dtype
+                            "out_dtype": out_dtype,
+                            "in_dtype": out_dtype,
                         }, {}]

                         ops_config = [{
@@ -76,7 +77,7 @@ class TrtConvertReduceSumTest(TrtLayerAutoScanTest):
                             weights={},
                             inputs={
                                 "input_data": TensorConfig(data_gen=partial(
-                                    generate_input1, dics))
+                                    generate_input1, out_dtype, dics))
                             },
                             outputs=["reduce_output_data"])
@@ -134,16 +135,6 @@ class TrtConvertReduceSumTest(TrtLayerAutoScanTest):
         pass

     def add_skip_trt_case(self):
-        def teller1(program_config, predictor_config):
-            if program_config.ops[0].attrs['out_dtype'] != -1:
-                return True
-            return False
-
-        self.add_skip_case(
-            teller1, SkipReasons.TRT_NOT_IMPLEMENTED,
-            "NOT Implemented: we will add out_dtype not equal to -1 in the future"
-        )
         pass

     def test(self):
......