Unverified commit bb4f7777 authored by bukejiyu, committed by GitHub

[inference][trt] unary, bitwise_not, one_hot op and inspector CI adjust for TensorRT 8.6 (#54251) (#54579)

* Adapt the unary and bitwise_not converters to TensorRT 8.6 in Paddle-TensorRT

* Fix the one_hot op CI

* Update test_trt_inspector.py: delete log
Parent 35de47b3
@@ -43,7 +43,7 @@ class UnaryOpConverter : public OpConverter {
         engine_->GetITensor(op_desc.Input("X")[0]);
     auto op_pair = ops.find(op_type_);
     nvinfer1::ILayer* layer = nullptr;
+#if !IS_TRT_VERSION_GE(8500)
     nvinfer1::DataType org_type = input_tensor->getType();
     bool cast = org_type == nvinfer1::DataType::kINT8 ||
                 org_type == nvinfer1::DataType::kINT32;
@@ -56,19 +56,19 @@ class UnaryOpConverter : public OpConverter {
       }
       input_tensor = layer->getOutput(0);
     }
+#endif
     for (auto trt_op : op_pair->second) {
       layer = TRT_ENGINE_ADD_LAYER(engine_, Unary, *input_tensor, trt_op);
       input_tensor = layer->getOutput(0);
     }
+#if !IS_TRT_VERSION_GE(8500)
     // type restore
     if (cast) {
       layer = TRT_ENGINE_ADD_LAYER(engine_, Identity, *input_tensor);
       layer->setOutputType(0, org_type);
       input_tensor = layer->getOutput(0);
     }
+#endif
     auto output_name = op_desc.Output("Out")[0];
     RreplenishLayerAndOutput(layer, op_type_, {output_name}, test_mode);
   }
......
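Reviewer note: the new #if !IS_TRT_VERSION_GE(8500) guards restrict the cast-and-restore workaround to TensorRT builds older than 8.5; on 8.5+ the unary layer is fed the kINT8/kINT32 tensor directly. A minimal Python sketch of the gate being introduced (hypothetical helper, not Paddle code):

def needs_cast_around_unary(trt_version: int, dtype: str) -> bool:
    # Mirrors the #if !IS_TRT_VERSION_GE(8500) guard: only pre-8.5
    # builds wrap integer inputs in Identity casts around the unary op.
    return trt_version < 8500 and dtype in ("int8", "int32")

assert needs_cast_around_unary(8406, "int32")      # pre-8.5: cast inserted
assert not needs_cast_around_unary(8510, "int32")  # 8.5+: no cast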
@@ -1881,19 +1881,29 @@ struct SimpleOpTypeSetTeller : public Teller {
     }
     if (op_type == "bitwise_not") {
-#if !IS_TRT_VERSION_GE(8400)
       auto* block = desc.Block();
       auto x_var_name = desc.Input("X")[0];
       auto* x_var_desc = block->FindVar(x_var_name);
       auto dtype = x_var_desc->GetDataType();
-      if (dtype == framework::proto::VarType::BOOL ||
-          dtype == framework::proto::VarType::INT8 ||
+      if (dtype == framework::proto::VarType::INT8 ||
           dtype == framework::proto::VarType::UINT8) {
-        VLOG(3) << "BOOL / INT8 / UINT8 type support requires TensorRT 8.4";
+        VLOG(3) << "INT8 / UINT8 type convert to trt is not supported";
+        return false;
+      }
+      if (dtype == framework::proto::VarType::BOOL) {
+#if !IS_TRT_VERSION_GE(8400)
+        VLOG(3) << "BOOL type support requires TensorRT 8.4";
+        return false;
+#elif !IS_TRT_VERSION_GE(8600)
+        const auto x_shape = x_var_desc->GetShape();
+        if (x_shape.size() == 0) {
+          VLOG(3)
+              << "BOOL type does not support 0 dim input when TensorRT < 8.6.";
           return false;
         }
 #endif
       }
+    }
     if (op_type == "one_hot" || op_type == "one_hot_v2") {
 #if IS_TRT_VERSION_LT(8510)
......
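Reviewer note: with this change the teller's bitwise_not gating reduces to a small decision table; a Python sketch for readability (hypothetical helper mirroring the C++ above, not part of op_teller):

def bitwise_not_convertible(dtype: str, ndim: int, trt_version: int) -> bool:
    if dtype in ("int8", "uint8"):
        return False  # INT8 / UINT8 are never converted to TRT
    if dtype == "bool":
        if trt_version < 8400:
            return False  # BOOL support requires TensorRT 8.4
        if trt_version < 8600 and ndim == 0:
            return False  # 0-dim BOOL input requires TensorRT 8.6
    return True

assert not bitwise_not_convertible("bool", 0, 8510)
assert bitwise_not_convertible("bool", 0, 8600)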
@@ -31,7 +31,9 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest):
         self.trt_param.workspace_size = 1073741824

         def generate_input1(dims, batch, attrs: List[Dict[str, Any]]):
-            if dims == 1:
+            if dims == 0:
+                return np.random.random([]).astype(np.bool8)
+            elif dims == 1:
                 return np.random.random([32]).astype(np.bool8)
             elif dims == 2:
                 return np.random.random([3, 32]).astype(np.int8)
@@ -40,7 +42,7 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest):
             else:
                 return np.random.random([batch, 3, 32, 32]).astype(np.int64)

-        for dims in [1, 2, 3, 4]:
+        for dims in [0, 1, 2, 3, 4]:
             for batch in [1, 4]:
                 self.dims = dims
                 dics = [{}]
@@ -65,13 +67,20 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest):
                     },
                     outputs=["output_data"],
                 )
+                program_config.input_type = program_config.inputs[
+                    'input_data'
+                ].dtype
                 yield program_config

     def sample_predictor_configs(
         self, program_config
     ) -> (paddle_infer.Config, List[int], float):
         def generate_dynamic_shape(attrs):
-            if self.dims == 1:
+            if self.dims == 0:
+                self.dynamic_shape.min_input_shape = {"input_data": []}
+                self.dynamic_shape.max_input_shape = {"input_data": []}
+                self.dynamic_shape.opt_input_shape = {"input_data": []}
+            elif self.dims == 1:
                 self.dynamic_shape.min_input_shape = {"input_data": [1]}
                 self.dynamic_shape.max_input_shape = {"input_data": [64]}
                 self.dynamic_shape.opt_input_shape = {"input_data": [32]}
@@ -102,16 +111,19 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest):
         def generate_trt_nodes_num(attrs, dynamic_shape):
             ver = paddle_infer.get_trt_compile_version()
             trt_version = ver[0] * 1000 + ver[1] * 100 + ver[2] * 10
-            if trt_version >= 8400:
-                if self.dims == 1:
+            if not dynamic_shape:
+                if self.dims == 1 or self.dims == 0:
                     return 0, 3
+            if program_config.input_type in ['int8', 'uint8']:
+                return 0, 3
+            elif program_config.input_type == 'bool':
+                if trt_version <= 8600 and self.dims == 0:
+                    return 0, 3
+                elif trt_version <= 8400:
+                    return 0, 3
+                else:
                     return 1, 2
             else:
-                if self.dims <= 2 or (
-                    program_config.inputs['input_data'].dtype
-                    in ['bool', 'int8', 'uint8']
-                ):
-                    return 0, 3
                 return 1, 2

         attrs = [
......
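Reviewer note: thresholds such as trt_version <= 8600 above compare against an integer folded from the (major, minor, patch) tuple; a quick sketch of the arithmetic:

# Stand-in for paddle_infer.get_trt_compile_version(), e.g. TensorRT 8.6.1:
ver = (8, 6, 1)
trt_version = ver[0] * 1000 + ver[1] * 100 + ver[2] * 10
assert trt_version == 8610  # 8.4.0 -> 8400, 8.6.0 -> 8600, 8.6.1 -> 8610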
@@ -52,10 +52,10 @@ class TrtConvertOneHotTest(TrtLayerAutoScanTest):
             dics = [{"dtype": 5, "depth": 10}, {}]
             ops_config = [
                 {
-                    "op_type": "one_hot",
+                    "op_type": "one_hot_v2",
                     "op_inputs": {
-                        "X": ["input_x_data"],
-                        "depth_tensor": ["input_depth_data"],
+                        "X": ["indices_tensor"],
+                        "depth_tensor": ["depth_tensor_data"],
                     },
                     "op_outputs": {"Out": ["output_data"]},
                     "op_attrs": dics[0],
@@ -67,7 +67,7 @@ class TrtConvertOneHotTest(TrtLayerAutoScanTest):
             program_config = ProgramConfig(
                 ops=ops,
                 weights={
-                    "depth_tensor": TensorConfig(
+                    "depth_tensor_data": TensorConfig(
                         data_gen=partial(generate_depth, dims, batch)
                     ),
                 },
@@ -87,43 +87,43 @@ class TrtConvertOneHotTest(TrtLayerAutoScanTest):
         def generate_dynamic_shape(attrs):
             if self.dims == 1:
                 self.dynamic_shape.min_input_shape = {
-                    "input_x_data": [1],
+                    "indices_tensor": [1],
                 }
                 self.dynamic_shape.max_input_shape = {
-                    "input_x_data": [2],
+                    "indices_tensor": [2],
                 }
                 self.dynamic_shape.opt_input_shape = {
-                    "input_x_data": [1],
+                    "indices_tensor": [1],
                 }
             elif self.dims == 2:
                 self.dynamic_shape.min_input_shape = {
-                    "input_x_data": [1, 4],
+                    "indices_tensor": [1, 4],
                 }
                 self.dynamic_shape.max_input_shape = {
-                    "input_x_data": [2, 4],
+                    "indices_tensor": [2, 4],
                 }
                 self.dynamic_shape.opt_input_shape = {
-                    "input_x_data": [1, 4],
+                    "indices_tensor": [1, 4],
                 }
             elif self.dims == 3:
                 self.dynamic_shape.min_input_shape = {
-                    "input_x_data": [1, 4, 6],
+                    "indices_tensor": [1, 4, 6],
                 }
                 self.dynamic_shape.max_input_shape = {
-                    "input_x_data": [2, 4, 6],
+                    "indices_tensor": [2, 4, 6],
                 }
                 self.dynamic_shape.opt_input_shape = {
-                    "input_x_data": [1, 4, 6],
+                    "indices_tensor": [1, 4, 6],
                 }
             elif self.dims == 4:
                 self.dynamic_shape.min_input_shape = {
-                    "input_x_data": [1, 4, 6, 8],
+                    "indices_tensor": [1, 4, 6, 8],
                 }
                 self.dynamic_shape.max_input_shape = {
-                    "input_x_data": [2, 4, 6, 8],
+                    "indices_tensor": [2, 4, 6, 8],
                 }
                 self.dynamic_shape.opt_input_shape = {
-                    "input_x_data": [1, 4, 6, 8],
+                    "indices_tensor": [1, 4, 6, 8],
                 }

         def clear_dynamic_shape():
......
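Reviewer note: in the config above, "dtype": 5 is Paddle's VarType code for FP32 and "depth": 10 fixes the one-hot width. As a reference for the semantics under test, a NumPy sketch (illustrative only, not the Paddle kernel):

import numpy as np

def one_hot_ref(indices: np.ndarray, depth: int) -> np.ndarray:
    # out[..., k] = 1 where k == indices[...], else 0
    out = np.zeros(indices.shape + (depth,), dtype=np.float32)
    np.put_along_axis(out, indices[..., None], 1.0, axis=-1)
    return out

assert one_hot_ref(np.array([1, 3]), depth=10).shape == (2, 10)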
@@ -37,7 +37,7 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest):
         def generate_input1(dims, batch, attrs: List[Dict[str, Any]]):
             if dims == 0:
                 return np.random.random([]).astype(np.float32)
-            if dims == 2:
+            elif dims == 2:
                 return np.random.random([3, 32]).astype(np.float32)
             elif dims == 3:
                 return np.random.random([3, 32, 32]).astype(np.float32)
@@ -47,7 +47,7 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest):
         def generate_int_input(dims, batch, attrs: List[Dict[str, Any]]):
             if dims == 0:
                 return np.random.random([]).astype(np.int32)
-            if dims == 2:
+            elif dims == 2:
                 return np.random.random([3, 32]).astype(np.int32)
             elif dims == 3:
                 return np.random.random([3, 32, 32]).astype(np.int32)
@@ -146,7 +146,11 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest):
         self, program_config
     ) -> (paddle_infer.Config, List[int], float):
         def generate_dynamic_shape(attrs):
-            if self.dims == 1:
+            if self.dims == 0:
+                self.dynamic_shape.min_input_shape = {"input_data": []}
+                self.dynamic_shape.max_input_shape = {"input_data": []}
+                self.dynamic_shape.opt_input_shape = {"input_data": []}
+            elif self.dims == 1:
                 self.dynamic_shape.min_input_shape = {"input_data": [1]}
                 self.dynamic_shape.max_input_shape = {"input_data": [64]}
                 self.dynamic_shape.opt_input_shape = {"input_data": [32]}
@@ -193,6 +197,8 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest):
                 and self.dims == 0
             ):
                 return 0, 3
+            if not dynamic_shape and (self.dims == 1 or self.dims == 0):
+                return 0, 3
             return 1, 2

         attrs = [
......
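Reviewer note: the new dims == 0 branches exercise 0-dim (scalar) inputs, whose dynamic-shape profile entries are empty lists; a small sketch of what that case feeds the predictor (names as in the test):

import numpy as np

x = np.random.random([]).astype(np.float32)  # 0-dim scalar tensor
assert x.ndim == 0 and x.shape == ()
# min/max/opt profiles for a scalar all use the empty shape:
profile = {"input_data": []}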
@@ -72,7 +72,7 @@ class TensorRTInspectorTest(InferencePassTest):
                 self.assertTrue('====== engine info ======' in engine_info)
                 self.assertTrue('====== engine info end ======' in engine_info)
                 self.assertTrue('matmul' in engine_info)
-                self.assertTrue('LayerType: Scale' in engine_info)
+                self.assertTrue('"LayerType": "Scale"' in engine_info)
                 self.assertTrue('batch_norm' in engine_info)
             else:
                 self.assertTrue(
......
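Reviewer note: the expected substring changes from LayerType: Scale to "LayerType": "Scale", apparently because the engine-inspector dump is JSON-quoted under TensorRT 8.6; an illustrative sketch (the excerpt below is made up, only the quoting matters):

import json

engine_info = '{"Layers": [{"LayerType": "Scale", "Name": "batch_norm_0"}]}'
assert '"LayerType": "Scale"' in engine_info
assert json.loads(engine_info)["Layers"][0]["LayerType"] == "Scale"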