Unverified commit 0d9185b9, authored by wenbin, committed by GitHub

Unary (#49914)

* disable integer

* disable integer

* add cast layer
Parent c3cd8502
@@ -52,13 +52,33 @@ class UnaryOpConverter : public OpConverter {
    nvinfer1::ITensor* input_tensor =
        engine_->GetITensor(op_desc.Input("X")[0]);
    auto op_pair = ops.find(op_type_);
    nvinfer1::IUnaryLayer* layer = nullptr;
    nvinfer1::ILayer* layer;
#if !IS_TRT_VERSION_GE(8500)
    nvinfer1::DataType org_type = input_tensor->getType();
    bool cast = org_type == nvinfer1::DataType::kINT8 ||
                org_type == nvinfer1::DataType::kINT32;
    if (cast) {
      layer = TRT_ENGINE_ADD_LAYER(engine_, Identity, *input_tensor);
      if (engine_->precision() == AnalysisConfig::Precision::kFloat32) {
        layer->setOutputType(0, nvinfer1::DataType::kFLOAT);
      } else {
        layer->setOutputType(0, nvinfer1::DataType::kHALF);
      }
      input_tensor = layer->getOutput(0);
    }
#endif
    for (auto trt_op : op_pair->second) {
      layer = TRT_ENGINE_ADD_LAYER(engine_, Unary, *input_tensor, trt_op);
      input_tensor = layer->getOutput(0);
    }
#if !IS_TRT_VERSION_GE(8500)
    // type restore
    if (cast) {
      layer = TRT_ENGINE_ADD_LAYER(engine_, Identity, *input_tensor);
      layer->setOutputType(0, org_type);
      input_tensor = layer->getOutput(0);
    }
#endif
    auto output_name = op_desc.Output("Out")[0];
    RreplenishLayerAndOutput(layer, op_type_, {output_name}, test_mode);
  }
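To summarize the converter change above: TensorRT versions before 8.5 do not accept INT8/INT32 inputs to the unary layer, so the converter first casts such inputs to a floating-point type through an Identity layer (kFLOAT under FP32 precision, kHALF otherwise), runs the unary ops, and then casts the result back to the original integer type; on TensorRT 8.5 and later both cast blocks are compiled out by the IS_TRT_VERSION_GE(8500) guards. The sketch below restates this pattern in isolation against the plain TensorRT INetworkDefinition API rather than Paddle's TRT_ENGINE_ADD_LAYER macro and engine_ wrapper; the function name ConvertUnaryWithCast is illustrative only, and for brevity it always casts to kFLOAT.

#include <NvInfer.h>

// Minimal sketch of the cast-around-unary pattern, assuming a raw
// INetworkDefinition instead of Paddle's converter infrastructure.
nvinfer1::ITensor* ConvertUnaryWithCast(nvinfer1::INetworkDefinition* network,
                                        nvinfer1::ITensor* input,
                                        nvinfer1::UnaryOperation op) {
  nvinfer1::DataType org_type = input->getType();
  bool cast = org_type == nvinfer1::DataType::kINT8 ||
              org_type == nvinfer1::DataType::kINT32;
  nvinfer1::ITensor* x = input;
  if (cast) {
    // On TRT < 8.5 the unary layer rejects integer inputs, so route them
    // through an Identity layer whose output type is forced to float.
    nvinfer1::IIdentityLayer* to_float = network->addIdentity(*x);
    to_float->setOutputType(0, nvinfer1::DataType::kFLOAT);
    x = to_float->getOutput(0);
  }
  nvinfer1::IUnaryLayer* unary = network->addUnary(*x, op);
  x = unary->getOutput(0);
  if (cast) {
    // Restore the original integer type so downstream layers are unaffected.
    nvinfer1::IIdentityLayer* restore = network->addIdentity(*x);
    restore->setOutputType(0, org_type);
    x = restore->getOutput(0);
  }
  return x;
}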
@@ -42,6 +42,14 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest):
            else:
                return np.random.random([batch, 3, 32, 32]).astype(np.float32)

        def generate_int_input(dims, batch, attrs: List[Dict[str, Any]]):
            if dims == 2:
                return np.random.random([3, 32]).astype(np.int32)
            elif dims == 3:
                return np.random.random([3, 32, 32]).astype(np.int32)
            else:
                return np.random.random([batch, 3, 32, 32]).astype(np.int32)

        for dims in [2, 3, 4]:
            for batch in [1, 4]:
                for op_type in [
@@ -96,6 +104,39 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest):
                    yield program_config

                for op_type in [
                    "exp",
                    "abs",
                ]:
                    self.dims = dims
                    self.op_type = op_type
                    dics = [{}]
                    ops_config = [
                        {
                            "op_type": op_type,
                            "op_inputs": {"X": ["input_data"]},
                            "op_outputs": {"Out": ["output_data"]},
                            "op_attrs": dics[0],
                        }
                    ]
                    ops = self.generate_op_config(ops_config)
                    program_config = ProgramConfig(
                        ops=ops,
                        weights={},
                        inputs={
                            "input_data": TensorConfig(
                                data_gen=partial(
                                    generate_int_input, dims, batch, dics
                                )
                            )
                        },
                        outputs=["output_data"],
                    )
                    yield program_config

    def sample_predictor_configs(
        self, program_config
    ) -> (paddle_infer.Config, List[int], float):