未验证 提交 46f8e882 编写于 作者: W weishengying 提交者: GitHub

Add symbolic shape deduction function for unfold, scatter_nd_add, p_norm,...

Add symbolic shape deduction function for unfold, scatter_nd_add, p_norm, grid_sampler, pad3d, etc (#46291)
上级 44d15ab1
......@@ -105,10 +105,287 @@ nvinfer1::DimsExprs InstanceNormInferMeta(
return x_dims;
}
// Builds the symbolic expression for one spatial output extent of a
// sliding-window (im2col-style) op:
//   dkernel     = dilation * (filter_size - 1) + 1
//   output_size = (input_size + padding1 + padding2 - dkernel) / stride + 1
// All arguments are symbolic dims owned by TensorRT's expression builder.
inline const nvinfer1::IDimensionExpr* CalcOutputSize(
    const nvinfer1::IDimensionExpr* input_size,
    const nvinfer1::IDimensionExpr* filter_size,
    const nvinfer1::IDimensionExpr* dilation,
    const nvinfer1::IDimensionExpr* padding1,
    const nvinfer1::IDimensionExpr* padding2,
    const nvinfer1::IDimensionExpr* stride,
    nvinfer1::IExprBuilder& expr_builder  // NOLINT
) {
  using Op = nvinfer1::DimensionOperation;
  const auto* one = expr_builder.constant(1);
  // filter_size - 1
  const auto* filter_minus_one =
      expr_builder.operation(Op::kSUB, *filter_size, *one);
  // dkernel = dilation * (filter_size - 1) + 1
  const auto* dkernel = expr_builder.operation(
      Op::kSUM,
      *expr_builder.operation(Op::kPROD, *dilation, *filter_minus_one),
      *one);
  // padded = input_size + padding1 + padding2
  const auto* padded = expr_builder.operation(
      Op::kSUM,
      *expr_builder.operation(Op::kSUM, *input_size, *padding1),
      *padding2);
  // output_size = (padded - dkernel) / stride + 1 (floor division)
  const auto* numerator = expr_builder.operation(Op::kSUB, *padded, *dkernel);
  return expr_builder.operation(
      Op::kSUM,
      *expr_builder.operation(Op::kFLOOR_DIV, *numerator, *stride),
      *one);
}
// Symbolic shape inference for the unfold (im2col) op.
// Input : [N, C, H, W]
// Output: [N, C * kh * kw, out_h * out_w], where out_h/out_w come from
// CalcOutputSize using the kernel_sizes/dilations/paddings/strides attrs.
// (Function name keeps the historical "Unflod" spelling used at the
// registration site.)
nvinfer1::DimsExprs UnflodInferMeta(
    int output_index,
    const nvinfer1::DimsExprs* inputs,
    int nb_inputs,
    nvinfer1::IExprBuilder& expr_builder,  // NOLINT
    const framework::OpDesc& op_desc) {
  PADDLE_ENFORCE_EQ(
      nb_inputs,
      1,
      phi::errors::InvalidArgument("inputs of unfold should be equal to 1, "
                                   "But received (%s)",
                                   nb_inputs));
  const nvinfer1::DimsExprs in_dims = inputs[0];
  const auto kernel_sizes =
      PADDLE_GET_CONST(std::vector<int>, op_desc.GetAttr("kernel_sizes"));
  const auto dilations =
      PADDLE_GET_CONST(std::vector<int>, op_desc.GetAttr("dilations"));
  const auto paddings =
      PADDLE_GET_CONST(std::vector<int>, op_desc.GetAttr("paddings"));
  const auto strides =
      PADDLE_GET_CONST(std::vector<int>, op_desc.GetAttr("strides"));

  nvinfer1::DimsExprs output;
  output.nbDims = 3;
  // Batch dimension passes through unchanged.
  output.d[0] = in_dims.d[0];
  // output_channels = C * kernel_sizes[0] * kernel_sizes[1]
  output.d[1] = expr_builder.operation(
      nvinfer1::DimensionOperation::kPROD,
      *in_dims.d[1],
      *expr_builder.operation(nvinfer1::DimensionOperation::kPROD,
                              *expr_builder.constant(kernel_sizes[0]),
                              *expr_builder.constant(kernel_sizes[1])));
  // Spatial extents of the sliding window output; paddings are laid out as
  // [top, left, bottom, right] here (pairs (0,2) for H, (1,3) for W).
  const auto* out_h = CalcOutputSize(in_dims.d[2],
                                     expr_builder.constant(kernel_sizes[0]),
                                     expr_builder.constant(dilations[0]),
                                     expr_builder.constant(paddings[0]),
                                     expr_builder.constant(paddings[2]),
                                     expr_builder.constant(strides[0]),
                                     expr_builder);
  const auto* out_w = CalcOutputSize(in_dims.d[3],
                                     expr_builder.constant(kernel_sizes[1]),
                                     expr_builder.constant(dilations[1]),
                                     expr_builder.constant(paddings[1]),
                                     expr_builder.constant(paddings[3]),
                                     expr_builder.constant(strides[1]),
                                     expr_builder);
  // Column length = out_h * out_w.
  output.d[2] = expr_builder.operation(
      nvinfer1::DimensionOperation::kPROD, *out_h, *out_w);
  return output;
}
// Symbolic shape inference for scatter_nd_add: the output always takes the
// shape of the first input (the tensor being updated); the Index and
// Updates inputs do not influence it.
nvinfer1::DimsExprs ScatterNdAddInferMeta(
    int output_index,
    const nvinfer1::DimsExprs* inputs,
    int nb_inputs,
    nvinfer1::IExprBuilder& expr_builder,  // NOLINT
    const framework::OpDesc& op_desc) {
  PADDLE_ENFORCE_EQ(nb_inputs,
                    3,
                    phi::errors::InvalidArgument(
                        "inputs of scatter_nd_add should be equal to 3, "
                        "But received (%s)",
                        nb_inputs));
  return inputs[0];
}
// Pass-through shape inference: the single output shares the single input's
// symbolic shape (registered below for ops such as `inverse`).
nvinfer1::DimsExprs UnchangedInferMeta(
    int output_index,
    const nvinfer1::DimsExprs* inputs,
    int nb_inputs,
    nvinfer1::IExprBuilder& expr_builder,  // NOLINT
    const framework::OpDesc& op_desc) {
  PADDLE_ENFORCE_EQ(nb_inputs,
                    1,
                    phi::errors::InvalidArgument(
                        "inputs of UnchangedInferMeta should be equal to 1, "
                        "But received (%s)",
                        nb_inputs));
  const nvinfer1::DimsExprs unchanged = inputs[0];
  return unchanged;
}
// Symbolic shape inference for pad3d with a constant "paddings" attribute.
// The paddings vector is consumed as pairs: (paddings[4], paddings[5]) on
// depth, (paddings[2], paddings[3]) on height, (paddings[0], paddings[1])
// on width; each padded dim becomes x + lo + hi. Batch and channel dims
// pass through untouched.
nvinfer1::DimsExprs Pad3dInferMeta(
    int output_index,
    const nvinfer1::DimsExprs* inputs,
    int nb_inputs,
    nvinfer1::IExprBuilder& expr_builder,  // NOLINT
    const framework::OpDesc& op_desc) {
  const nvinfer1::DimsExprs x_dim = inputs[0];
  const auto paddings =
      PADDLE_GET_CONST(std::vector<int>, op_desc.GetAttr("paddings"));
  const auto data_format =
      PADDLE_GET_CONST(std::string, op_desc.GetAttr("data_format"));

  // dim + lo + hi, built as two chained kSUM expression nodes.
  auto pad_both = [&](const nvinfer1::IDimensionExpr* dim, int lo, int hi) {
    return expr_builder.operation(
        nvinfer1::DimensionOperation::kSUM,
        *expr_builder.operation(nvinfer1::DimensionOperation::kSUM,
                                *dim,
                                *expr_builder.constant(lo)),
        *expr_builder.constant(hi));
  };

  nvinfer1::DimsExprs out_dims;
  out_dims.nbDims = x_dim.nbDims;
  out_dims.d[0] = x_dim.d[0];  // batch is never padded
  if (data_format == "NCDHW") {
    out_dims.d[1] = x_dim.d[1];  // channel
    out_dims.d[2] = pad_both(x_dim.d[2], paddings[4], paddings[5]);  // depth
    out_dims.d[3] = pad_both(x_dim.d[3], paddings[2], paddings[3]);  // height
    out_dims.d[4] = pad_both(x_dim.d[4], paddings[0], paddings[1]);  // width
  } else {  // NDHWC
    out_dims.d[4] = x_dim.d[4];  // channel
    out_dims.d[1] = pad_both(x_dim.d[1], paddings[4], paddings[5]);  // depth
    out_dims.d[2] = pad_both(x_dim.d[2], paddings[2], paddings[3]);  // height
    out_dims.d[3] = pad_both(x_dim.d[3], paddings[0], paddings[1]);  // width
  }
  return out_dims;
}
// Symbolic shape inference for p_norm.
// asvector=true : the whole tensor is reduced; output is [1] (or all-ones
//                 of the input rank when keepdim).
// asvector=false: only `axis` is reduced; with keepdim that axis becomes 1,
//                 otherwise it is dropped (a 1-D input still yields [1]).
nvinfer1::DimsExprs PNormInferMeta(
    int output_index,
    const nvinfer1::DimsExprs* inputs,
    int nb_inputs,
    nvinfer1::IExprBuilder& expr_builder,  // NOLINT
    const framework::OpDesc& op_desc) {
  const nvinfer1::DimsExprs x_dim = inputs[0];
  std::vector<const nvinfer1::IDimensionExpr*> reduce_dims;
  std::vector<const nvinfer1::IDimensionExpr*> keep_dims;
  bool asvector = PADDLE_GET_CONST(bool, op_desc.GetAttr("asvector"));
  bool keepdim = PADDLE_GET_CONST(bool, op_desc.GetAttr("keepdim"));
  int axis = PADDLE_GET_CONST(int, op_desc.GetAttr("axis"));
  if (asvector) {
    reduce_dims.emplace_back(expr_builder.constant(1));
    keep_dims.emplace_back(expr_builder.constant(1));
    if (keepdim) {
      for (int i = 1; i < x_dim.nbDims; ++i) {
        keep_dims.emplace_back(expr_builder.constant(1));
      }
    }
  } else {
    if (axis < 0) axis = x_dim.nbDims + axis;
    for (int i = 0; i < x_dim.nbDims; ++i) {
      // keep_dims mirrors the full input rank; reduce_dims drops `axis`.
      keep_dims.emplace_back(x_dim.d[i]);
      if (i != axis) reduce_dims.emplace_back(x_dim.d[i]);
    }
    if (reduce_dims.size() == 0) {
      reduce_dims.emplace_back(expr_builder.constant(1));
    }
    // Fix: mark the reduced axis as 1 only here, where keep_dims has
    // nbDims entries and axis has been normalized. The previous code did
    // this unconditionally after the branch, writing keep_dims[axis] when
    // keep_dims could be shorter than axis+1 (asvector path) or when axis
    // was still negative — out-of-bounds / undefined behavior.
    keep_dims[axis] = expr_builder.constant(1);
  }
  nvinfer1::DimsExprs output;
  if (keepdim) {
    output.nbDims = keep_dims.size();
    for (int i = 0; i < output.nbDims; i++) output.d[i] = keep_dims[i];
  } else {
    output.nbDims = reduce_dims.size();
    for (int i = 0; i < output.nbDims; i++) output.d[i] = reduce_dims[i];
  }
  return output;
}
// Symbolic shape inference for grid_sampler.
// 4-D grid [N, H_out, W_out, 2]        -> output [N, C, H_out, W_out]
// 5-D grid [N, D_out, H_out, W_out, 3] -> output [N, C, D_out, H_out, W_out]
nvinfer1::DimsExprs GridSamplerInferMeta(
    int output_index,
    const nvinfer1::DimsExprs* inputs,
    int nb_inputs,
    nvinfer1::IExprBuilder& expr_builder,  // NOLINT
    const framework::OpDesc& op_desc) {
  const nvinfer1::DimsExprs x_dims = inputs[0];
  const nvinfer1::DimsExprs grid_dims = inputs[1];
  nvinfer1::DimsExprs output;
  if (grid_dims.nbDims == 4) {
    output.nbDims = 4;
    output.d[0] = x_dims.d[0];
    output.d[1] = x_dims.d[1];
    output.d[2] = grid_dims.d[1];
    output.d[3] = grid_dims.d[2];
  } else {
    // Fix: the 5-D branch fills five dims (d[0]..d[4]) but previously set
    // nbDims = 4, truncating the reported rank and leaving d[4] uncounted.
    output.nbDims = 5;
    output.d[0] = x_dims.d[0];
    output.d[1] = x_dims.d[1];
    output.d[2] = grid_dims.d[1];
    output.d[3] = grid_dims.d[2];
    output.d[4] = grid_dims.d[3];
  }
  return output;
}
// Register dynamic-shape (symbolic) infer-meta functions for ops executed
// through the TensorRT generic plugin. The first argument is the op type
// string; the second is the inference function defined above (or earlier
// in this file).
PD_REGISTER_DYNAMIC_INFER_META_FN(gather_nd, GatherNdInferMeta);
PD_REGISTER_DYNAMIC_INFER_META_FN(yolo_box, YoloBoxInferMeta);
PD_REGISTER_DYNAMIC_INFER_META_FN(instance_norm, InstanceNormInferMeta);
PD_REGISTER_DYNAMIC_INFER_META_FN(unfold, UnflodInferMeta);
PD_REGISTER_DYNAMIC_INFER_META_FN(scatter_nd_add, ScatterNdAddInferMeta);
PD_REGISTER_DYNAMIC_INFER_META_FN(inverse, UnchangedInferMeta);
PD_REGISTER_DYNAMIC_INFER_META_FN(pad3d, Pad3dInferMeta);
PD_REGISTER_DYNAMIC_INFER_META_FN(grid_sampler, GridSamplerInferMeta);
} // namespace tensorrt
} // namespace inference
} // namespace paddle
......@@ -23,6 +23,11 @@ namespace tensorrt {
// Pull in the dynamic infer-meta registrations declared in the registration
// translation unit so their symbols are retained at link time. Each entry
// here should match a PD_REGISTER_DYNAMIC_INFER_META_FN elsewhere.
USE_TRT_DYNAMIC_INFER_META_FN(gather_nd);
USE_TRT_DYNAMIC_INFER_META_FN(yolo_box);
USE_TRT_DYNAMIC_INFER_META_FN(instance_norm);
USE_TRT_DYNAMIC_INFER_META_FN(unfold);
USE_TRT_DYNAMIC_INFER_META_FN(scatter_nd_add);
USE_TRT_DYNAMIC_INFER_META_FN(pad3d);
USE_TRT_DYNAMIC_INFER_META_FN(inverse);
USE_TRT_DYNAMIC_INFER_META_FN(grid_sampler);
} // namespace tensorrt
} // namespace inference
} // namespace paddle
......@@ -2353,6 +2353,14 @@ struct GenericPluginTeller : public Teller {
if (!desc.HasAttr("iou_aware") && !desc.HasAttr("iou_aware_factor"))
return false;
}
if (op_type == "pad3d") {
auto pad3d_inputs = desc.Inputs();
if (pad3d_inputs.find("Paddings") != pad3d_inputs.end()) {
if (desc.Input("Paddings").size() >= 1) {
return false;
}
}
}
if (use_no_calib_int8) {
return false;
} else {
......
......@@ -290,6 +290,10 @@ bool GenericPlugin::supportsFormatCombination(
if (op_desc_.Type() == "gather_nd" || op_desc_.Type() == "yolo_box") {
if (pos == 0) return in_out[pos].type == nvinfer1::DataType::kFLOAT;
if (pos == 1) return in_out[pos].type == nvinfer1::DataType::kINT32;
} else if (op_desc_.Type() == "scatter_nd_add") {
if (pos == 0) return in_out[pos].type == nvinfer1::DataType::kFLOAT;
if (pos == 1) return in_out[pos].type == nvinfer1::DataType::kINT32;
if (pos == 2) return in_out[pos].type == nvinfer1::DataType::kFLOAT;
} else {
return in_out[pos].type == nvinfer1::DataType::kFLOAT;
}
......
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from trt_layer_auto_scan_test import TrtLayerAutoScanTest
from program_config import TensorConfig, ProgramConfig
import numpy as np
import paddle.inference as paddle_infer
from functools import partial
from typing import List
import unittest
class TrtConvertGridSampler(TrtLayerAutoScanTest):
    """Auto-scan test for converting `grid_sampler` (4-D input) to TensorRT."""

    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        # Every sampled program is acceptable for this op.
        return True

    def sample_program_configs(self):
        def make_x():
            return np.random.random([1, 3, 32, 32]).astype(np.float32)

        def make_grid():
            return np.random.random([1, 3, 3, 2]).astype(np.float32)

        ops = self.generate_op_config([{
            "op_type": "grid_sampler",
            "op_inputs": {
                "X": ["input_data"],
                "Grid": ["grid_data"],
            },
            "op_outputs": {
                "Output": ["output_data"]
            },
            "op_attrs": {}
        }])
        for _ in range(10):
            yield ProgramConfig(
                ops=ops,
                weights={},
                inputs={
                    "input_data": TensorConfig(data_gen=partial(make_x)),
                    "grid_data": TensorConfig(data_gen=partial(make_grid)),
                },
                outputs=["output_data"])

    def sample_predictor_configs(
            self, program_config) -> (paddle_infer.Config, List[int], float):

        def generate_dynamic_shape(attrs):
            self.dynamic_shape.min_input_shape = {
                "input_data": [1, 3, 32, 32],
                "grid_data": [1, 3, 3, 2]
            }
            self.dynamic_shape.max_input_shape = {
                "input_data": [1, 3, 64, 64],
                "grid_data": [1, 3, 4, 4]
            }
            self.dynamic_shape.opt_input_shape = {
                "input_data": [1, 3, 32, 32],
                "grid_data": [1, 3, 3, 2]
            }

        def clear_dynamic_shape():
            self.dynamic_shape.max_input_shape = {}
            self.dynamic_shape.min_input_shape = {}
            self.dynamic_shape.opt_input_shape = {}

        attrs = [op.attrs for op in program_config.ops]

        # Static shape (expected op-count tuple (0, 4)).
        clear_dynamic_shape()
        for precision in (paddle_infer.PrecisionType.Float32,
                          paddle_infer.PrecisionType.Half):
            self.trt_param.precision = precision
            yield self.create_inference_config(), (0, 4), 1e-5

        # Dynamic shape (expected op-count tuple (1, 3)).
        generate_dynamic_shape(attrs)
        for precision in (paddle_infer.PrecisionType.Float32,
                          paddle_infer.PrecisionType.Half):
            self.trt_param.precision = precision
            yield self.create_inference_config(), (1, 3), 1e-5

    def test(self):
        self.run_test()
if __name__ == "__main__":
unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from trt_layer_auto_scan_test import TrtLayerAutoScanTest
from program_config import TensorConfig, ProgramConfig
import numpy as np
import paddle.inference as paddle_infer
from functools import partial
from typing import List
import unittest
class TrtConvertInverse(TrtLayerAutoScanTest):
    """Auto-scan test for converting the `inverse` op to TensorRT."""

    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        # No attribute combinations need to be rejected for this op.
        return True

    def sample_program_configs(self):
        def square_matrix():
            return np.random.random([32, 32]).astype(np.float32)

        ops = self.generate_op_config([{
            "op_type": "inverse",
            "op_inputs": {
                "Input": ["input_data"],
            },
            "op_outputs": {
                "Output": ["output_data"]
            },
            "op_attrs": {}
        }])
        for _ in range(10):
            yield ProgramConfig(
                ops=ops,
                weights={},
                inputs={
                    "input_data":
                    TensorConfig(data_gen=partial(square_matrix)),
                },
                outputs=["output_data"])

    def sample_predictor_configs(
            self, program_config) -> (paddle_infer.Config, List[int], float):

        def generate_dynamic_shape(attrs):
            self.dynamic_shape.min_input_shape = {"input_data": [1, 1]}
            self.dynamic_shape.max_input_shape = {"input_data": [64, 64]}
            self.dynamic_shape.opt_input_shape = {"input_data": [32, 32]}

        def clear_dynamic_shape():
            self.dynamic_shape.max_input_shape = {}
            self.dynamic_shape.min_input_shape = {}
            self.dynamic_shape.opt_input_shape = {}

        attrs = [op.attrs for op in program_config.ops]

        # Static shape (expected op-count tuple (0, 3)).
        clear_dynamic_shape()
        for precision in (paddle_infer.PrecisionType.Float32,
                          paddle_infer.PrecisionType.Half):
            self.trt_param.precision = precision
            yield self.create_inference_config(), (0, 3), 1e-5

        # Dynamic shape (expected op-count tuple (1, 2)).
        generate_dynamic_shape(attrs)
        for precision in (paddle_infer.PrecisionType.Float32,
                          paddle_infer.PrecisionType.Half):
            self.trt_param.precision = precision
            yield self.create_inference_config(), (1, 2), 1e-5

    def test(self):
        self.run_test()
if __name__ == "__main__":
unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from trt_layer_auto_scan_test import TrtLayerAutoScanTest
from program_config import TensorConfig, ProgramConfig
import numpy as np
import paddle.inference as paddle_infer
from functools import partial
from typing import List
import unittest
class TrtConvertPad3d(TrtLayerAutoScanTest):
    """Auto-scan test for converting `pad3d` (constant paddings attr) to TensorRT."""

    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        # All sampled attribute combinations are valid programs.
        return True

    def sample_program_configs(self):
        def make_input():
            return np.ones([1, 1, 3, 64, 64]).astype(np.float32)

        # Sweep the `value` flag and several paddings layouts, including
        # negative paddings.
        for value in [True, False]:
            for paddings in [[0, 0, 0, 0, 1, 1], [0, 0, 1, 2, 3, 4],
                             [1, 1, 1, 1, 1, 1], [0, 0, -1, -1, 1, 1]]:
                dics = [{"value": value, "paddings": paddings}, {}]
                ops = self.generate_op_config([{
                    "op_type": "pad3d",
                    "op_inputs": {
                        "X": ["input_data"]
                    },
                    "op_outputs": {
                        "Out": ["output_data"]
                    },
                    "op_attrs": dics[0]
                }])
                for _ in range(10):
                    yield ProgramConfig(
                        ops=ops,
                        weights={},
                        inputs={
                            "input_data":
                            TensorConfig(data_gen=partial(make_input)),
                        },
                        outputs=["output_data"])

    def sample_predictor_configs(
            self, program_config) -> (paddle_infer.Config, List[int], float):

        def generate_dynamic_shape(attrs):
            shape = {"input_data": [1, 1, 3, 64, 64]}
            self.dynamic_shape.min_input_shape = dict(shape)
            self.dynamic_shape.max_input_shape = dict(shape)
            self.dynamic_shape.opt_input_shape = dict(shape)

        def clear_dynamic_shape():
            self.dynamic_shape.max_input_shape = {}
            self.dynamic_shape.min_input_shape = {}
            self.dynamic_shape.opt_input_shape = {}

        attrs = [op.attrs for op in program_config.ops]

        # Static shape (expected op-count tuple (0, 3)).
        clear_dynamic_shape()
        for precision in (paddle_infer.PrecisionType.Float32,
                          paddle_infer.PrecisionType.Half):
            self.trt_param.precision = precision
            yield self.create_inference_config(), (0, 3), 1e-5

        # Dynamic shape (expected op-count tuple (1, 2)).
        generate_dynamic_shape(attrs)
        for precision in (paddle_infer.PrecisionType.Float32,
                          paddle_infer.PrecisionType.Half):
            self.trt_param.precision = precision
            yield self.create_inference_config(), (1, 2), 1e-5

    def test(self):
        self.run_test()
if __name__ == "__main__":
unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from trt_layer_auto_scan_test import TrtLayerAutoScanTest
from program_config import TensorConfig, ProgramConfig
import numpy as np
import paddle.inference as paddle_infer
from functools import partial
from typing import List
import unittest
class TrtConvertScatterNd(TrtLayerAutoScanTest):
    """Auto-scan test for converting `scatter_nd_add` to TensorRT."""

    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        # Every sampled program is acceptable for this op.
        return True

    def sample_program_configs(self):
        def make_ref():
            return np.random.random([6]).astype(np.float32)

        def make_index():
            # NOTE(review): random floats in [0, 1) cast to int32 collapse
            # to all-zero indices — confirm this is the intended coverage.
            return np.random.random([4, 1]).astype(np.int32)

        def make_updates():
            return np.random.random([4]).astype(np.float32)

        ops = self.generate_op_config([{
            "op_type": "scatter_nd_add",
            "op_inputs": {
                "X": ["input_data"],
                "Index": ["index_data"],
                "Updates": ["update_data"]
            },
            "op_outputs": {
                "Out": ["output_data"]
            },
            "op_attrs": {}
        }])
        for _ in range(10):
            yield ProgramConfig(
                ops=ops,
                weights={},
                inputs={
                    "input_data": TensorConfig(data_gen=partial(make_ref)),
                    "index_data": TensorConfig(data_gen=partial(make_index)),
                    "update_data":
                    TensorConfig(data_gen=partial(make_updates)),
                },
                outputs=["output_data"])

    def sample_predictor_configs(
            self, program_config) -> (paddle_infer.Config, List[int], float):

        def generate_dynamic_shape(attrs):
            self.dynamic_shape.min_input_shape = {
                "input_data": [1],
                "index_data": [2, 1],
                "update_data": [1],
            }
            self.dynamic_shape.max_input_shape = {
                "input_data": [6],
                "index_data": [4, 1],
                "update_data": [4],
            }
            self.dynamic_shape.opt_input_shape = {
                "input_data": [6],
                "index_data": [4, 1],
                "update_data": [4],
            }

        def clear_dynamic_shape():
            self.dynamic_shape.max_input_shape = {}
            self.dynamic_shape.min_input_shape = {}
            self.dynamic_shape.opt_input_shape = {}

        attrs = [op.attrs for op in program_config.ops]

        # Static shape (expected op-count tuple (0, 5)).
        clear_dynamic_shape()
        for precision in (paddle_infer.PrecisionType.Float32,
                          paddle_infer.PrecisionType.Half):
            self.trt_param.precision = precision
            yield self.create_inference_config(), (0, 5), 1e-5

        # Dynamic shape (expected op-count tuple (1, 4)).
        generate_dynamic_shape(attrs)
        for precision in (paddle_infer.PrecisionType.Float32,
                          paddle_infer.PrecisionType.Half):
            self.trt_param.precision = precision
            yield self.create_inference_config(), (1, 4), 1e-5

    def test(self):
        self.run_test()
if __name__ == "__main__":
unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from trt_layer_auto_scan_test import TrtLayerAutoScanTest
from program_config import TensorConfig, ProgramConfig
import numpy as np
import paddle.inference as paddle_infer
from functools import partial
from typing import List
import unittest
class TrtConvertUnfold(TrtLayerAutoScanTest):
    """Auto-scan test for converting `unfold` (im2col) to TensorRT."""

    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        # Every sampled program is acceptable for this op.
        return True

    def sample_program_configs(self):
        def make_input():
            return np.random.random([1, 3, 24, 24]).astype(np.float32)

        ops = self.generate_op_config([{
            "op_type": "unfold",
            "op_inputs": {
                "X": ["input_data"],
            },
            "op_outputs": {
                "Y": ["output_data"]
            },
            "op_attrs": {
                "dilations": [1, 1],
                "kernel_sizes": [4, 4],
                "paddings": [0, 0, 0, 0],
                "strides": [1, 1],
            }
        }])
        for _ in range(10):
            yield ProgramConfig(
                ops=ops,
                weights={},
                inputs={
                    "input_data":
                    TensorConfig(data_gen=partial(make_input)),
                },
                outputs=["output_data"])

    def sample_predictor_configs(
            self, program_config) -> (paddle_infer.Config, List[int], float):

        def generate_dynamic_shape(attrs):
            self.dynamic_shape.min_input_shape = {
                "input_data": [1, 3, 4, 4],
            }
            self.dynamic_shape.max_input_shape = {
                "input_data": [1, 3, 24, 24],
            }
            self.dynamic_shape.opt_input_shape = {
                "input_data": [1, 3, 24, 24],
            }

        def clear_dynamic_shape():
            self.dynamic_shape.max_input_shape = {}
            self.dynamic_shape.min_input_shape = {}
            self.dynamic_shape.opt_input_shape = {}

        attrs = [op.attrs for op in program_config.ops]

        # Static shape (expected op-count tuple (0, 3)).
        clear_dynamic_shape()
        for precision in (paddle_infer.PrecisionType.Float32,
                          paddle_infer.PrecisionType.Half):
            self.trt_param.precision = precision
            yield self.create_inference_config(), (0, 3), 1e-5

        # Dynamic shape (expected op-count tuple (1, 2)).
        generate_dynamic_shape(attrs)
        for precision in (paddle_infer.PrecisionType.Float32,
                          paddle_infer.PrecisionType.Half):
            self.trt_param.precision = precision
            yield self.create_inference_config(), (1, 2), 1e-5

    def test(self):
        self.run_test()
if __name__ == "__main__":
unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册