Unverified commit 0eea5d71, authored by yingshengBD, committed by GitHub

Post-training quantization: support inserting fake_quantize_dequantize nodes before the OPs that are used in VIS's faceid models (#30659)

test=develop
Parent 06a3e311
@@ -460,6 +460,10 @@ class PostTrainingQuantization(object):
graph = _apply_pass(self._scope, graph, 'conv_bn_fuse_pass')
graph = _apply_pass(self._scope, graph, 'depthwise_conv_bn_fuse_pass')
graph = _apply_pass(self._scope, graph, 'conv_transpose_bn_fuse_pass')
graph = _apply_pass(self._scope, graph, 'conv_eltwiseadd_bn_fuse_pass')
graph = _apply_pass(self._scope, graph,
'depthwise_conv_eltwiseadd_bn_fuse_pass')
self._program = graph.to_program()
def _collect_target_varnames(self):
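The hunk above adds conv + elementwise_add + batch_norm fuse passes to the ones already applied before calibration, presumably so that scales are collected on the same fused graph that inference will run. As a rough illustration of what BN folding does to a preceding conv, here is a minimal numpy sketch; `fold_bn_into_conv` is an illustrative helper, not Paddle's pass implementation, and it assumes a plain conv2d weight layout of [out_c, in_c, kh, kw].

```python
# Minimal numpy sketch (not Paddle's pass implementation) of folding a
# following BatchNorm into a conv's weight and bias, which is the effect of
# the *_bn_fuse_pass passes applied above before calibration. Assumes a
# plain conv2d weight layout [out_c, in_c, kh, kw]; conv2d_transpose uses a
# different layout and would fold along another axis.
import numpy as np

def fold_bn_into_conv(weight, bias, gamma, beta, mean, var, eps=1e-5):
    scale = gamma / np.sqrt(var + eps)          # per-output-channel factor
    fused_weight = weight * scale.reshape(-1, 1, 1, 1)
    fused_bias = (bias - mean) * scale + beta
    return fused_weight, fused_bias
```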
@@ -74,6 +74,11 @@ _out_scale_op_list = [
"bilinear_interp",
"nearest_interp",
"trilinear_interp",
"flatten",
"flatten2",
"transpose",
"pad2d",
"reshape",
]
# list op real input and output names, to avoid processing input such as AxisTensor.
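Adding flatten, flatten2, transpose, pad2d, and reshape to `_out_scale_op_list` means their output tensors also get an output scale recorded during calibration. Below is an illustrative abs-max style sketch of such collection; the statistic Paddle actually records depends on the configured algo, and `OutScaleCollector` is a hypothetical helper, not part of the library.

```python
# Illustrative sketch of abs-max style output-scale collection for the ops
# listed above; the statistic Paddle actually records depends on the chosen
# algo (e.g. abs_max, KL), and OutScaleCollector is a hypothetical helper.
import numpy as np

class OutScaleCollector:
    def __init__(self):
        self.scales = {}                        # var_name -> running abs-max

    def update(self, var_name, tensor):
        cur = float(np.max(np.abs(tensor)))
        self.scales[var_name] = max(self.scales.get(var_name, 0.0), cur)
```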
@@ -121,6 +126,9 @@ _op_real_in_out_name = {
"hard_sigmoid": [["X"], ["Out"]],
"gru": [["Input", "Weight"], ["Hidden"]],
"lstm": [["Input", "Weight"], ["Hidden"]],
"pad2d": [["X"], ["Out"]],
"flatten": [["X"], ["Out"]],
"flatten2": [["X"], ["Out"]],
}
_conv_ops = ['conv2d', 'depthwise_conv2d', 'conv2d_transpose']
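The new `_op_real_in_out_name` entries tell the quantization passes which input/output slots of pad2d, flatten, and flatten2 carry real data tensors, so attribute-like inputs are skipped. A hedged sketch of how such a map is typically consumed follows; `real_tensor_names` and the plain-dict `op_inputs` are stand-ins for illustration, not Paddle's IR API.

```python
# Hedged sketch of how a map like _op_real_in_out_name is typically consumed:
# only the listed slots are walked when choosing tensors to observe/quantize,
# so attribute-like inputs (such as AxisTensor, per the comment above) are
# skipped. real_tensor_names and `op_inputs` are illustrative stand-ins.
_real_in_out = {
    "pad2d":    [["X"], ["Out"]],
    "flatten":  [["X"], ["Out"]],
    "flatten2": [["X"], ["Out"]],
}

def real_tensor_names(op_type, op_inputs):
    in_slots, _out_slots = _real_in_out.get(op_type, ([], []))
    return [name for slot in in_slots for name in op_inputs.get(slot, [])]

# e.g. real_tensor_names("flatten", {"X": ["conv1_out"]}) -> ["conv1_out"]
```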
@@ -1691,7 +1699,8 @@ class AddQuantDequantPass(object):
"less_than", "mean", "not_equal", "reshape", "reshape2",
"bilinear_interp", "nearest_interp", "trilinear_interp", "slice",
"squeeze", "elementwise_sub", "mul", "matmul", "relu", "relu6",
"leaky_relu", "tanh", "swish"
"leaky_relu", "tanh", "swish", "scale", "transpose", "transpose2",
"sigmoid", "pad2d", "flatten", "flatten2", "batch_norm"
]
# To be compatible with PaddleSlim, not remove _activation_type for now
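For every op type in the list above, AddQuantDequantPass inserts a fake quantize-dequantize node in front of the op, which simulates integer rounding while keeping the tensor in float. A minimal numeric sketch of that simulation, assuming symmetric 8-bit quantization; `fake_quant_dequant` is an illustrative helper, not the graph op the pass actually inserts.

```python
# Minimal sketch of the computation a fake quantize-dequantize node performs
# on an activation: a symmetric 8-bit round trip through the integer grid.
# fake_quant_dequant is an illustrative helper, not the graph op the pass
# inserts; `scale` would come from a calibration statistic such as a
# moving-average abs-max.
import numpy as np

def fake_quant_dequant(x, scale, bits=8):
    qmax = 2 ** (bits - 1) - 1                  # 127 for int8
    x = np.clip(x, -scale, scale)
    return np.round(x / scale * qmax) * scale / qmax
```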