diff --git a/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py b/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py
index 702ec4d9f940d6c0dd7cfdfe8b84bebd65192c42..4e9924260b45e0c14f7d2a056ae4b4c9dba6c520 100644
--- a/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py
+++ b/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py
@@ -1244,10 +1244,13 @@ class AddQuantDequantPass(object):
                 for input_name in input_names:
                     in_node = graph._find_node_by_name(op_node.inputs,
                                                        input_name)
-                    quant_var_node, scale_var_node = \
-                        self._inser_quant_dequant_moving_average_abs_max_op(
-                            graph, in_node, self._quant_bits)
-                    dequantized_vars_map[input_name] = quant_var_node
+                    if input_name in dequantized_vars_map:
+                        quant_var_node = dequantized_vars_map[input_name]
+                    else:
+                        quant_var_node, scale_var_node = \
+                            self._inser_quant_dequant_moving_average_abs_max_op(
+                                graph, in_node, self._quant_bits)
+                        dequantized_vars_map[input_name] = quant_var_node
                     graph.update_input_link(in_node, quant_var_node, op_node)
 
         for op_node in ops:
diff --git a/python/paddle/fluid/contrib/slim/tests/test_quantization_pass.py b/python/paddle/fluid/contrib/slim/tests/test_quantization_pass.py
index c1f03999bc1d57b3e678164b05548ed17d771b96..3080a6e60d2cdab7c90c091c95b1b6952b1b980f 100644
--- a/python/paddle/fluid/contrib/slim/tests/test_quantization_pass.py
+++ b/python/paddle/fluid/contrib/slim/tests/test_quantization_pass.py
@@ -505,6 +505,52 @@ class TestQuantizationFreezePass(unittest.TestCase):
                 for_ci=True)
 
 
+def quant_dequant_residual_block(num, quant_skip_pattern=None):
+    def conv_bn_layer(input,
+                      ch_out,
+                      filter_size,
+                      stride,
+                      padding,
+                      act='relu',
+                      bias_attr=False):
+        tmp = fluid.layers.conv2d(
+            input=input,
+            filter_size=filter_size,
+            num_filters=ch_out,
+            stride=stride,
+            padding=padding,
+            act=None,
+            bias_attr=bias_attr)
+        return fluid.layers.batch_norm(input=tmp, act=act)
+
+    data = fluid.layers.data(name='image', shape=[1, 32, 32], dtype='float32')
+    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+    hidden = data
+    for _ in six.moves.xrange(num):
+        conv = conv_bn_layer(hidden, 16, 3, 1, 1, act=None, bias_attr=True)
+        short = conv_bn_layer(hidden, 16, 1, 1, 0, act=None)
+        hidden = fluid.layers.elementwise_add(x=conv, y=short, act='relu')
+
+    if quant_skip_pattern:
+        with fluid.name_scope(quant_skip_pattern):
+            pool1 = fluid.layers.pool2d(
+                input=hidden, pool_size=2, pool_type='avg', pool_stride=2)
+            pool2 = fluid.layers.pool2d(
+                input=hidden, pool_size=2, pool_type='max', pool_stride=2)
+            pool_add = fluid.layers.elementwise_add(
+                x=pool1, y=pool2, act='relu')
+    else:
+        pool1 = fluid.layers.pool2d(
+            input=hidden, pool_size=2, pool_type='avg', pool_stride=2)
+        pool2 = fluid.layers.pool2d(
+            input=hidden, pool_size=2, pool_type='max', pool_stride=2)
+        pool_add = fluid.layers.elementwise_add(x=pool1, y=pool2, act='relu')
+    fc = fluid.layers.fc(input=pool_add, size=10)
+    loss = fluid.layers.cross_entropy(input=fc, label=label)
+    loss = fluid.layers.mean(loss)
+    return loss
+
+
 class TestAddQuantDequantPass(unittest.TestCase):
     def setUp(self):
         self._target_ops = {'elementwise_add', 'pool2d'}
@@ -535,7 +581,7 @@ class TestAddQuantDequantPass(unittest.TestCase):
         main = fluid.Program()
         startup = fluid.Program()
         with fluid.program_guard(main, startup):
-            loss = residual_block(2, skip_pattern)
+            loss = quant_dequant_residual_block(2, skip_pattern)
             opt = fluid.optimizer.Adam(learning_rate=0.001)
             opt.minimize(loss)
         place = fluid.CPUPlace()