From 53e294cac934734481a227f7afcda49bffcfca52 Mon Sep 17 00:00:00 2001 From: zhulei <563755780@qq.com> Date: Mon, 13 Sep 2021 14:18:50 +0800 Subject: [PATCH] [RC22] Fix linear with matmul_op replace (#35445) * [RC22] Fix linear with matmul_op replace * [RC22] Fix linear with matmul_op replace * [RC22] Fix linear with matmul_op replace * [RC22] Fix linear with matmul_op replace * [RC22] Fix linear with matmul_op replace --- .../contrib/slim/tests/test_imperative_skip_op.py | 10 ++++++++-- .../tests/unittests/test_fleet_static_mp_layers.py | 5 +++-- python/paddle/fluid/tests/unittests/test_linear.py | 2 +- .../fluid/tests/unittests/test_momentum_op.py | 2 +- python/paddle/nn/functional/common.py | 13 ++++--------- 5 files changed, 17 insertions(+), 15 deletions(-) diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_skip_op.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_skip_op.py index bb24f941c62..8d2e0f753c0 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_imperative_skip_op.py +++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_skip_op.py @@ -93,8 +93,11 @@ class TestImperativeOutSclae(unittest.TestCase): conv2d_count, matmul_count = 0, 0 conv2d_skip_count, matmul_skip_count = 0, 0 + find_conv2d = False + find_matmul = False for i, op in enumerate(model_ops): if op.type == 'conv2d': + find_conv2d = True if op.has_attr("skip_quant"): conv2d_skip_count += 1 if conv2d_count > 0: @@ -106,6 +109,7 @@ class TestImperativeOutSclae(unittest.TestCase): conv2d_count += 1 if op.type == 'matmul': + find_matmul = True if op.has_attr("skip_quant"): matmul_skip_count += 1 if matmul_count > 0: @@ -116,8 +120,10 @@ class TestImperativeOutSclae(unittest.TestCase): 'fake_quantize_dequantize' not in model_ops[i - 1].type) matmul_count += 1 - self.assertTrue(conv2d_skip_count == 1) - self.assertTrue(matmul_skip_count == 1) + if find_conv2d: + self.assertTrue(conv2d_skip_count == 1) + if find_matmul: + self.assertTrue(matmul_skip_count == 1) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_fleet_static_mp_layers.py b/python/paddle/fluid/tests/unittests/test_fleet_static_mp_layers.py index c9de3814f0a..ed64c7421d0 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_static_mp_layers.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_static_mp_layers.py @@ -104,7 +104,8 @@ class TestDistTraning(unittest.TestCase): ops = main_program.global_block().ops ops = [op.type for op in ops] self.assertEqual( - ops, ['c_identity', 'matmul', 'elementwise_add', 'c_concat']) + ops, + ['c_identity', 'matmul_v2', 'elementwise_add', 'c_concat']) weight = model_a.parallel_linear.weight bias = model_a.parallel_linear.bias @@ -127,7 +128,7 @@ class TestDistTraning(unittest.TestCase): ops = [op.type for op in ops] self.assertEqual( ops, - ['c_split', 'matmul', 'c_allreduce_sum', 'elementwise_add']) + ['c_split', 'matmul_v2', 'c_allreduce_sum', 'elementwise_add']) weight = model_a.parallel_linear.weight bias = model_a.parallel_linear.bias diff --git a/python/paddle/fluid/tests/unittests/test_linear.py b/python/paddle/fluid/tests/unittests/test_linear.py index 2f722fc8005..59f38d7cad6 100644 --- a/python/paddle/fluid/tests/unittests/test_linear.py +++ b/python/paddle/fluid/tests/unittests/test_linear.py @@ -74,7 +74,7 @@ class LinearTestCase(unittest.TestCase): np.testing.assert_array_almost_equal(res_nn, res_np) def test_error_dummy_input(self, place=paddle.CPUPlace()): - with self.assertRaises(ValueError): + with self.assertRaises(RuntimeError): x_arr = np.array([], dtype=np.float32) x = paddle.to_tensor( np.reshape(x_arr, (0, 4, 4, 4)), dtype='float32') diff --git a/python/paddle/fluid/tests/unittests/test_momentum_op.py b/python/paddle/fluid/tests/unittests/test_momentum_op.py index e79f6e5eb4a..b42de853c00 100644 --- a/python/paddle/fluid/tests/unittests/test_momentum_op.py +++ b/python/paddle/fluid/tests/unittests/test_momentum_op.py @@ -664,7 +664,7 @@ class TestFusedMomentumWithDecayAPI(unittest.TestCase): self.assertEqual(ops[-3].type, 'sum') self.assertEqual(ops[-4].type, 'scale') self.assertEqual(ops[-5].type, 'sign') - self.assertEqual(ops[-6].type, 'matmul_grad') + self.assertEqual(ops[-6].type, 'matmul_v2_grad') if 'weight' in ops[-1].input('Param'): self.assertEqual(ops[-1].attr('regularization_method'), '') self.assertEqual(ops[-1].attr('regularization_coeff'), 0) diff --git a/python/paddle/nn/functional/common.py b/python/paddle/nn/functional/common.py index 43980b6efbc..7cda9a90ff0 100644 --- a/python/paddle/nn/functional/common.py +++ b/python/paddle/nn/functional/common.py @@ -1467,9 +1467,8 @@ def linear(x, weight, bias=None, name=None): # [2.1077576 2.1077576 2.1077576 2.1077576 ]] """ if in_dygraph_mode(): - pre_bias = _varbase_creator(dtype=x.dtype) - _C_ops.matmul(x, weight, pre_bias, 'transpose_X', False, 'transpose_Y', - False, "alpha", 1) + pre_bias = _C_ops.matmul_v2(x, weight, 'trans_x', False, 'trans_y', + False) if bias is None: return pre_bias @@ -1484,14 +1483,10 @@ def linear(x, weight, bias=None, name=None): check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'], 'linear') inputs = {'X': [x], 'Y': [weight]} - attrs = { - 'transpose_X': False, - 'transpose_Y': False, - 'alpha': 1, - } + attrs = {'trans_x': False, 'trans_y': False} tmp = helper.create_variable_for_type_inference(dtype) helper.append_op( - type='matmul', inputs=inputs, outputs={'Out': tmp}, attrs=attrs) + type='matmul_v2', inputs=inputs, outputs={'Out': tmp}, attrs=attrs) if bias is not None: res = helper.create_variable_for_type_inference(dtype) helper.append_op( -- GitLab