From 91b65e58225032584599ec96f6f5de49314b7a43 Mon Sep 17 00:00:00 2001
From: HongyuJia
Date: Mon, 5 Dec 2022 10:35:38 +0800
Subject: [PATCH] [Clean fluid] Clean fluid elementwise_arithmetic (part2)
 (#48461)

* clean elem_arithmetic part2 unittest

* fix test_model_cast_to_bf16

* restore test_model_cast_to_bf16

---
 .../slim/tests/test_quantization_pass.py      | 14 ++--
 .../tests/test_user_defined_quantization.py   |  8 +--
 .../tests/test_image_classification_fp16.py   |  2 +-
 .../tests/test_multi_precision_fp16_train.py  |  2 +-
 .../contrib/tests/test_quantize_transpiler.py |  2 +-
 .../contrib/tests/test_weight_decay_extend.py |  4 +-
 .../tests/book/test_image_classification.py   |  2 +-
 .../fleet/hybrid_parallel_pp_embedding.py     |  4 +-
 .../fleet/hybrid_parallel_shared_weight.py    |  4 +-
 .../fleet/parallel_dygraph_se_resnext.py      |  2 +-
 .../test_mkldnn_elt_act_fuse_pass.py          | 72 +++++++++----------
 .../test_mkldnn_inplace_fuse_pass.py          |  4 +-
 .../ir/inference/test_trt_subgraph_pass.py    |  4 +-
 ...r_embedding_eltwise_layernorm_fuse_pass.py | 11 +--
 .../unittests/ir/test_ir_fusion_group_pass.py |  8 +--
 .../ir/test_ir_skip_layernorm_pass.py         |  2 +-
 .../mkldnn/check_flags_mkldnn_ops_on_off.py   |  3 +-
 17 files changed, 69 insertions(+), 79 deletions(-)

diff --git a/python/paddle/fluid/contrib/slim/tests/test_quantization_pass.py b/python/paddle/fluid/contrib/slim/tests/test_quantization_pass.py
index f64a047ea4..5aabeee119 100644
--- a/python/paddle/fluid/contrib/slim/tests/test_quantization_pass.py
+++ b/python/paddle/fluid/contrib/slim/tests/test_quantization_pass.py
@@ -72,7 +72,7 @@ def residual_block(num, quant_skip_pattern=None):
     for _ in range(num):
         conv = conv_bn_layer(hidden, 16, 3, 1, 1, act=None, bias_attr=True)
         short = conv_bn_layer(hidden, 16, 1, 1, 0, act=None)
-        hidden = fluid.layers.elementwise_add(x=conv, y=short, act='relu')
+        hidden = paddle.nn.functional.relu(paddle.add(x=conv, y=short))
     matmul_weight = paddle.create_parameter(
         shape=[1, 16, 32, 32], dtype='float32'
     )
@@ -723,7 +723,7 @@ def quant_dequant_residual_block(num, quant_skip_pattern=None):
     for _ in range(num):
         conv = conv_bn_layer(hidden, 16, 3, 1, 1, act=None, bias_attr=True)
         short = conv_bn_layer(hidden, 16, 1, 1, 0, act=None)
-        hidden = fluid.layers.elementwise_add(x=conv, y=short, act='relu')
+        hidden = paddle.nn.functional.relu(paddle.add(x=conv, y=short))
     hidden = fluid.layers.matmul(hidden, data2, True, True)
     if isinstance(quant_skip_pattern, str):
         with fluid.name_scope(quant_skip_pattern):
@@ -733,9 +733,7 @@ def quant_dequant_residual_block(num, quant_skip_pattern=None):
             pool2 = fluid.layers.pool2d(
                 input=hidden, pool_size=2, pool_type='max', pool_stride=2
             )
-            pool_add = fluid.layers.elementwise_add(
-                x=pool1, y=pool2, act='relu'
-            )
+            pool_add = paddle.nn.functional.relu(paddle.add(x=pool1, y=pool2))
     elif isinstance(quant_skip_pattern, list):
         assert (
             len(quant_skip_pattern) > 1
@@ -748,9 +746,7 @@ def quant_dequant_residual_block(num, quant_skip_pattern=None):
             input=hidden, pool_size=2, pool_type='max', pool_stride=2
         )
         with fluid.name_scope(quant_skip_pattern[1]):
-            pool_add = fluid.layers.elementwise_add(
-                x=pool1, y=pool2, act='relu'
-            )
+            pool_add = paddle.nn.functional.relu(paddle.add(x=pool1, y=pool2))
     else:
         pool1 = fluid.layers.pool2d(
             input=hidden, pool_size=2, pool_type='avg', pool_stride=2
@@ -758,7 +754,7 @@ def quant_dequant_residual_block(num, quant_skip_pattern=None):
         pool2 = fluid.layers.pool2d(
             input=hidden, pool_size=2, pool_type='max', pool_stride=2
         )
-        pool_add = fluid.layers.elementwise_add(x=pool1, y=pool2, act='relu')
+        pool_add = paddle.nn.functional.relu(paddle.add(x=pool1, y=pool2))
     fc = fluid.layers.fc(input=pool_add, size=10)
     loss = fluid.layers.cross_entropy(input=fc, label=label)
     loss = paddle.mean(loss)
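Every hunk in this file follows the same rewrite: the legacy `fluid.layers.elementwise_add` fused an activation into the op through its `act` attribute, while the 2.x API spells the computation out as a composition of `paddle.add` and a standalone activation, leaving any graph-level fusion to the framework's fuse passes (which the MKLDNN tests later in this patch exercise). A minimal standalone sketch of the pattern, not part of the patch itself; the shapes are arbitrary:

    import paddle

    conv = paddle.rand([4, 16, 32, 32])
    short = paddle.rand([4, 16, 32, 32])
    # old: fluid.layers.elementwise_add(x=conv, y=short, act='relu')
    hidden = paddle.nn.functional.relu(paddle.add(conv, short))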
diff --git a/python/paddle/fluid/contrib/slim/tests/test_user_defined_quantization.py b/python/paddle/fluid/contrib/slim/tests/test_user_defined_quantization.py
index cc8136e3b7..2565627813 100644
--- a/python/paddle/fluid/contrib/slim/tests/test_user_defined_quantization.py
+++ b/python/paddle/fluid/contrib/slim/tests/test_user_defined_quantization.py
@@ -73,12 +73,8 @@ def pact(x, name=None):
         learning_rate=1,
     )
     u_param = helper.create_parameter(attr=u_param_attr, shape=[1], dtype=dtype)
-    x = fluid.layers.elementwise_sub(
-        x, fluid.layers.relu(fluid.layers.elementwise_sub(x, u_param))
-    )
-    x = fluid.layers.elementwise_add(
-        x, fluid.layers.relu(fluid.layers.elementwise_sub(-u_param, x))
-    )
+    x = paddle.subtract(x, fluid.layers.relu(paddle.subtract(x, u_param)))
+    x = paddle.add(x, fluid.layers.relu(paddle.subtract(-u_param, x)))

     return x

diff --git a/python/paddle/fluid/contrib/tests/test_image_classification_fp16.py b/python/paddle/fluid/contrib/tests/test_image_classification_fp16.py
index 362dde4d48..908622d76a 100644
--- a/python/paddle/fluid/contrib/tests/test_image_classification_fp16.py
+++ b/python/paddle/fluid/contrib/tests/test_image_classification_fp16.py
@@ -53,7 +53,7 @@ def resnet_cifar10(input, depth=32):
         tmp = conv_bn_layer(input, ch_out, 3, stride, 1)
         tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, act=None, bias_attr=True)
         short = shortcut(input, ch_in, ch_out, stride)
-        return fluid.layers.elementwise_add(x=tmp, y=short, act='relu')
+        return paddle.nn.functional.relu(paddle.add(x=tmp, y=short))

     def layer_warp(block_func, input, ch_in, ch_out, count, stride):
         tmp = block_func(input, ch_in, ch_out, stride)

diff --git a/python/paddle/fluid/contrib/tests/test_multi_precision_fp16_train.py b/python/paddle/fluid/contrib/tests/test_multi_precision_fp16_train.py
index 4265594f71..8f4bf36e5b 100644
--- a/python/paddle/fluid/contrib/tests/test_multi_precision_fp16_train.py
+++ b/python/paddle/fluid/contrib/tests/test_multi_precision_fp16_train.py
@@ -71,7 +71,7 @@ def resnet_cifar10(input, depth=32):
         tmp = conv_bn_layer(input, ch_out, 3, stride, 1)
         tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, act=None, bias_attr=True)
         short = shortcut(input, ch_in, ch_out, stride)
-        return fluid.layers.elementwise_add(x=tmp, y=short, act='relu')
+        return paddle.nn.functional.relu(paddle.add(x=tmp, y=short))

     def layer_warp(block_func, input, ch_in, ch_out, count, stride):
         tmp = block_func(input, ch_in, ch_out, stride)

diff --git a/python/paddle/fluid/contrib/tests/test_quantize_transpiler.py b/python/paddle/fluid/contrib/tests/test_quantize_transpiler.py
index 082dbe5bdb..cdbd65fad6 100644
--- a/python/paddle/fluid/contrib/tests/test_quantize_transpiler.py
+++ b/python/paddle/fluid/contrib/tests/test_quantize_transpiler.py
@@ -56,7 +56,7 @@ def residual_block(num):
     for _ in range(num):
         conv = conv_bn_layer(hidden, 16, 3, 1, 1, act=None, bias_attr=True)
         short = conv_bn_layer(hidden, 16, 1, 1, 0, act=None)
-        hidden = fluid.layers.elementwise_add(x=conv, y=short, act='relu')
+        hidden = paddle.nn.functional.relu(paddle.add(x=conv, y=short))
     fc = fluid.layers.fc(input=hidden, size=10)
     loss = fluid.layers.cross_entropy(input=fc, label=label)
     loss = paddle.mean(loss)
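The `pact` rewrite above preserves the PACT clipping identity: `x - relu(x - u)` caps values above `u`, and `x + relu(-u - x)` floors values below `-u`. A standalone numeric sketch of the same composition (it uses `paddle.nn.functional.relu` where the test keeps `fluid.layers.relu`; the values are illustrative):

    import paddle

    u = paddle.to_tensor([1.0])
    x = paddle.to_tensor([-3.0, -0.5, 0.5, 3.0])
    x = paddle.subtract(x, paddle.nn.functional.relu(paddle.subtract(x, u)))
    x = paddle.add(x, paddle.nn.functional.relu(paddle.subtract(-u, x)))
    print(x.numpy())  # [-1.  -0.5  0.5  1. ], i.e. x clipped to [-u, u]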
diff --git a/python/paddle/fluid/contrib/tests/test_weight_decay_extend.py b/python/paddle/fluid/contrib/tests/test_weight_decay_extend.py
index 70c63c1d54..7b40d513f2 100644
--- a/python/paddle/fluid/contrib/tests/test_weight_decay_extend.py
+++ b/python/paddle/fluid/contrib/tests/test_weight_decay_extend.py
@@ -171,9 +171,7 @@ class TestWeightDecay(unittest.TestCase):
             ]

             for params in param_list:
-                updated_p = fluid.layers.elementwise_sub(
-                    x=params[0], y=params[1]
-                )
+                updated_p = paddle.subtract(x=params[0], y=params[1])
                 fluid.layers.assign(input=updated_p, output=params[0])

             optimizer.apply_optimize(avg_cost, startup_prog, params_grads)

diff --git a/python/paddle/fluid/tests/book/test_image_classification.py b/python/paddle/fluid/tests/book/test_image_classification.py
index beb562bee5..3a401df203 100644
--- a/python/paddle/fluid/tests/book/test_image_classification.py
+++ b/python/paddle/fluid/tests/book/test_image_classification.py
@@ -52,7 +52,7 @@ def resnet_cifar10(input, depth=32):
         tmp = conv_bn_layer(input, ch_out, 3, stride, 1)
         tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, act=None, bias_attr=True)
         short = shortcut(input, ch_in, ch_out, stride)
-        return fluid.layers.elementwise_add(x=tmp, y=short, act='relu')
+        return paddle.nn.functional.relu(paddle.add(x=tmp, y=short))

     def layer_warp(block_func, input, ch_in, ch_out, count, stride):
         tmp = block_func(input, ch_in, ch_out, stride)

diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_embedding.py b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_embedding.py
index 104aa658ec..0d1e7084ab 100644
--- a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_embedding.py
+++ b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_embedding.py
@@ -55,7 +55,7 @@ class SimpleNet(Layer):
     def forward(self, x1, x2, y1):
         x_emb = self.word_embeddings(x1)
         fc = fluid.layers.matmul(x_emb, self.softmax_weight)
-        fc = fluid.layers.elementwise_add(fc, self.softmax_bias)
+        fc = paddle.add(fc, self.softmax_bias)
         projection = paddle.reshape(fc, shape=[-1, vocab_size])
         loss = paddle.nn.functional.softmax_with_cross_entropy(
             logits=projection, label=y1, soft_label=False
@@ -95,7 +95,7 @@ class BiasNet(Layer):
     def forward(self, args):
         fc, x2 = args
-        fc = fluid.layers.elementwise_add(fc, self.softmax_bias)
+        fc = paddle.add(fc, self.softmax_bias)
         projection = paddle.reshape(fc, shape=[-1, vocab_size])
         return projection, x2

diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_shared_weight.py b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_shared_weight.py
index 58c0fe7465..4560789212 100644
--- a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_shared_weight.py
+++ b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_shared_weight.py
@@ -62,7 +62,7 @@ class SimpleNet(Layer):
     def forward(self, x1, x2, y1):
         x_emb = self.word_embeddings(x1)
         fc = fluid.layers.matmul(x_emb, self.softmax_weight)
-        fc = fluid.layers.elementwise_add(fc, self.softmax_bias)
+        fc = paddle.add(fc, self.softmax_bias)
         projection = paddle.reshape(fc, shape=[-1, vocab_size])

         projection = paddle.matmul(projection, self.word_embeddings.weight)
@@ -109,7 +109,7 @@ class BiasNet(Layer):
     def forward(self, args):
         fc, x2 = args
-        fc = fluid.layers.elementwise_add(fc, self.softmax_bias)
+        fc = paddle.add(fc, self.softmax_bias)
         projection = paddle.reshape(fc, shape=[-1, vocab_size])
         return projection, x2
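The `paddle.add(fc, self.softmax_bias)` rewrites above drop the legacy op's `axis` machinery and rely on numpy-style broadcasting: a 1-D bias is expanded across the leading dimensions of `fc`. A hedged sketch with made-up shapes (the real tests derive theirs from `vocab_size` and the batch):

    import paddle

    fc = paddle.rand([8, 10, 128])   # [batch, seq_len, hidden] -- illustrative
    bias = paddle.zeros([128])       # 1-D bias, broadcast over batch and seq_len
    out = paddle.add(fc, bias)
    print(out.shape)                 # [8, 10, 128]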
diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_se_resnext.py b/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_se_resnext.py
index 164f1410ed..eef3f6bdd7 100644
--- a/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_se_resnext.py
+++ b/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_se_resnext.py
@@ -206,7 +206,7 @@ class BottleneckBlock(fluid.dygraph.Layer):
         else:
             short = self.short(inputs)

-        y = fluid.layers.elementwise_add(x=short, y=scale, act='relu')
+        y = paddle.nn.functional.relu(paddle.add(x=short, y=scale))

         return y

diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py
index ac635436f6..2026a54116 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py
@@ -53,7 +53,7 @@ class ElementwiseActivationMkldnnFusePassTest(InferencePassTest):
         self.enable_mkldnn = True

     def set_params(self):
-        self.operand = fluid.layers.elementwise_add
+        self.operand = paddle.add
         self.act = None

     def test_check_output(self):
@@ -68,7 +68,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_Relu(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_add
+        self.operand = paddle.add
         self.act = fluid.layers.relu
@@ -76,7 +76,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_Tanh(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_add
+        self.operand = paddle.add
         self.act = paddle.tanh
@@ -84,7 +84,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_LeakyRelu(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_add
+        self.operand = paddle.add
         self.act_alpha = 0.2
         self.act = paddle.nn.functional.leaky_relu
@@ -93,7 +93,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_Swish(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_add
+        self.operand = paddle.add
         self.act = paddle.nn.functional.swish
@@ -101,7 +101,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_HardSwish(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_add
+        self.operand = paddle.add
         self.act = fluid.layers.hard_swish
@@ -109,7 +109,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_SQRT(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_add
+        self.operand = paddle.add
         self.act = paddle.sqrt
@@ -117,7 +117,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_ABS(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_add
+        self.operand = paddle.add
         self.act = paddle.abs
@@ -125,7 +125,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_Clip(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_add
+        self.operand = paddle.add
         self.act = fluid.layers.clip
         self.act_alpha = 0.0
         self.act_beta = 10.0
@@ -135,7 +135,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_Gelu(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_add
+        self.operand = paddle.add
         self.act = paddle.nn.functional.gelu
@@ -143,7 +143,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_Gelu_Tanh(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_add
+        self.operand = paddle.add
         self.act = paddle.nn.functional.gelu
         self.act_alpha = True
@@ -152,7 +152,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_Relu6(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_add
+        self.operand = paddle.add
         self.act = paddle.nn.functional.relu6
@@ -160,7 +160,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_Sigmoid(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_add
+        self.operand = paddle.add
         self.act = paddle.nn.functional.sigmoid
@@ -168,7 +168,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_Relu(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_sub
+        self.operand = paddle.subtract
         self.act = fluid.layers.relu
@@ -176,7 +176,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_Tanh(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_sub
+        self.operand = paddle.subtract
         self.act = paddle.tanh
@@ -184,7 +184,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_LeakyRelu(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_sub
+        self.operand = paddle.subtract
         self.act_alpha = 0.2
         self.act = paddle.nn.functional.leaky_relu
@@ -193,7 +193,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_Swish(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_sub
+        self.operand = paddle.subtract
         self.act = paddle.nn.functional.swish
@@ -201,7 +201,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_HardSwish(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_sub
+        self.operand = paddle.subtract
         self.act = fluid.layers.hard_swish
@@ -209,7 +209,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_ABS(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_sub
+        self.operand = paddle.subtract
         self.act = paddle.abs
@@ -217,7 +217,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_Clip(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_sub
+        self.operand = paddle.subtract
         self.act = fluid.layers.clip
         self.act_alpha = 0.0
         self.act_beta = 10.0
@@ -227,7 +227,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_Gelu(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_sub
+        self.operand = paddle.subtract
         self.act = paddle.nn.functional.gelu
@@ -235,7 +235,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_Gelu_Tanh(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_sub
+        self.operand = paddle.subtract
         self.act = paddle.nn.functional.gelu
         self.act_alpha = True
@@ -244,7 +244,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_Relu6(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_sub
+        self.operand = paddle.subtract
         self.act = paddle.nn.functional.relu6
@@ -252,7 +252,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_Sigmoid(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_sub
+        self.operand = paddle.subtract
         self.act = paddle.nn.functional.sigmoid
@@ -260,7 +260,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_Relu(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_mul
+        self.operand = paddle.multiply
         self.act = fluid.layers.relu
@@ -268,7 +268,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_Tanh(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_mul
+        self.operand = paddle.multiply
         self.act = paddle.tanh
@@ -276,7 +276,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_LeakyRelu(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_mul
+        self.operand = paddle.multiply
         self.act_alpha = 0.2
         self.act = paddle.nn.functional.leaky_relu
@@ -285,7 +285,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_Swish(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_mul
+        self.operand = paddle.multiply
         self.act = paddle.nn.functional.swish
@@ -293,7 +293,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_HardSwish(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_mul
+        self.operand = paddle.multiply
         self.act = fluid.layers.hard_swish
@@ -301,7 +301,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_SQRT(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_mul
+        self.operand = paddle.multiply
         self.act = paddle.sqrt
@@ -309,7 +309,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_ABS(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_mul
+        self.operand = paddle.multiply
         self.act = paddle.abs
@@ -317,7 +317,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_Clip(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_mul
+        self.operand = paddle.multiply
         self.act = fluid.layers.clip
         self.act_alpha = 0.0
         self.act_beta = 10.0
@@ -327,7 +327,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_Gelu(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_mul
+        self.operand = paddle.multiply
         self.act = paddle.nn.functional.gelu
@@ -335,7 +335,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_Gelu_Tanh(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_mul
+        self.operand = paddle.multiply
         self.act = paddle.nn.functional.gelu
         self.act_alpha = True
@@ -344,7 +344,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_Relu6(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_mul
+        self.operand = paddle.multiply
         self.act = paddle.nn.functional.relu6
@@ -352,7 +352,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_Sigmoid(
     ElementwiseActivationMkldnnFusePassTest
 ):
     def set_params(self):
-        self.operand = fluid.layers.elementwise_mul
+        self.operand = paddle.multiply
         self.act = paddle.nn.functional.sigmoid
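The long run of hunks above is mechanical because these tests keep the binary op and the activation as bare callables, and `paddle.add`, `paddle.subtract`, and `paddle.multiply` all share the `(x, y)` call shape of their `fluid.layers.elementwise_*` predecessors. Roughly how the pieces compose outside the `InferencePassTest` harness (a sketch; the tensor shapes are arbitrary):

    import paddle

    operand = paddle.multiply            # was fluid.layers.elementwise_mul
    act = paddle.nn.functional.relu6

    x = paddle.rand([2, 3])
    y = paddle.rand([2, 3])
    out = act(operand(x, y))             # the pattern the MKLDNN fuse pass matches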
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_inplace_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_inplace_fuse_pass.py
index 386dcf7b40..a2ac6d42e5 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_inplace_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_inplace_fuse_pass.py
@@ -34,9 +34,7 @@ class MkldnnInplacePassTest(InferencePassTest):
             )
             softmax_out = paddle.nn.functional.softmax(conv_out_1)
             relu_out = fluid.layers.relu(conv_out_1)
-            eltwise_out = fluid.layers.elementwise_add(
-                softmax_out, relu_out, axis=-1
-            )
+            eltwise_out = paddle.add(softmax_out, relu_out)

         self.pass_name = 'mkldnn_inplace_pass'
         self.feeds = {

diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_subgraph_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_subgraph_pass.py
index 235f2446cb..a0f034462f 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_subgraph_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_subgraph_pass.py
@@ -378,7 +378,7 @@ class TensorRTSubgraphPassElementwiseTest(InferencePassTest):
         self.fetch_list = [out]

     def append_eltwise(self, data1, data2):
-        return fluid.layers.elementwise_add(x=data1, y=data2)
+        return paddle.add(x=data1, y=data2)

     def test_check_output(self):
         if core.is_compiled_with_cuda():
@@ -439,7 +439,7 @@ class TensorRTSubgraphPassElementwiseBroadcastDynamicTest(InferencePassTest):
         self.fetch_list = [out]

     def append_eltwise(self, data1, data2):
-        return fluid.layers.elementwise_add(x=data1, y=data2)
+        return paddle.add(x=data1, y=data2)

     def test_check_output(self):
         if os.path.exists(self.path + "_opt_cache"):

diff --git a/python/paddle/fluid/tests/unittests/ir/test_ir_embedding_eltwise_layernorm_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/test_ir_embedding_eltwise_layernorm_fuse_pass.py
index 2eb3cf9938..2f3df45524 100644
--- a/python/paddle/fluid/tests/unittests/ir/test_ir_embedding_eltwise_layernorm_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/test_ir_embedding_eltwise_layernorm_fuse_pass.py
@@ -17,6 +17,7 @@ import unittest
 import numpy as np
 from pass_test import PassTest

+import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core

@@ -51,8 +52,8 @@ class EmbEltwiseLayerNormFusePassTest(PassTest):
             sent_emb = fluid.layers.embedding(
                 input=sent_id, size=(128, 768), dtype='float32'
             )
-            add1 = fluid.layers.elementwise_add(word_emb, pos_emb)
-            add2 = fluid.layers.elementwise_add(add1, sent_emb)
+            add1 = paddle.add(word_emb, pos_emb)
+            add2 = paddle.add(add1, sent_emb)
             hidden1 = fluid.layers.layer_norm(input=add2, begin_norm_axis=2)

             id1 = fluid.layers.data(
@@ -91,9 +92,9 @@ class EmbEltwiseLayerNormFusePassTest(PassTest):
             emb4 = fluid.layers.embedding(
                 input=id4, size=(128, 768), dtype='float32'
             )
-            add_1 = fluid.layers.elementwise_add(emb1, emb2)
-            add_2 = fluid.layers.elementwise_add(add_1, emb3)
-            add_3 = fluid.layers.elementwise_add(add_2, emb4)
+            add_1 = paddle.add(emb1, emb2)
+            add_2 = paddle.add(add_1, emb3)
+            add_3 = paddle.add(add_2, emb4)
             hidden_1 = fluid.layers.layer_norm(input=add_3, begin_norm_axis=2)

         self.feeds = {
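The `test_mkldnn_inplace_fuse_pass.py` hunk above is the one place the patch drops an explicit `axis=-1`: `paddle.add` has no `axis` parameter at all. That is safe here because `axis=-1` was already the legacy default, and both operands derive from the same `conv_out_1`, so their shapes match exactly and plain broadcasting gives the same result. A sketch of the equal-shape case (using the 2.x relu; the shapes are illustrative):

    import paddle

    conv_out_1 = paddle.rand([1, 3, 64, 64])
    softmax_out = paddle.nn.functional.softmax(conv_out_1)
    relu_out = paddle.nn.functional.relu(conv_out_1)
    eltwise_out = paddle.add(softmax_out, relu_out)   # no axis argument needed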
diff --git a/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py b/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py
index 1538bac16f..47b65f5626 100644
--- a/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py
@@ -142,7 +142,7 @@ class FusionGroupPassTestCastAndFP16(FusionGroupPassTest):
             zero = layers.fill_constant(shape=[128], dtype="float16", value=0)
             # TODO(xreki): fix precision problem when using softmax of float16.
             # tmp_2 = layers.softmax(tmp_1)
-            tmp_2 = layers.elementwise_add(tmp_1, zero)
+            tmp_2 = paddle.add(tmp_1, zero)
             tmp_3 = layers.mul(tmp_0, self.feed_vars[2])
             # subgraph with 4 op nodes
             tmp_3 = layers.cast(tmp_2, dtype="float16")
@@ -184,7 +184,7 @@ class FusionGroupPassCastTest(FusionGroupPassTest):
         with fluid.program_guard(self.main_program, self.startup_program):
             self.feed_vars = self._prepare_feed_vars([2, 2], dtype, 2)

-            tmp_0 = layers.elementwise_add(self.feed_vars[0], self.feed_vars[1])
+            tmp_0 = paddle.add(self.feed_vars[0], self.feed_vars[1])
             tmp_1 = layers.cast(tmp_0, dtype="float64")
             tmp_2 = layers.cast(tmp_1, dtype="float32")

@@ -205,12 +205,12 @@ class FusionGroupPassFillConstantTest(FusionGroupPassTest):
         with fluid.program_guard(self.main_program, self.startup_program):
             self.feed_vars = self._prepare_feed_vars([2, 2], dtype, 2)

-            tmp_0 = layers.elementwise_add(self.feed_vars[0], self.feed_vars[1])
+            tmp_0 = paddle.add(self.feed_vars[0], self.feed_vars[1])
             tmp_1 = layers.fill_constant(shape=[2, 2], dtype=dtype, value=2.0)
             tmp_2 = paddle.scale(
                 tmp_1, scale=3.0, bias=1.0, bias_after_scale=True
             )
-            tmp_3 = layers.elementwise_mul(tmp_2, tmp_0)
+            tmp_3 = paddle.multiply(tmp_2, tmp_0)

             self.append_gradients(tmp_3)

diff --git a/python/paddle/fluid/tests/unittests/ir/test_ir_skip_layernorm_pass.py b/python/paddle/fluid/tests/unittests/ir/test_ir_skip_layernorm_pass.py
index ea2eadd36a..829dbcadcd 100644
--- a/python/paddle/fluid/tests/unittests/ir/test_ir_skip_layernorm_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/test_ir_skip_layernorm_pass.py
@@ -31,7 +31,7 @@ class SkipLayerNormFusePassTest(PassTest):
             y = fluid.data(
                 name="y", shape=[128, 768], dtype="float32", lod_level=0
             )
-            elementwise_out = fluid.layers.elementwise_add(x=x, y=y)
+            elementwise_out = paddle.add(x=x, y=y)
             out = fluid.layers.layer_norm(input=elementwise_out)

         self.fetch_list = [out]

diff --git a/python/paddle/fluid/tests/unittests/mkldnn/check_flags_mkldnn_ops_on_off.py b/python/paddle/fluid/tests/unittests/mkldnn/check_flags_mkldnn_ops_on_off.py
index d11ca11740..aa9811a94b 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/check_flags_mkldnn_ops_on_off.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/check_flags_mkldnn_ops_on_off.py
@@ -16,6 +16,7 @@ import os

 import numpy as np

+import paddle
 import paddle.fluid as fluid
 from paddle.fluid.framework import _enable_legacy_dygraph, _global_flags
 from paddle.fluid.layer_helper import LayerHelper
@@ -48,7 +49,7 @@ def check():
     with fluid.dygraph.guard(fluid.core.CPUPlace()):
         a = fluid.dygraph.to_variable(a_np)
         b = fluid.dygraph.to_variable(b_np)
-        y = fluid.layers.elementwise_add(x=a, y=b)
+        y = paddle.add(x=a, y=b)
         y = fluid.layers.matmul(x=y, y=b, transpose_y=True)
         res1 = func(y)
-- 
GitLab
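For completeness, the dygraph-mode change in `check_flags_mkldnn_ops_on_off.py` can be exercised in isolation; a minimal sketch mirroring `check()` (the shapes and the uniform range are illustrative, and `fluid.dygraph.to_variable` is kept only to match the test's legacy style):

    import numpy as np
    import paddle
    import paddle.fluid as fluid

    a_np = np.random.uniform(-1, 1, (2, 3)).astype(np.float32)
    b_np = np.random.uniform(-1, 1, (2, 3)).astype(np.float32)
    with fluid.dygraph.guard(fluid.core.CPUPlace()):
        a = fluid.dygraph.to_variable(a_np)
        b = fluid.dygraph.to_variable(b_np)
        y = paddle.add(x=a, y=b)    # replaces fluid.layers.elementwise_add
        np.testing.assert_allclose(y.numpy(), a_np + b_np, rtol=1e-6)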