Unverified commit 91b65e58, authored by HongyuJia, committed by GitHub

[Clean fluid] Clean fluid elementwise_arithmetic (part2) (#48461)

* clean elem_arithmetic part2 unittest

* fix test_model_cast_to_bf16

* restore test_model_cast_to_bf16
Parent a842c1d0
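The change repeated throughout this diff replaces the deprecated fluid.layers.elementwise_add/elementwise_sub/elementwise_mul calls, including their fused `act` argument, with the public Paddle 2.x operators plus an explicit activation call. A minimal sketch of the mapping, assuming a Paddle 2.x installation (the tensors here are illustrative, not taken from the tests):

import paddle

x = paddle.rand([2, 3])
y = paddle.rand([2, 3])

# Old fluid style (the '-' lines in the hunks below):
#   out = fluid.layers.elementwise_add(x=x, y=y, act='relu')
# New style (the '+' lines): the fused activation becomes an explicit call.
out = paddle.nn.functional.relu(paddle.add(x, y))

# The same mapping covers the other ops touched by this commit:
#   fluid.layers.elementwise_sub -> paddle.subtract
#   fluid.layers.elementwise_mul -> paddle.multiply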
@@ -72,7 +72,7 @@ def residual_block(num, quant_skip_pattern=None):
for _ in range(num):
conv = conv_bn_layer(hidden, 16, 3, 1, 1, act=None, bias_attr=True)
short = conv_bn_layer(hidden, 16, 1, 1, 0, act=None)
-hidden = fluid.layers.elementwise_add(x=conv, y=short, act='relu')
+hidden = paddle.nn.functional.relu(paddle.add(x=conv, y=short))
matmul_weight = paddle.create_parameter(
shape=[1, 16, 32, 32], dtype='float32'
)
@@ -723,7 +723,7 @@ def quant_dequant_residual_block(num, quant_skip_pattern=None):
for _ in range(num):
conv = conv_bn_layer(hidden, 16, 3, 1, 1, act=None, bias_attr=True)
short = conv_bn_layer(hidden, 16, 1, 1, 0, act=None)
-hidden = fluid.layers.elementwise_add(x=conv, y=short, act='relu')
+hidden = paddle.nn.functional.relu(paddle.add(x=conv, y=short))
hidden = fluid.layers.matmul(hidden, data2, True, True)
if isinstance(quant_skip_pattern, str):
with fluid.name_scope(quant_skip_pattern):
@@ -733,9 +733,7 @@ def quant_dequant_residual_block(num, quant_skip_pattern=None):
pool2 = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='max', pool_stride=2
)
-pool_add = fluid.layers.elementwise_add(
-x=pool1, y=pool2, act='relu'
-)
+pool_add = paddle.nn.functional.relu(paddle.add(x=pool1, y=pool2))
elif isinstance(quant_skip_pattern, list):
assert (
len(quant_skip_pattern) > 1
@@ -748,9 +746,7 @@ def quant_dequant_residual_block(num, quant_skip_pattern=None):
input=hidden, pool_size=2, pool_type='max', pool_stride=2
)
with fluid.name_scope(quant_skip_pattern[1]):
-pool_add = fluid.layers.elementwise_add(
-x=pool1, y=pool2, act='relu'
-)
+pool_add = paddle.nn.functional.relu(paddle.add(x=pool1, y=pool2))
else:
pool1 = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='avg', pool_stride=2
@@ -758,7 +754,7 @@ def quant_dequant_residual_block(num, quant_skip_pattern=None):
pool2 = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='max', pool_stride=2
)
-pool_add = fluid.layers.elementwise_add(x=pool1, y=pool2, act='relu')
+pool_add = paddle.nn.functional.relu(paddle.add(x=pool1, y=pool2))
fc = fluid.layers.fc(input=pool_add, size=10)
loss = fluid.layers.cross_entropy(input=fc, label=label)
loss = paddle.mean(loss)
......
@@ -73,12 +73,8 @@ def pact(x, name=None):
learning_rate=1,
)
u_param = helper.create_parameter(attr=u_param_attr, shape=[1], dtype=dtype)
-x = fluid.layers.elementwise_sub(
-x, fluid.layers.relu(fluid.layers.elementwise_sub(x, u_param))
-)
-x = fluid.layers.elementwise_add(
-x, fluid.layers.relu(fluid.layers.elementwise_sub(-u_param, x))
-)
+x = paddle.subtract(x, fluid.layers.relu(paddle.subtract(x, u_param)))
+x = paddle.add(x, fluid.layers.relu(paddle.subtract(-u_param, x)))
return x
......
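Aside: the two rewritten lines in pact() above implement a PACT-style clamp. x - relu(x - u) equals min(x, u), and then adding relu(-u - x) raises the floor to -u, so the result is clip(x, -u, u). A standalone NumPy check of this identity (a sketch; the names are illustrative, not part of the test suite):

import numpy as np

def relu(v):
    return np.maximum(v, 0.0)

u = 1.5                                  # clipping threshold, analogous to u_param
x = np.linspace(-3.0, 3.0, 7)
clamped = x - relu(x - u)                # upper clamp: min(x, u)
clamped = clamped + relu(-u - clamped)   # lower clamp: max(..., -u)
assert np.allclose(clamped, np.clip(x, -u, u))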
@@ -53,7 +53,7 @@ def resnet_cifar10(input, depth=32):
tmp = conv_bn_layer(input, ch_out, 3, stride, 1)
tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, act=None, bias_attr=True)
short = shortcut(input, ch_in, ch_out, stride)
-return fluid.layers.elementwise_add(x=tmp, y=short, act='relu')
+return paddle.nn.functional.relu(paddle.add(x=tmp, y=short))
def layer_warp(block_func, input, ch_in, ch_out, count, stride):
tmp = block_func(input, ch_in, ch_out, stride)
......
@@ -71,7 +71,7 @@ def resnet_cifar10(input, depth=32):
tmp = conv_bn_layer(input, ch_out, 3, stride, 1)
tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, act=None, bias_attr=True)
short = shortcut(input, ch_in, ch_out, stride)
-return fluid.layers.elementwise_add(x=tmp, y=short, act='relu')
+return paddle.nn.functional.relu(paddle.add(x=tmp, y=short))
def layer_warp(block_func, input, ch_in, ch_out, count, stride):
tmp = block_func(input, ch_in, ch_out, stride)
......
@@ -56,7 +56,7 @@ def residual_block(num):
for _ in range(num):
conv = conv_bn_layer(hidden, 16, 3, 1, 1, act=None, bias_attr=True)
short = conv_bn_layer(hidden, 16, 1, 1, 0, act=None)
-hidden = fluid.layers.elementwise_add(x=conv, y=short, act='relu')
+hidden = paddle.nn.functional.relu(paddle.add(x=conv, y=short))
fc = fluid.layers.fc(input=hidden, size=10)
loss = fluid.layers.cross_entropy(input=fc, label=label)
loss = paddle.mean(loss)
......
@@ -171,9 +171,7 @@ class TestWeightDecay(unittest.TestCase):
]
for params in param_list:
-updated_p = fluid.layers.elementwise_sub(
-x=params[0], y=params[1]
-)
+updated_p = paddle.subtract(x=params[0], y=params[1])
fluid.layers.assign(input=updated_p, output=params[0])
optimizer.apply_optimize(avg_cost, startup_prog, params_grads)
......
@@ -52,7 +52,7 @@ def resnet_cifar10(input, depth=32):
tmp = conv_bn_layer(input, ch_out, 3, stride, 1)
tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, act=None, bias_attr=True)
short = shortcut(input, ch_in, ch_out, stride)
-return fluid.layers.elementwise_add(x=tmp, y=short, act='relu')
+return paddle.nn.functional.relu(paddle.add(x=tmp, y=short))
def layer_warp(block_func, input, ch_in, ch_out, count, stride):
tmp = block_func(input, ch_in, ch_out, stride)
......
@@ -55,7 +55,7 @@ class SimpleNet(Layer):
def forward(self, x1, x2, y1):
x_emb = self.word_embeddings(x1)
fc = fluid.layers.matmul(x_emb, self.softmax_weight)
-fc = fluid.layers.elementwise_add(fc, self.softmax_bias)
+fc = paddle.add(fc, self.softmax_bias)
projection = paddle.reshape(fc, shape=[-1, vocab_size])
loss = paddle.nn.functional.softmax_with_cross_entropy(
logits=projection, label=y1, soft_label=False
@@ -95,7 +95,7 @@ class BiasNet(Layer):
def forward(self, args):
fc, x2 = args
-fc = fluid.layers.elementwise_add(fc, self.softmax_bias)
+fc = paddle.add(fc, self.softmax_bias)
projection = paddle.reshape(fc, shape=[-1, vocab_size])
return projection, x2
......
@@ -62,7 +62,7 @@ class SimpleNet(Layer):
def forward(self, x1, x2, y1):
x_emb = self.word_embeddings(x1)
fc = fluid.layers.matmul(x_emb, self.softmax_weight)
-fc = fluid.layers.elementwise_add(fc, self.softmax_bias)
+fc = paddle.add(fc, self.softmax_bias)
projection = paddle.reshape(fc, shape=[-1, vocab_size])
projection = paddle.matmul(projection, self.word_embeddings.weight)
@@ -109,7 +109,7 @@ class BiasNet(Layer):
def forward(self, args):
fc, x2 = args
-fc = fluid.layers.elementwise_add(fc, self.softmax_bias)
+fc = paddle.add(fc, self.softmax_bias)
projection = paddle.reshape(fc, shape=[-1, vocab_size])
return projection, x2
......
@@ -206,7 +206,7 @@ class BottleneckBlock(fluid.dygraph.Layer):
else:
short = self.short(inputs)
-y = fluid.layers.elementwise_add(x=short, y=scale, act='relu')
+y = paddle.nn.functional.relu(paddle.add(x=short, y=scale))
return y
......
@@ -53,7 +53,7 @@ class ElementwiseActivationMkldnnFusePassTest(InferencePassTest):
self.enable_mkldnn = True
def set_params(self):
-self.operand = fluid.layers.elementwise_add
+self.operand = paddle.add
self.act = None
def test_check_output(self):
@@ -68,7 +68,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_Relu(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_add
+self.operand = paddle.add
self.act = fluid.layers.relu
@@ -76,7 +76,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_Tanh(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_add
+self.operand = paddle.add
self.act = paddle.tanh
@@ -84,7 +84,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_LeakyRelu(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_add
+self.operand = paddle.add
self.act_alpha = 0.2
self.act = paddle.nn.functional.leaky_relu
@@ -93,7 +93,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_Swish(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_add
+self.operand = paddle.add
self.act = paddle.nn.functional.swish
@@ -101,7 +101,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_HardSwish(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_add
+self.operand = paddle.add
self.act = fluid.layers.hard_swish
@@ -109,7 +109,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_SQRT(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_add
+self.operand = paddle.add
self.act = paddle.sqrt
@@ -117,7 +117,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_ABS(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_add
+self.operand = paddle.add
self.act = paddle.abs
@@ -125,7 +125,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_Clip(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_add
+self.operand = paddle.add
self.act = fluid.layers.clip
self.act_alpha = 0.0
self.act_beta = 10.0
@@ -135,7 +135,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_Gelu(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_add
+self.operand = paddle.add
self.act = paddle.nn.functional.gelu
@@ -143,7 +143,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_Gelu_Tanh(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_add
+self.operand = paddle.add
self.act = paddle.nn.functional.gelu
self.act_alpha = True
@@ -152,7 +152,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_Relu6(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_add
+self.operand = paddle.add
self.act = paddle.nn.functional.relu6
@@ -160,7 +160,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_Sigmoid(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_add
+self.operand = paddle.add
self.act = paddle.nn.functional.sigmoid
@@ -168,7 +168,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_Relu(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_sub
+self.operand = paddle.subtract
self.act = fluid.layers.relu
@@ -176,7 +176,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_Tanh(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_sub
+self.operand = paddle.subtract
self.act = paddle.tanh
@@ -184,7 +184,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_LeakyRelu(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_sub
+self.operand = paddle.subtract
self.act_alpha = 0.2
self.act = paddle.nn.functional.leaky_relu
@@ -193,7 +193,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_Swish(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_sub
+self.operand = paddle.subtract
self.act = paddle.nn.functional.swish
@@ -201,7 +201,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_HardSwish(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_sub
+self.operand = paddle.subtract
self.act = fluid.layers.hard_swish
@@ -209,7 +209,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_ABS(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_sub
+self.operand = paddle.subtract
self.act = paddle.abs
@@ -217,7 +217,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_Clip(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_sub
+self.operand = paddle.subtract
self.act = fluid.layers.clip
self.act_alpha = 0.0
self.act_beta = 10.0
@@ -227,7 +227,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_Gelu(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_sub
+self.operand = paddle.subtract
self.act = paddle.nn.functional.gelu
@@ -235,7 +235,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_Gelu_Tanh(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_sub
+self.operand = paddle.subtract
self.act = paddle.nn.functional.gelu
self.act_alpha = True
@@ -244,7 +244,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_Relu6(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_sub
+self.operand = paddle.subtract
self.act = paddle.nn.functional.relu6
@@ -252,7 +252,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_Sigmoid(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_sub
+self.operand = paddle.subtract
self.act = paddle.nn.functional.sigmoid
@@ -260,7 +260,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_Relu(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_mul
+self.operand = paddle.multiply
self.act = fluid.layers.relu
@@ -268,7 +268,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_Tanh(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_mul
+self.operand = paddle.multiply
self.act = paddle.tanh
@@ -276,7 +276,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_LeakyRelu(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_mul
+self.operand = paddle.multiply
self.act_alpha = 0.2
self.act = paddle.nn.functional.leaky_relu
@@ -285,7 +285,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_Swish(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_mul
+self.operand = paddle.multiply
self.act = paddle.nn.functional.swish
@@ -293,7 +293,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_HardSwish(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_mul
+self.operand = paddle.multiply
self.act = fluid.layers.hard_swish
@@ -301,7 +301,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_SQRT(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_mul
+self.operand = paddle.multiply
self.act = paddle.sqrt
@@ -309,7 +309,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_ABS(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_mul
+self.operand = paddle.multiply
self.act = paddle.abs
@@ -317,7 +317,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_Clip(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_mul
+self.operand = paddle.multiply
self.act = fluid.layers.clip
self.act_alpha = 0.0
self.act_beta = 10.0
@@ -327,7 +327,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_Gelu(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_mul
+self.operand = paddle.multiply
self.act = paddle.nn.functional.gelu
@@ -335,7 +335,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_Gelu_Tanh(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_mul
+self.operand = paddle.multiply
self.act = paddle.nn.functional.gelu
self.act_alpha = True
@@ -344,7 +344,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_Relu6(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_mul
+self.operand = paddle.multiply
self.act = paddle.nn.functional.relu6
@@ -352,7 +352,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_Sigmoid(
ElementwiseActivationMkldnnFusePassTest
):
def set_params(self):
-self.operand = fluid.layers.elementwise_mul
+self.operand = paddle.multiply
self.act = paddle.nn.functional.sigmoid
......
@@ -34,9 +34,7 @@ class MkldnnInplacePassTest(InferencePassTest):
)
softmax_out = paddle.nn.functional.softmax(conv_out_1)
relu_out = fluid.layers.relu(conv_out_1)
-eltwise_out = fluid.layers.elementwise_add(
-softmax_out, relu_out, axis=-1
-)
+eltwise_out = paddle.add(softmax_out, relu_out)
self.pass_name = 'mkldnn_inplace_pass'
self.feeds = {
......
@@ -378,7 +378,7 @@ class TensorRTSubgraphPassElementwiseTest(InferencePassTest):
self.fetch_list = [out]
def append_eltwise(self, data1, data2):
-return fluid.layers.elementwise_add(x=data1, y=data2)
+return paddle.add(x=data1, y=data2)
def test_check_output(self):
if core.is_compiled_with_cuda():
@@ -439,7 +439,7 @@ class TensorRTSubgraphPassElementwiseBroadcastDynamicTest(InferencePassTest):
self.fetch_list = [out]
def append_eltwise(self, data1, data2):
-return fluid.layers.elementwise_add(x=data1, y=data2)
+return paddle.add(x=data1, y=data2)
def test_check_output(self):
if os.path.exists(self.path + "_opt_cache"):
......
@@ -17,6 +17,7 @@ import unittest
import numpy as np
from pass_test import PassTest
+import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
@@ -51,8 +52,8 @@ class EmbEltwiseLayerNormFusePassTest(PassTest):
sent_emb = fluid.layers.embedding(
input=sent_id, size=(128, 768), dtype='float32'
)
-add1 = fluid.layers.elementwise_add(word_emb, pos_emb)
-add2 = fluid.layers.elementwise_add(add1, sent_emb)
+add1 = paddle.add(word_emb, pos_emb)
+add2 = paddle.add(add1, sent_emb)
hidden1 = fluid.layers.layer_norm(input=add2, begin_norm_axis=2)
id1 = fluid.layers.data(
@@ -91,9 +92,9 @@ class EmbEltwiseLayerNormFusePassTest(PassTest):
emb4 = fluid.layers.embedding(
input=id4, size=(128, 768), dtype='float32'
)
-add_1 = fluid.layers.elementwise_add(emb1, emb2)
-add_2 = fluid.layers.elementwise_add(add_1, emb3)
-add_3 = fluid.layers.elementwise_add(add_2, emb4)
+add_1 = paddle.add(emb1, emb2)
+add_2 = paddle.add(add_1, emb3)
+add_3 = paddle.add(add_2, emb4)
hidden_1 = fluid.layers.layer_norm(input=add_3, begin_norm_axis=2)
self.feeds = {
......
@@ -142,7 +142,7 @@ class FusionGroupPassTestCastAndFP16(FusionGroupPassTest):
zero = layers.fill_constant(shape=[128], dtype="float16", value=0)
# TODO(xreki): fix precision problem when using softmax of float16.
# tmp_2 = layers.softmax(tmp_1)
-tmp_2 = layers.elementwise_add(tmp_1, zero)
+tmp_2 = paddle.add(tmp_1, zero)
tmp_3 = layers.mul(tmp_0, self.feed_vars[2])
# subgraph with 4 op nodes
tmp_3 = layers.cast(tmp_2, dtype="float16")
@@ -184,7 +184,7 @@ class FusionGroupPassCastTest(FusionGroupPassTest):
with fluid.program_guard(self.main_program, self.startup_program):
self.feed_vars = self._prepare_feed_vars([2, 2], dtype, 2)
-tmp_0 = layers.elementwise_add(self.feed_vars[0], self.feed_vars[1])
+tmp_0 = paddle.add(self.feed_vars[0], self.feed_vars[1])
tmp_1 = layers.cast(tmp_0, dtype="float64")
tmp_2 = layers.cast(tmp_1, dtype="float32")
@@ -205,12 +205,12 @@ class FusionGroupPassFillConstantTest(FusionGroupPassTest):
with fluid.program_guard(self.main_program, self.startup_program):
self.feed_vars = self._prepare_feed_vars([2, 2], dtype, 2)
-tmp_0 = layers.elementwise_add(self.feed_vars[0], self.feed_vars[1])
+tmp_0 = paddle.add(self.feed_vars[0], self.feed_vars[1])
tmp_1 = layers.fill_constant(shape=[2, 2], dtype=dtype, value=2.0)
tmp_2 = paddle.scale(
tmp_1, scale=3.0, bias=1.0, bias_after_scale=True
)
-tmp_3 = layers.elementwise_mul(tmp_2, tmp_0)
+tmp_3 = paddle.multiply(tmp_2, tmp_0)
self.append_gradients(tmp_3)
......
@@ -31,7 +31,7 @@ class SkipLayerNormFusePassTest(PassTest):
y = fluid.data(
name="y", shape=[128, 768], dtype="float32", lod_level=0
)
-elementwise_out = fluid.layers.elementwise_add(x=x, y=y)
+elementwise_out = paddle.add(x=x, y=y)
out = fluid.layers.layer_norm(input=elementwise_out)
self.fetch_list = [out]
......
@@ -16,6 +16,7 @@ import os
import numpy as np
+import paddle
import paddle.fluid as fluid
from paddle.fluid.framework import _enable_legacy_dygraph, _global_flags
from paddle.fluid.layer_helper import LayerHelper
@@ -48,7 +49,7 @@ def check():
with fluid.dygraph.guard(fluid.core.CPUPlace()):
a = fluid.dygraph.to_variable(a_np)
b = fluid.dygraph.to_variable(b_np)
-y = fluid.layers.elementwise_add(x=a, y=b)
+y = paddle.add(x=a, y=b)
y = fluid.layers.matmul(x=y, y=b, transpose_y=True)
res1 = func(y)
......