Unverified · commit 5789ac5a, authored by HongyuJia, committed by GitHub

[Clean fluid] Clean fluid elementwise_arithmetic (part6 unit test) (#48467)

* clean elem_arithmetic part6 unittest

* delete op_name_conflict unittest

* restore test_op_name_conflict

* fix codestyle test_op_function_generator
Parent: dab1896d
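The diff below mechanically swaps deprecated `fluid.layers.elementwise_*` calls in unit tests for their `paddle.*` equivalents. A minimal sketch of the mapping, assuming Paddle 2.x in dygraph mode (the tensors here are illustrative, not taken from the patch):

    import paddle

    # Illustrative inputs; each patched test uses its own tensors.
    x = paddle.to_tensor([1.0, 2.0, 3.0])
    y = paddle.to_tensor([4.0, 5.0, 6.0])

    out_add = paddle.add(x, y)       # replaces fluid.layers.elementwise_add
    out_sub = paddle.subtract(x, y)  # replaces fluid.layers.elementwise_sub
    out_mul = paddle.multiply(x, y)  # replaces fluid.layers.elementwise_mul
    out_div = paddle.divide(x, y)    # replaces fluid.layers.elementwise_div

    print(out_add.numpy())  # [5. 7. 9.]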
@@ -111,7 +111,7 @@ class SimpleLSTMRNN(fluid.Layer):
                 nn = fluid.layers.concat([self._input, pre_hidden], 1)
                 gate_input = fluid.layers.matmul(x=nn, y=weight_1)

-                gate_input = fluid.layers.elementwise_add(gate_input, bias)
+                gate_input = paddle.add(gate_input, bias)
                 i, j, f, o = fluid.layers.split(
                     gate_input, num_or_sections=4, dim=-1
                 )
@@ -226,7 +226,7 @@ class PtbModel(fluid.Layer):
             rnn_out, shape=[-1, self.num_steps, self.hidden_size]
         )
         projection = fluid.layers.matmul(rnn_out, self.softmax_weight)
-        projection = fluid.layers.elementwise_add(projection, self.softmax_bias)
+        projection = paddle.add(projection, self.softmax_bias)
         projection = paddle.reshape(projection, shape=[-1, self.vocab_size])
         loss = fluid.layers.softmax_with_cross_entropy(
             logits=projection, label=label, soft_label=False
...
@@ -72,13 +72,13 @@ class TestImperativeMnist(unittest.TestCase):
             dy_mask.stop_gradient = True

             loss_probs = paddle.log(loss_probs)
-            loss_probs = fluid.layers.elementwise_mul(loss_probs, dy_mask)
+            loss_probs = paddle.multiply(loss_probs, dy_mask)
             loss_probs = paddle.sum(loss_probs, axis=-1)

             dy_reward = fluid.dygraph.base.to_variable(reward)
             dy_reward.stop_gradient = True

-            loss_probs = fluid.layers.elementwise_mul(dy_reward, loss_probs)
+            loss_probs = paddle.multiply(dy_reward, loss_probs)
             loss = paddle.sum(loss_probs)

             sgd = SGDOptimizer(
@@ -140,12 +140,10 @@ class TestImperativeMnist(unittest.TestCase):
                 st_loss_probs = policy(st_state)

                 st_loss_probs = paddle.log(st_loss_probs)
-                st_loss_probs = fluid.layers.elementwise_mul(st_loss_probs, st_mask)
+                st_loss_probs = paddle.multiply(st_loss_probs, st_mask)
                 st_loss_probs = paddle.sum(st_loss_probs, axis=-1)

-                st_loss_probs = fluid.layers.elementwise_mul(
-                    st_reward, st_loss_probs
-                )
+                st_loss_probs = paddle.multiply(st_reward, st_loss_probs)
                 st_loss = paddle.sum(st_loss_probs)

                 st_sgd.minimize(st_loss)
...
@@ -158,7 +158,7 @@ class BottleneckBlock(fluid.Layer):
         else:
             short = self.short(inputs)

-        y = fluid.layers.elementwise_add(x=short, y=conv2)
+        y = paddle.add(x=short, y=conv2)

         layer_helper = LayerHelper(self.full_name(), act='relu')
         return layer_helper.append_activation(y)
...
@@ -106,7 +106,7 @@ class SimpleLSTMRNN(fluid.Layer):
                 nn = fluid.layers.concat([self._input, pre_hidden], 1)
                 gate_input = fluid.layers.matmul(x=nn, y=weight_1)

-                gate_input = fluid.layers.elementwise_add(gate_input, bias)
+                gate_input = paddle.add(gate_input, bias)
                 i, j, f, o = fluid.layers.split(
                     gate_input, num_or_sections=4, dim=-1
                 )
@@ -222,7 +222,7 @@ class PtbModel(fluid.Layer):
         )
         projection = fluid.layers.matmul(rnn_out, self.softmax_weight)
-        projection = fluid.layers.elementwise_add(projection, self.softmax_bias)
+        projection = paddle.add(projection, self.softmax_bias)
         projection = paddle.reshape(projection, shape=[-1, self.vocab_size])
         loss = fluid.layers.softmax_with_cross_entropy(
             logits=projection, label=label, soft_label=False
...
@@ -107,7 +107,7 @@ class SimpleLSTMRNN(fluid.Layer):
                 nn = fluid.layers.concat([self._input, pre_hidden], 1)
                 gate_input = fluid.layers.matmul(x=nn, y=weight_1)

-                gate_input = fluid.layers.elementwise_add(gate_input, bias)
+                gate_input = paddle.add(gate_input, bias)
                 i, j, f, o = fluid.layers.split(
                     gate_input, num_or_sections=4, dim=-1
                 )
@@ -223,7 +223,7 @@ class PtbModel(fluid.Layer):
         )
         projection = fluid.layers.matmul(rnn_out, self.softmax_weight)
-        projection = fluid.layers.elementwise_add(projection, self.softmax_bias)
+        projection = paddle.add(projection, self.softmax_bias)
         projection = paddle.reshape(projection, shape=[-1, self.vocab_size])
         loss = fluid.layers.softmax_with_cross_entropy(
             logits=projection, label=label, soft_label=False
...
@@ -192,7 +192,7 @@ class BottleneckBlock(fluid.dygraph.Layer):
         else:
             short = self.short(inputs)

-        y = fluid.layers.elementwise_add(x=short, y=scale)
+        y = paddle.add(x=short, y=scale)

         layer_helper = LayerHelper(self.full_name(), act='relu')
         y = layer_helper.append_activation(y)
...
@@ -73,7 +73,7 @@ class SimpleNet(fluid.Layer):
     def forward(self, input, label):
         x_emb = self.embedding(input)
         fc = fluid.layers.matmul(x_emb, self.softmax_weight)
-        fc = fluid.layers.elementwise_add(fc, self.softmax_bias)
+        fc = paddle.add(fc, self.softmax_bias)
         projection = fluid.layers.matmul(
             fc, paddle.transpose(self.embedding.weight, perm=[1, 0])
         )
...
@@ -14,6 +14,7 @@
 import unittest

+import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 import paddle.fluid.framework as framework
@@ -31,7 +32,7 @@ class TestInferNoNeedBufferSlots(unittest.TestCase):
             .global_block()
             .create_var(dtype="float32", shape=[1], lod_level=0, name="x2")
         )
-        x = fluid.layers.elementwise_add(x1, x2)
+        x = paddle.add(x1, x2)
         return x

     def test_infer_no_need_buffer_slots(self):
...
@@ -618,11 +618,11 @@ class TestLayer(LayerTest):
             t5 = layers.data(name='t5', shape=[3, 3], dtype='float32')
             t6 = layers.data(name='t6', shape=[3, 3], dtype='float32')

-            ret = layers.elementwise_add(t, t2)
+            ret = paddle.add(t, t2)
             ret = paddle.pow(ret, t3)
-            ret = layers.elementwise_div(ret, t4)
-            ret = layers.elementwise_sub(ret, t5)
-            ret = layers.elementwise_mul(ret, t6)
+            ret = paddle.divide(ret, t4)
+            ret = paddle.subtract(ret, t5)
+            ret = paddle.multiply(ret, t6)

             static_ret = self.get_static_graph_result(
                 feed={'t': n, 't2': n2, 't3': n3, 't4': n4, 't5': n5, 't6': n6},
@@ -631,18 +631,18 @@ class TestLayer(LayerTest):
         with self.dynamic_graph():
             with _test_eager_guard():
-                ret = layers.elementwise_add(to_variable(n), to_variable(n2))
+                ret = paddle.add(to_variable(n), to_variable(n2))
                 ret = paddle.pow(ret, to_variable(n3))
-                ret = layers.elementwise_div(ret, to_variable(n4))
-                ret = layers.elementwise_sub(ret, to_variable(n5))
-                dy_eager_ret = layers.elementwise_mul(ret, to_variable(n6))
+                ret = paddle.divide(ret, to_variable(n4))
+                ret = paddle.subtract(ret, to_variable(n5))
+                dy_eager_ret = paddle.multiply(ret, to_variable(n6))
                 dy_eager_ret_value = dy_eager_ret.numpy()

-            ret = layers.elementwise_add(to_variable(n), to_variable(n2))
+            ret = paddle.add(to_variable(n), to_variable(n2))
             ret = paddle.pow(ret, to_variable(n3))
-            ret = layers.elementwise_div(ret, to_variable(n4))
-            ret = layers.elementwise_sub(ret, to_variable(n5))
-            dy_ret = layers.elementwise_mul(ret, to_variable(n6))
+            ret = paddle.divide(ret, to_variable(n4))
+            ret = paddle.subtract(ret, to_variable(n5))
+            dy_ret = paddle.multiply(ret, to_variable(n6))
             dy_ret_value = dy_ret.numpy()

         np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)
@@ -2606,10 +2606,10 @@ class TestLayer(LayerTest):
     def test_cond(self):
         def less_than_branch(a, b):
-            return fluid.layers.elementwise_add(a, b)
+            return paddle.add(a, b)

         def greater_equal_branch(a, b):
-            return fluid.layers.elementwise_sub(a, b)
+            return paddle.subtract(a, b)

         with self.static_graph():
             a = fluid.layers.fill_constant(
...
@@ -16,6 +16,7 @@ import unittest

 import numpy as np

+import paddle
 import paddle.fluid as fluid
 import paddle.fluid.layers as layers
 from paddle import _legacy_C_ops
@@ -45,7 +46,7 @@ class TestVariable(unittest.TestCase):
             y = fluid.dygraph.to_variable(b)
             x.stop_gradient = False

-            res1 = layers.elementwise_add(x, y)
+            res1 = paddle.add(x, y)
             res2 = _legacy_C_ops.elementwise_add(x, y)

             np.testing.assert_array_equal(res1.numpy(), res2.numpy())
@@ -57,7 +58,7 @@ class TestVariable(unittest.TestCase):
             x = fluid.dygraph.to_variable(a)
             y = fluid.dygraph.to_variable(b)

-            res1 = layers.elementwise_mul(x, y)
+            res1 = paddle.multiply(x, y)
             res2 = _legacy_C_ops.elementwise_mul(x, y)

             np.testing.assert_array_equal(res1.numpy(), res2.numpy())
...
@@ -97,27 +97,21 @@ class SimpleNetWithCond:
             default_initializer=fluid.initializer.NumpyArrayInitializer(self.z),
         )

-        sum_xy = fluid.layers.elementwise_add(param_x, param_y, name='sum_xy')
-        sub_yz = fluid.layers.elementwise_sub(param_y, param_z, name='sub_yz')
+        sum_xy = paddle.add(param_x, param_y, name='sum_xy')
+        sub_yz = paddle.subtract(param_y, param_z, name='sub_yz')
         useless = fluid.layers.fc(param_x, size=1, name='fc_useless')

         def cond_true():
-            cond_yz = fluid.layers.elementwise_add(
-                param_y, param_z, name='sum_cond_yz'
-            )
+            cond_yz = paddle.add(param_y, param_z, name='sum_cond_yz')
             # param_y will not be updated
             param_y.stop_gradient = self.y_no_grad
-            cond_res = fluid.layers.elementwise_add(
-                cond_yz, param_z, name='sum_cond_true'
-            )
-            cond_useless = fluid.layers.elementwise_mul(param_x, param_y)
+            cond_res = paddle.add(cond_yz, param_z, name='sum_cond_true')
+            cond_useless = paddle.multiply(param_x, param_y)
             return cond_res

         def cond_false():
-            cond_res = fluid.layers.elementwise_add(
-                param_y, param_z, name='sum_cond_false'
-            )
-            cond_useless = fluid.layers.elementwise_mul(param_z, param_z)
+            cond_res = paddle.add(param_y, param_z, name='sum_cond_false')
+            cond_useless = paddle.multiply(param_z, param_z)
             return cond_res

         cond_i = fluid.layers.assign(np.array([cond_i], dtype='float32'))
...
@@ -153,7 +153,7 @@ class RecurrentOpTest1(unittest.TestCase):
             x_t = rnn.step_input(x)

             h = paddle.scale(
-                x=layers.elementwise_add(x=h_pre, y=x_t),
+                x=paddle.add(x=h_pre, y=x_t),
                 scale=self.py_rnn.scale,
             )
@@ -317,9 +317,7 @@ class RecurrentOpTest2(RecurrentOpTest1):
                 bias_attr=False,
             )

-            h = paddle.nn.functional.sigmoid(
-                x=layers.elementwise_add(x=temp_l, y=temp_r)
-            )
+            h = paddle.nn.functional.sigmoid(x=paddle.add(x=temp_l, y=temp_r))

             rnn.update_memory(h_pre, h)
             rnn.output(h)
@@ -491,7 +489,7 @@ class RecurrentOpNoMemBootTest(RecurrentOpTest1):
         with rnn.step():
             mem_pre = rnn.memory(shape=[-1, self.input_dim], batch_ref=x)
             x_t = rnn.step_input(x)
-            mem = layers.elementwise_add(x=mem_pre, y=x_t)
+            mem = paddle.add(x=mem_pre, y=x_t)
             rnn.update_memory(mem_pre, mem)
             rnn.output(mem)
@@ -713,9 +711,7 @@ class RecurrentOpStopGradientTest(RecurrentOpTest1):
                 bias_attr=False,
             )

-            h = paddle.nn.functional.sigmoid(
-                x=layers.elementwise_add(temp_l, temp_r)
-            )
+            h = paddle.nn.functional.sigmoid(x=paddle.add(temp_l, temp_r))

             rnn.update_memory(h_pre, h)
             rnn.output(h)
...
@@ -75,9 +75,7 @@ class DecoderCell(layers.RNNCell):
             layers.unsqueeze(query, [1]), encoder_output, transpose_y=True
         )
         if encoder_padding_mask is not None:
-            attn_scores = layers.elementwise_add(
-                attn_scores, encoder_padding_mask
-            )
+            attn_scores = paddle.add(attn_scores, encoder_padding_mask)
         attn_scores = layers.softmax(attn_scores)
         attn_out = paddle.squeeze(
             layers.matmul(attn_scores, encoder_output), [1]
...
@@ -347,7 +347,7 @@ class TestSGDOpBF16API(unittest.TestCase):
                 is_sparse=False,
                 dtype="uint16",
             )  # bfloat16
-            cost = fluid.layers.elementwise_add(emb, label)
+            cost = paddle.add(emb, label)
             avg_cost = paddle.mean(cost)

             sgd_optimizer = paddle.optimizer.SGD(
...
@@ -117,7 +117,7 @@ class SimpleLSTMRNN(fluid.Layer):
                 nn = fluid.layers.concat([self._input, pre_hidden], 1)
                 gate_input = fluid.layers.matmul(x=nn, y=weight_1)

-                gate_input = fluid.layers.elementwise_add(gate_input, bias)
+                gate_input = paddle.add(gate_input, bias)
                 i, j, f, o = fluid.layers.split(
                     gate_input, num_or_sections=4, dim=-1
                 )
@@ -235,7 +235,7 @@ class PtbModel(fluid.Layer):
             rnn_out, shape=[-1, self.num_steps, self.hidden_size]
         )
         projection = fluid.layers.matmul(rnn_out, self.softmax_weight)
-        projection = fluid.layers.elementwise_add(projection, self.softmax_bias)
+        projection = paddle.add(projection, self.softmax_bias)
         projection = paddle.reshape(projection, shape=[-1, self.vocab_size])
         loss = fluid.layers.softmax_with_cross_entropy(
             logits=projection, label=label, soft_label=False
...
@@ -160,9 +160,7 @@ class TestWeightDecay(unittest.TestCase):
             optimizer.minimize(avg_cost)

             for params in param_list:
-                updated_p = fluid.layers.elementwise_sub(
-                    x=params[0], y=params[1]
-                )
+                updated_p = paddle.subtract(x=params[0], y=params[1])
                 fluid.layers.assign(input=updated_p, output=params[0])

         if use_parallel_exe:
...
@@ -32,7 +32,7 @@ class TestApiWhileLoop(unittest.TestCase):
             return layers.less_than(i, ten)

         def body(i):
-            return layers.elementwise_add(x=i, y=one)
+            return paddle.add(x=i, y=one)

         main_program = Program()
         startup_program = Program()
@@ -58,7 +58,7 @@ class TestApiWhileLoop(unittest.TestCase):
             return layers.less_than(i, ten)

         def body(i, mem):
-            mem = layers.elementwise_add(x=mem, y=one)
+            mem = paddle.add(x=mem, y=one)
             i = layers.increment(i)
             return [i, mem]
@@ -166,8 +166,8 @@ class TestApiWhileLoop_Nested(unittest.TestCase):
                 return layers.less_than(j, loop_len2)

             def internal_body(j, init, sums):
-                init = layers.elementwise_add(x=init, y=ones)
-                sums = layers.elementwise_add(x=init, y=sums)
+                init = paddle.add(x=init, y=ones)
+                sums = paddle.add(x=init, y=sums)
                 j = layers.increment(j)
                 return [j, init, sums]
@@ -177,7 +177,7 @@ class TestApiWhileLoop_Nested(unittest.TestCase):
             j = result[0]
             init = result[1]
             sums = result[2]
-            sums = layers.elementwise_add(x=init, y=sums)
+            sums = paddle.add(x=init, y=sums)
             i = layers.increment(i)
             return [i, j, init, sums]
@@ -222,7 +222,7 @@ class TestApiWhileLoop_Backward(unittest.TestCase):
             return layers.less_than(i, eleven)

         def body(i, x):
-            x = layers.elementwise_mul(x=i, y=i)
+            x = paddle.multiply(x=i, y=i)
             i = layers.increment(i)
             return [i, x]
@@ -316,16 +316,16 @@ class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase):
             def internal_body(j, x, mem_array):
                 inner_data = layers.array_read(array=data_array, i=j)
                 inner_prev = layers.array_read(array=mem_array, i=j)
-                inner_sum_0 = layers.elementwise_add(x=inner_data, y=inner_prev)
-                inner_sum_1 = layers.elementwise_add(x=x, y=inner_sum_0)
+                inner_sum_0 = paddle.add(x=inner_data, y=inner_prev)
+                inner_sum_1 = paddle.add(x=x, y=inner_sum_0)
                 j = layers.increment(x=j, in_place=True)
                 layers.array_write(inner_sum_1, i=j, array=mem_array)
                 return [j, x, mem_array]

             outer_data = layers.array_read(array=data_array, i=i)
             outer_prev = layers.array_read(array=mem_array, i=i)
-            outer_sum_0 = layers.elementwise_add(x=outer_data, y=outer_prev)
-            outer_sum_1 = layers.elementwise_add(x=x, y=outer_sum_0)
+            outer_sum_0 = paddle.add(x=outer_data, y=outer_prev)
+            outer_sum_1 = paddle.add(x=x, y=outer_sum_0)
             i = layers.increment(x=i, in_place=True)
             layers.array_write(outer_sum_1, i=i, array=mem_array)

             j, x, mem_array = layers.while_loop(
@@ -394,15 +394,15 @@ class TestApiWhileLoopWithSwitchCase(unittest.TestCase):
         def body(i):
             def fn_add_three():
-                data_add_three = layers.elementwise_add(x=i, y=three)
+                data_add_three = paddle.add(x=i, y=three)
                 return data_add_three

             def fn_square():
-                data_mul_data = layers.elementwise_mul(x=i, y=i)
+                data_mul_data = paddle.multiply(x=i, y=i)
                 return data_mul_data

             def fn_add_one():
-                data_add_one = layers.elementwise_add(x=i, y=one)
+                data_add_one = paddle.add(x=i, y=one)
                 return data_add_one

             return layers.switch_case(
...
@@ -164,7 +164,7 @@ def multi_head_attention(
         scaled_q = paddle.scale(x=q, scale=d_model**-0.5)
         product = layers.matmul(x=scaled_q, y=k, transpose_y=True)

-        weights = __softmax(layers.elementwise_add(x=product, y=attn_bias))
+        weights = __softmax(paddle.add(x=product, y=attn_bias))
         if dropout_rate:
             weights = layers.dropout(
                 weights, dropout_prob=dropout_rate, is_test=False
...
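Several of the rewritten tests compare the new ops against `_legacy_C_ops`. A standalone sanity check of the same equivalence, as a sketch with illustrative shapes and data (not taken from the patch):

    import numpy as np
    import paddle

    # Illustrative inputs; the real tests use their own data.
    a = np.random.uniform(-1, 1, [2, 3]).astype('float32')
    b = np.random.uniform(-1, 1, [2, 3]).astype('float32')

    x = paddle.to_tensor(a)
    y = paddle.to_tensor(b)

    # paddle.add / paddle.multiply should reproduce what the
    # fluid.layers.elementwise_* ops computed element-wise.
    np.testing.assert_allclose(paddle.add(x, y).numpy(), a + b, rtol=1e-05)
    np.testing.assert_allclose(paddle.multiply(x, y).numpy(), a * b, rtol=1e-05)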