Unverified Commit 5789ac5a authored by HongyuJia, committed by GitHub

[Clean fluid] Clean fluid elementwise_arithmetic (part6 unit test) (#48467)

* clean elem_arithmetic part6 unittest

* delete op_name_conflict unittest

* restore test_op_name_conflict

* fix codestyle test_op_function_generator
Parent dab1896d
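
This commit migrates the deprecated fluid.layers.elementwise_* calls in the affected unit tests to their Paddle 2.x equivalents: elementwise_add -> paddle.add, elementwise_sub -> paddle.subtract, elementwise_mul -> paddle.multiply, elementwise_div -> paddle.divide. A minimal sketch of that mapping (illustrative only, not part of the diff; assumes a Paddle 2.x install running in dygraph mode):

# Quick numerical check of the fluid -> paddle op mapping used in this commit.
import numpy as np
import paddle

a = paddle.to_tensor(np.array([4.0, 9.0], dtype="float32"))
b = paddle.to_tensor(np.array([2.0, 3.0], dtype="float32"))

np.testing.assert_allclose(paddle.add(a, b).numpy(), [6.0, 12.0])      # was fluid.layers.elementwise_add
np.testing.assert_allclose(paddle.subtract(a, b).numpy(), [2.0, 6.0])  # was fluid.layers.elementwise_sub
np.testing.assert_allclose(paddle.multiply(a, b).numpy(), [8.0, 27.0]) # was fluid.layers.elementwise_mul
np.testing.assert_allclose(paddle.divide(a, b).numpy(), [2.0, 3.0])    # was fluid.layers.elementwise_div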
@@ -111,7 +111,7 @@ class SimpleLSTMRNN(fluid.Layer):
nn = fluid.layers.concat([self._input, pre_hidden], 1)
gate_input = fluid.layers.matmul(x=nn, y=weight_1)
-gate_input = fluid.layers.elementwise_add(gate_input, bias)
+gate_input = paddle.add(gate_input, bias)
i, j, f, o = fluid.layers.split(
gate_input, num_or_sections=4, dim=-1
)
@@ -226,7 +226,7 @@ class PtbModel(fluid.Layer):
rnn_out, shape=[-1, self.num_steps, self.hidden_size]
)
projection = fluid.layers.matmul(rnn_out, self.softmax_weight)
-projection = fluid.layers.elementwise_add(projection, self.softmax_bias)
+projection = paddle.add(projection, self.softmax_bias)
projection = paddle.reshape(projection, shape=[-1, self.vocab_size])
loss = fluid.layers.softmax_with_cross_entropy(
logits=projection, label=label, soft_label=False
......
@@ -72,13 +72,13 @@ class TestImperativeMnist(unittest.TestCase):
dy_mask.stop_gradient = True
loss_probs = paddle.log(loss_probs)
-loss_probs = fluid.layers.elementwise_mul(loss_probs, dy_mask)
+loss_probs = paddle.multiply(loss_probs, dy_mask)
loss_probs = paddle.sum(loss_probs, axis=-1)
dy_reward = fluid.dygraph.base.to_variable(reward)
dy_reward.stop_gradient = True
-loss_probs = fluid.layers.elementwise_mul(dy_reward, loss_probs)
+loss_probs = paddle.multiply(dy_reward, loss_probs)
loss = paddle.sum(loss_probs)
sgd = SGDOptimizer(
@@ -140,12 +140,10 @@ class TestImperativeMnist(unittest.TestCase):
st_loss_probs = policy(st_state)
st_loss_probs = paddle.log(st_loss_probs)
-st_loss_probs = fluid.layers.elementwise_mul(st_loss_probs, st_mask)
+st_loss_probs = paddle.multiply(st_loss_probs, st_mask)
st_loss_probs = paddle.sum(st_loss_probs, axis=-1)
-st_loss_probs = fluid.layers.elementwise_mul(
-    st_reward, st_loss_probs
-)
+st_loss_probs = paddle.multiply(st_reward, st_loss_probs)
st_loss = paddle.sum(st_loss_probs)
st_sgd.minimize(st_loss)
......
@@ -158,7 +158,7 @@ class BottleneckBlock(fluid.Layer):
else:
short = self.short(inputs)
-y = fluid.layers.elementwise_add(x=short, y=conv2)
+y = paddle.add(x=short, y=conv2)
layer_helper = LayerHelper(self.full_name(), act='relu')
return layer_helper.append_activation(y)
......
@@ -106,7 +106,7 @@ class SimpleLSTMRNN(fluid.Layer):
nn = fluid.layers.concat([self._input, pre_hidden], 1)
gate_input = fluid.layers.matmul(x=nn, y=weight_1)
-gate_input = fluid.layers.elementwise_add(gate_input, bias)
+gate_input = paddle.add(gate_input, bias)
i, j, f, o = fluid.layers.split(
gate_input, num_or_sections=4, dim=-1
)
@@ -222,7 +222,7 @@ class PtbModel(fluid.Layer):
)
projection = fluid.layers.matmul(rnn_out, self.softmax_weight)
-projection = fluid.layers.elementwise_add(projection, self.softmax_bias)
+projection = paddle.add(projection, self.softmax_bias)
projection = paddle.reshape(projection, shape=[-1, self.vocab_size])
loss = fluid.layers.softmax_with_cross_entropy(
logits=projection, label=label, soft_label=False
......
@@ -107,7 +107,7 @@ class SimpleLSTMRNN(fluid.Layer):
nn = fluid.layers.concat([self._input, pre_hidden], 1)
gate_input = fluid.layers.matmul(x=nn, y=weight_1)
-gate_input = fluid.layers.elementwise_add(gate_input, bias)
+gate_input = paddle.add(gate_input, bias)
i, j, f, o = fluid.layers.split(
gate_input, num_or_sections=4, dim=-1
)
@@ -223,7 +223,7 @@ class PtbModel(fluid.Layer):
)
projection = fluid.layers.matmul(rnn_out, self.softmax_weight)
-projection = fluid.layers.elementwise_add(projection, self.softmax_bias)
+projection = paddle.add(projection, self.softmax_bias)
projection = paddle.reshape(projection, shape=[-1, self.vocab_size])
loss = fluid.layers.softmax_with_cross_entropy(
logits=projection, label=label, soft_label=False
......
@@ -192,7 +192,7 @@ class BottleneckBlock(fluid.dygraph.Layer):
else:
short = self.short(inputs)
-y = fluid.layers.elementwise_add(x=short, y=scale)
+y = paddle.add(x=short, y=scale)
layer_helper = LayerHelper(self.full_name(), act='relu')
y = layer_helper.append_activation(y)
......
@@ -73,7 +73,7 @@ class SimpleNet(fluid.Layer):
def forward(self, input, label):
x_emb = self.embedding(input)
fc = fluid.layers.matmul(x_emb, self.softmax_weight)
-fc = fluid.layers.elementwise_add(fc, self.softmax_bias)
+fc = paddle.add(fc, self.softmax_bias)
projection = fluid.layers.matmul(
fc, paddle.transpose(self.embedding.weight, perm=[1, 0])
)
......
@@ -14,6 +14,7 @@
import unittest
+import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.framework as framework
@@ -31,7 +32,7 @@ class TestInferNoNeedBufferSlots(unittest.TestCase):
.global_block()
.create_var(dtype="float32", shape=[1], lod_level=0, name="x2")
)
-x = fluid.layers.elementwise_add(x1, x2)
+x = paddle.add(x1, x2)
return x
def test_infer_no_need_buffer_slots(self):
......
@@ -618,11 +618,11 @@ class TestLayer(LayerTest):
t5 = layers.data(name='t5', shape=[3, 3], dtype='float32')
t6 = layers.data(name='t6', shape=[3, 3], dtype='float32')
-ret = layers.elementwise_add(t, t2)
+ret = paddle.add(t, t2)
ret = paddle.pow(ret, t3)
-ret = layers.elementwise_div(ret, t4)
-ret = layers.elementwise_sub(ret, t5)
-ret = layers.elementwise_mul(ret, t6)
+ret = paddle.divide(ret, t4)
+ret = paddle.subtract(ret, t5)
+ret = paddle.multiply(ret, t6)
static_ret = self.get_static_graph_result(
feed={'t': n, 't2': n2, 't3': n3, 't4': n4, 't5': n5, 't6': n6},
@@ -631,18 +631,18 @@ class TestLayer(LayerTest):
with self.dynamic_graph():
with _test_eager_guard():
-ret = layers.elementwise_add(to_variable(n), to_variable(n2))
+ret = paddle.add(to_variable(n), to_variable(n2))
ret = paddle.pow(ret, to_variable(n3))
-ret = layers.elementwise_div(ret, to_variable(n4))
-ret = layers.elementwise_sub(ret, to_variable(n5))
-dy_eager_ret = layers.elementwise_mul(ret, to_variable(n6))
+ret = paddle.divide(ret, to_variable(n4))
+ret = paddle.subtract(ret, to_variable(n5))
+dy_eager_ret = paddle.multiply(ret, to_variable(n6))
dy_eager_ret_value = dy_eager_ret.numpy()
-ret = layers.elementwise_add(to_variable(n), to_variable(n2))
+ret = paddle.add(to_variable(n), to_variable(n2))
ret = paddle.pow(ret, to_variable(n3))
-ret = layers.elementwise_div(ret, to_variable(n4))
-ret = layers.elementwise_sub(ret, to_variable(n5))
-dy_ret = layers.elementwise_mul(ret, to_variable(n6))
+ret = paddle.divide(ret, to_variable(n4))
+ret = paddle.subtract(ret, to_variable(n5))
+dy_ret = paddle.multiply(ret, to_variable(n6))
dy_ret_value = dy_ret.numpy()
np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)
@@ -2606,10 +2606,10 @@ class TestLayer(LayerTest):
def test_cond(self):
def less_than_branch(a, b):
-return fluid.layers.elementwise_add(a, b)
+return paddle.add(a, b)
def greater_equal_branch(a, b):
-return fluid.layers.elementwise_sub(a, b)
+return paddle.subtract(a, b)
with self.static_graph():
a = fluid.layers.fill_constant(
......
@@ -16,6 +16,7 @@ import unittest
import numpy as np
+import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle import _legacy_C_ops
@@ -45,7 +46,7 @@ class TestVariable(unittest.TestCase):
y = fluid.dygraph.to_variable(b)
x.stop_gradient = False
-res1 = layers.elementwise_add(x, y)
+res1 = paddle.add(x, y)
res2 = _legacy_C_ops.elementwise_add(x, y)
np.testing.assert_array_equal(res1.numpy(), res2.numpy())
@@ -57,7 +58,7 @@ class TestVariable(unittest.TestCase):
x = fluid.dygraph.to_variable(a)
y = fluid.dygraph.to_variable(b)
-res1 = layers.elementwise_mul(x, y)
+res1 = paddle.multiply(x, y)
res2 = _legacy_C_ops.elementwise_mul(x, y)
np.testing.assert_array_equal(res1.numpy(), res2.numpy())
......
@@ -97,27 +97,21 @@ class SimpleNetWithCond:
default_initializer=fluid.initializer.NumpyArrayInitializer(self.z),
)
-sum_xy = fluid.layers.elementwise_add(param_x, param_y, name='sum_xy')
-sub_yz = fluid.layers.elementwise_sub(param_y, param_z, name='sub_yz')
+sum_xy = paddle.add(param_x, param_y, name='sum_xy')
+sub_yz = paddle.subtract(param_y, param_z, name='sub_yz')
useless = fluid.layers.fc(param_x, size=1, name='fc_useless')
def cond_true():
-cond_yz = fluid.layers.elementwise_add(
-    param_y, param_z, name='sum_cond_yz'
-)
+cond_yz = paddle.add(param_y, param_z, name='sum_cond_yz')
# param_y will not be updated
param_y.stop_gradient = self.y_no_grad
-cond_res = fluid.layers.elementwise_add(
-    cond_yz, param_z, name='sum_cond_true'
-)
-cond_useless = fluid.layers.elementwise_mul(param_x, param_y)
+cond_res = paddle.add(cond_yz, param_z, name='sum_cond_true')
+cond_useless = paddle.multiply(param_x, param_y)
return cond_res
def cond_false():
-cond_res = fluid.layers.elementwise_add(
-    param_y, param_z, name='sum_cond_false'
-)
-cond_useless = fluid.layers.elementwise_mul(param_z, param_z)
+cond_res = paddle.add(param_y, param_z, name='sum_cond_false')
+cond_useless = paddle.multiply(param_z, param_z)
return cond_res
cond_i = fluid.layers.assign(np.array([cond_i], dtype='float32'))
......
@@ -153,7 +153,7 @@ class RecurrentOpTest1(unittest.TestCase):
x_t = rnn.step_input(x)
h = paddle.scale(
-x=layers.elementwise_add(x=h_pre, y=x_t),
+x=paddle.add(x=h_pre, y=x_t),
scale=self.py_rnn.scale,
)
@@ -317,9 +317,7 @@ class RecurrentOpTest2(RecurrentOpTest1):
bias_attr=False,
)
-h = paddle.nn.functional.sigmoid(
-    x=layers.elementwise_add(x=temp_l, y=temp_r)
-)
+h = paddle.nn.functional.sigmoid(x=paddle.add(x=temp_l, y=temp_r))
rnn.update_memory(h_pre, h)
rnn.output(h)
@@ -491,7 +489,7 @@ class RecurrentOpNoMemBootTest(RecurrentOpTest1):
with rnn.step():
mem_pre = rnn.memory(shape=[-1, self.input_dim], batch_ref=x)
x_t = rnn.step_input(x)
-mem = layers.elementwise_add(x=mem_pre, y=x_t)
+mem = paddle.add(x=mem_pre, y=x_t)
rnn.update_memory(mem_pre, mem)
rnn.output(mem)
@@ -713,9 +711,7 @@ class RecurrentOpStopGradientTest(RecurrentOpTest1):
bias_attr=False,
)
-h = paddle.nn.functional.sigmoid(
-    x=layers.elementwise_add(temp_l, temp_r)
-)
+h = paddle.nn.functional.sigmoid(x=paddle.add(temp_l, temp_r))
rnn.update_memory(h_pre, h)
rnn.output(h)
......
@@ -75,9 +75,7 @@ class DecoderCell(layers.RNNCell):
layers.unsqueeze(query, [1]), encoder_output, transpose_y=True
)
if encoder_padding_mask is not None:
-attn_scores = layers.elementwise_add(
-    attn_scores, encoder_padding_mask
-)
+attn_scores = paddle.add(attn_scores, encoder_padding_mask)
attn_scores = layers.softmax(attn_scores)
attn_out = paddle.squeeze(
layers.matmul(attn_scores, encoder_output), [1]
......
@@ -347,7 +347,7 @@ class TestSGDOpBF16API(unittest.TestCase):
is_sparse=False,
dtype="uint16",
) # bfloat16
-cost = fluid.layers.elementwise_add(emb, label)
+cost = paddle.add(emb, label)
avg_cost = paddle.mean(cost)
sgd_optimizer = paddle.optimizer.SGD(
......
@@ -117,7 +117,7 @@ class SimpleLSTMRNN(fluid.Layer):
nn = fluid.layers.concat([self._input, pre_hidden], 1)
gate_input = fluid.layers.matmul(x=nn, y=weight_1)
-gate_input = fluid.layers.elementwise_add(gate_input, bias)
+gate_input = paddle.add(gate_input, bias)
i, j, f, o = fluid.layers.split(
gate_input, num_or_sections=4, dim=-1
)
@@ -235,7 +235,7 @@ class PtbModel(fluid.Layer):
rnn_out, shape=[-1, self.num_steps, self.hidden_size]
)
projection = fluid.layers.matmul(rnn_out, self.softmax_weight)
-projection = fluid.layers.elementwise_add(projection, self.softmax_bias)
+projection = paddle.add(projection, self.softmax_bias)
projection = paddle.reshape(projection, shape=[-1, self.vocab_size])
loss = fluid.layers.softmax_with_cross_entropy(
logits=projection, label=label, soft_label=False
......
@@ -160,9 +160,7 @@ class TestWeightDecay(unittest.TestCase):
optimizer.minimize(avg_cost)
for params in param_list:
-updated_p = fluid.layers.elementwise_sub(
-    x=params[0], y=params[1]
-)
+updated_p = paddle.subtract(x=params[0], y=params[1])
fluid.layers.assign(input=updated_p, output=params[0])
if use_parallel_exe:
......
@@ -32,7 +32,7 @@ class TestApiWhileLoop(unittest.TestCase):
return layers.less_than(i, ten)
def body(i):
-return layers.elementwise_add(x=i, y=one)
+return paddle.add(x=i, y=one)
main_program = Program()
startup_program = Program()
@@ -58,7 +58,7 @@ class TestApiWhileLoop(unittest.TestCase):
return layers.less_than(i, ten)
def body(i, mem):
-mem = layers.elementwise_add(x=mem, y=one)
+mem = paddle.add(x=mem, y=one)
i = layers.increment(i)
return [i, mem]
@@ -166,8 +166,8 @@ class TestApiWhileLoop_Nested(unittest.TestCase):
return layers.less_than(j, loop_len2)
def internal_body(j, init, sums):
-init = layers.elementwise_add(x=init, y=ones)
-sums = layers.elementwise_add(x=init, y=sums)
+init = paddle.add(x=init, y=ones)
+sums = paddle.add(x=init, y=sums)
j = layers.increment(j)
return [j, init, sums]
@@ -177,7 +177,7 @@ class TestApiWhileLoop_Nested(unittest.TestCase):
j = result[0]
init = result[1]
sums = result[2]
-sums = layers.elementwise_add(x=init, y=sums)
+sums = paddle.add(x=init, y=sums)
i = layers.increment(i)
return [i, j, init, sums]
@@ -222,7 +222,7 @@ class TestApiWhileLoop_Backward(unittest.TestCase):
return layers.less_than(i, eleven)
def body(i, x):
-x = layers.elementwise_mul(x=i, y=i)
+x = paddle.multiply(x=i, y=i)
i = layers.increment(i)
return [i, x]
@@ -316,16 +316,16 @@ class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase):
def internal_body(j, x, mem_array):
inner_data = layers.array_read(array=data_array, i=j)
inner_prev = layers.array_read(array=mem_array, i=j)
-inner_sum_0 = layers.elementwise_add(x=inner_data, y=inner_prev)
-inner_sum_1 = layers.elementwise_add(x=x, y=inner_sum_0)
+inner_sum_0 = paddle.add(x=inner_data, y=inner_prev)
+inner_sum_1 = paddle.add(x=x, y=inner_sum_0)
j = layers.increment(x=j, in_place=True)
layers.array_write(inner_sum_1, i=j, array=mem_array)
return [j, x, mem_array]
outer_data = layers.array_read(array=data_array, i=i)
outer_prev = layers.array_read(array=mem_array, i=i)
-outer_sum_0 = layers.elementwise_add(x=outer_data, y=outer_prev)
-outer_sum_1 = layers.elementwise_add(x=x, y=outer_sum_0)
+outer_sum_0 = paddle.add(x=outer_data, y=outer_prev)
+outer_sum_1 = paddle.add(x=x, y=outer_sum_0)
i = layers.increment(x=i, in_place=True)
layers.array_write(outer_sum_1, i=i, array=mem_array)
j, x, mem_array = layers.while_loop(
@@ -394,15 +394,15 @@ class TestApiWhileLoopWithSwitchCase(unittest.TestCase):
def body(i):
def fn_add_three():
-data_add_three = layers.elementwise_add(x=i, y=three)
+data_add_three = paddle.add(x=i, y=three)
return data_add_three
def fn_square():
-data_mul_data = layers.elementwise_mul(x=i, y=i)
+data_mul_data = paddle.multiply(x=i, y=i)
return data_mul_data
def fn_add_one():
-data_add_one = layers.elementwise_add(x=i, y=one)
+data_add_one = paddle.add(x=i, y=one)
return data_add_one
return layers.switch_case(
......
@@ -164,7 +164,7 @@ def multi_head_attention(
scaled_q = paddle.scale(x=q, scale=d_model**-0.5)
product = layers.matmul(x=scaled_q, y=k, transpose_y=True)
-weights = __softmax(layers.elementwise_add(x=product, y=attn_bias))
+weights = __softmax(paddle.add(x=product, y=attn_bias))
if dropout_rate:
weights = layers.dropout(
weights, dropout_prob=dropout_rate, is_test=False
......