Unverified commit 9218e742 authored by H HongyuJia, committed by GitHub

clean elem_arithmetic part5 unittest (#48466)

Parent 96a8bbe7
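This commit migrates the unit tests below from the deprecated fluid.layers.elementwise_* ops to their Paddle 2.x equivalents. As a minimal sketch of the mapping applied throughout (illustrative only, not part of the commit; x and y are hypothetical placeholder tensors):

# Minimal sketch of the API migration in this PR (illustrative only;
# x and y are hypothetical placeholders, not names from the diff).
import paddle

x = paddle.ones([2, 3], dtype='float32')
y = paddle.ones([2, 3], dtype='float32')

out = paddle.add(x, y)       # replaces fluid.layers.elementwise_add(x, y)
out = paddle.subtract(x, y)  # replaces fluid.layers.elementwise_sub(x, y)
out = paddle.multiply(x, y)  # replaces fluid.layers.elementwise_mul(x, y)
out = paddle.divide(x, y)    # replaces fluid.layers.elementwise_div(x, y)

# The fused activation argument is unfused into an explicit call:
out = paddle.nn.functional.relu(paddle.add(x, y))
# replaces fluid.layers.elementwise_add(x, y, act='relu')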
@@ -39,7 +39,7 @@ class TestPSPassWithBow(unittest.TestCase):
             cond = fluid.layers.less_than(cos_q_nt, cos_q_pt)
             cond = fluid.layers.cast(cond, dtype='float64')
             cond_3 = paddle.sum(cond)
-            acc = fluid.layers.elementwise_div(
+            acc = paddle.divide(
                 cond_3,
                 fluid.layers.fill_constant(
                     shape=[1], value=batch_size * 1.0, dtype='float64'
@@ -49,13 +49,13 @@ class TestPSPassWithBow(unittest.TestCase):
             return acc

         def get_loss(cos_q_pt, cos_q_nt):
-            loss_op1 = fluid.layers.elementwise_sub(
+            loss_op1 = paddle.subtract(
                 fluid.layers.fill_constant_batch_size_like(
                     input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32'
                 ),
                 cos_q_pt,
             )
-            loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt)
+            loss_op2 = paddle.add(loss_op1, cos_q_nt)
             loss_op3 = paddle.maximum(
                 fluid.layers.fill_constant_batch_size_like(
                     input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32'
...
@@ -39,7 +39,7 @@ class TestPSPassWithBow(unittest.TestCase):
             cond = fluid.layers.less_than(cos_q_nt, cos_q_pt)
             cond = fluid.layers.cast(cond, dtype='float64')
             cond_3 = paddle.sum(cond)
-            acc = fluid.layers.elementwise_div(
+            acc = paddle.divide(
                 cond_3,
                 fluid.layers.fill_constant(
                     shape=[1], value=batch_size * 1.0, dtype='float64'
@@ -49,13 +49,13 @@ class TestPSPassWithBow(unittest.TestCase):
             return acc

         def get_loss(cos_q_pt, cos_q_nt):
-            loss_op1 = fluid.layers.elementwise_sub(
+            loss_op1 = paddle.subtract(
                 fluid.layers.fill_constant_batch_size_like(
                     input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32'
                 ),
                 cos_q_pt,
             )
-            loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt)
+            loss_op2 = paddle.add(loss_op1, cos_q_nt)
             loss_op3 = paddle.maximum(
                 fluid.layers.fill_constant_batch_size_like(
                     input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32'
...
@@ -42,7 +42,7 @@ class TestPSPassWithBow(unittest.TestCase):
             cond = fluid.layers.less_than(cos_q_nt, cos_q_pt)
             cond = fluid.layers.cast(cond, dtype='float64')
             cond_3 = paddle.sum(cond)
-            acc = fluid.layers.elementwise_div(
+            acc = paddle.divide(
                 cond_3,
                 fluid.layers.fill_constant(
                     shape=[1], value=batch_size * 1.0, dtype='float64'
@@ -52,13 +52,13 @@ class TestPSPassWithBow(unittest.TestCase):
             return acc

         def get_loss(cos_q_pt, cos_q_nt):
-            loss_op1 = fluid.layers.elementwise_sub(
+            loss_op1 = paddle.subtract(
                 fluid.layers.fill_constant_batch_size_like(
                     input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32'
                 ),
                 cos_q_pt,
             )
-            loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt)
+            loss_op2 = paddle.add(loss_op1, cos_q_nt)
             loss_op3 = paddle.maximum(
                 fluid.layers.fill_constant_batch_size_like(
                     input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32'
...
@@ -43,7 +43,7 @@ class TestPSPassWithBow(unittest.TestCase):
             cond = fluid.layers.less_than(cos_q_nt, cos_q_pt)
             cond = fluid.layers.cast(cond, dtype='float64')
             cond_3 = paddle.sum(cond)
-            acc = fluid.layers.elementwise_div(
+            acc = paddle.divide(
                 cond_3,
                 fluid.layers.fill_constant(
                     shape=[1], value=batch_size * 1.0, dtype='float64'
@@ -53,13 +53,13 @@ class TestPSPassWithBow(unittest.TestCase):
             return acc

         def get_loss(cos_q_pt, cos_q_nt):
-            loss_op1 = fluid.layers.elementwise_sub(
+            loss_op1 = paddle.subtract(
                 fluid.layers.fill_constant_batch_size_like(
                     input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32'
                 ),
                 cos_q_pt,
             )
-            loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt)
+            loss_op2 = paddle.add(loss_op1, cos_q_nt)
             loss_op3 = paddle.maximum(
                 fluid.layers.fill_constant_batch_size_like(
                     input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32'
...
@@ -42,7 +42,7 @@ class TestPSPassWithBow(unittest.TestCase):
             cond = fluid.layers.less_than(cos_q_nt, cos_q_pt)
             cond = fluid.layers.cast(cond, dtype='float64')
             cond_3 = paddle.sum(cond)
-            acc = fluid.layers.elementwise_div(
+            acc = paddle.divide(
                 cond_3,
                 fluid.layers.fill_constant(
                     shape=[1], value=batch_size * 1.0, dtype='float64'
@@ -52,13 +52,13 @@ class TestPSPassWithBow(unittest.TestCase):
             return acc

         def get_loss(cos_q_pt, cos_q_nt):
-            loss_op1 = fluid.layers.elementwise_sub(
+            loss_op1 = paddle.subtract(
                 fluid.layers.fill_constant_batch_size_like(
                     input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32'
                 ),
                 cos_q_pt,
             )
-            loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt)
+            loss_op2 = paddle.add(loss_op1, cos_q_nt)
             loss_op3 = paddle.maximum(
                 fluid.layers.fill_constant_batch_size_like(
                     input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32'
...
@@ -39,7 +39,7 @@ class TestPSPassWithBow(unittest.TestCase):
             cond = fluid.layers.less_than(cos_q_nt, cos_q_pt)
             cond = fluid.layers.cast(cond, dtype='float64')
             cond_3 = paddle.sum(cond)
-            acc = fluid.layers.elementwise_div(
+            acc = paddle.divide(
                 cond_3,
                 fluid.layers.fill_constant(
                     shape=[1], value=batch_size * 1.0, dtype='float64'
@@ -49,13 +49,13 @@ class TestPSPassWithBow(unittest.TestCase):
             return acc

         def get_loss(cos_q_pt, cos_q_nt):
-            loss_op1 = fluid.layers.elementwise_sub(
+            loss_op1 = paddle.subtract(
                 fluid.layers.fill_constant_batch_size_like(
                     input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32'
                 ),
                 cos_q_pt,
             )
-            loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt)
+            loss_op2 = paddle.add(loss_op1, cos_q_nt)
             loss_op3 = paddle.maximum(
                 fluid.layers.fill_constant_batch_size_like(
                     input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32'
...
@@ -39,7 +39,7 @@ class TestPSPassWithBow(unittest.TestCase):
             cond = fluid.layers.less_than(cos_q_nt, cos_q_pt)
             cond = fluid.layers.cast(cond, dtype='float64')
             cond_3 = paddle.sum(cond)
-            acc = fluid.layers.elementwise_div(
+            acc = paddle.divide(
                 cond_3,
                 fluid.layers.fill_constant(
                     shape=[1], value=batch_size * 1.0, dtype='float64'
@@ -49,13 +49,13 @@ class TestPSPassWithBow(unittest.TestCase):
             return acc

         def get_loss(cos_q_pt, cos_q_nt):
-            loss_op1 = fluid.layers.elementwise_sub(
+            loss_op1 = paddle.subtract(
                 fluid.layers.fill_constant_batch_size_like(
                     input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32'
                 ),
                 cos_q_pt,
             )
-            loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt)
+            loss_op2 = paddle.add(loss_op1, cos_q_nt)
             loss_op3 = paddle.maximum(
                 fluid.layers.fill_constant_batch_size_like(
                     input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32'
...
@@ -39,7 +39,7 @@ class TestPSPassWithBow(unittest.TestCase):
             cond = fluid.layers.less_than(cos_q_nt, cos_q_pt)
             cond = fluid.layers.cast(cond, dtype='float64')
             cond_3 = paddle.sum(cond)
-            acc = fluid.layers.elementwise_div(
+            acc = paddle.divide(
                 cond_3,
                 fluid.layers.fill_constant(
                     shape=[1], value=batch_size * 1.0, dtype='float64'
@@ -49,13 +49,13 @@ class TestPSPassWithBow(unittest.TestCase):
             return acc

         def get_loss(cos_q_pt, cos_q_nt):
-            loss_op1 = fluid.layers.elementwise_sub(
+            loss_op1 = paddle.subtract(
                 fluid.layers.fill_constant_batch_size_like(
                     input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32'
                 ),
                 cos_q_pt,
             )
-            loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt)
+            loss_op2 = paddle.add(loss_op1, cos_q_nt)
             loss_op3 = paddle.maximum(
                 fluid.layers.fill_constant_batch_size_like(
                     input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32'
...
@@ -39,7 +39,7 @@ class TestPSPassWithBow(unittest.TestCase):
             cond = fluid.layers.less_than(cos_q_nt, cos_q_pt)
             cond = fluid.layers.cast(cond, dtype='float64')
             cond_3 = paddle.sum(cond)
-            acc = fluid.layers.elementwise_div(
+            acc = paddle.divide(
                 cond_3,
                 fluid.layers.fill_constant(
                     shape=[1], value=batch_size * 1.0, dtype='float64'
@@ -49,13 +49,13 @@ class TestPSPassWithBow(unittest.TestCase):
             return acc

         def get_loss(cos_q_pt, cos_q_nt):
-            loss_op1 = fluid.layers.elementwise_sub(
+            loss_op1 = paddle.subtract(
                 fluid.layers.fill_constant_batch_size_like(
                     input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32'
                 ),
                 cos_q_pt,
             )
-            loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt)
+            loss_op2 = paddle.add(loss_op1, cos_q_nt)
             loss_op3 = paddle.maximum(
                 fluid.layers.fill_constant_batch_size_like(
                     input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32'
...
@@ -403,9 +403,9 @@ class TestFakeInit(TranspilerTest):
         neg_emb_b_vec = paddle.reshape(neg_emb_b, shape=[-1, neg_num])
-        true_logits = fluid.layers.elementwise_add(
+        true_logits = paddle.add(
             paddle.sum(
-                fluid.layers.elementwise_mul(input_emb, true_emb_w),
+                paddle.multiply(input_emb, true_emb_w),
                 dim=1,
                 keep_dim=True,
             ),
@@ -418,7 +418,7 @@ class TestFakeInit(TranspilerTest):
             input_emb_re, neg_emb_w_re, transpose_y=True
         )
         neg_matmul_re = paddle.reshape(neg_matmul, shape=[-1, neg_num])
-        neg_logits = fluid.layers.elementwise_add(neg_matmul_re, neg_emb_b_vec)
+        neg_logits = paddle.add(neg_matmul_re, neg_emb_b_vec)
         # nce loss
         label_ones = fluid.layers.fill_constant_batch_size_like(
             true_logits, shape=[-1, 1], value=1.0, dtype='float32'
@@ -433,7 +433,7 @@ class TestFakeInit(TranspilerTest):
         neg_xent = paddle.nn.functional.binary_cross_entropy_with_logits(
             neg_logits, label_zeros
         )
-        cost = fluid.layers.elementwise_add(
+        cost = paddle.add(
             paddle.sum(true_xent, axis=1),
             paddle.sum(neg_xent, axis=1),
         )
...
@@ -169,7 +169,7 @@ def lm_model(
                 nn = layers.concat([input, pre_hidden], 1)
                 gate_input = layers.matmul(x=nn, y=weight_1)
-                gate_input = layers.elementwise_add(gate_input, bias)
+                gate_input = paddle.add(gate_input, bias)
                 i = paddle.slice(
                     gate_input, axes=[1], starts=[0], ends=[hidden_size]
                 )
@@ -293,7 +293,7 @@ def lm_model(
             nn = layers.concat([input, pre_hidden], 1)
             gate_input = layers.matmul(x=nn, y=weight_1)
-            gate_input = layers.elementwise_add(gate_input, bias)
+            gate_input = paddle.add(gate_input, bias)
             i, j, f, o = layers.split(gate_input, num_or_sections=4, dim=-1)
             c = pre_cell * paddle.nn.functional.sigmoid(
@@ -460,7 +460,7 @@ def lm_model(
     )
     projection = layers.matmul(rnn_out, softmax_weight)
-    projection = layers.elementwise_add(projection, softmax_bias)
+    projection = paddle.add(projection, softmax_bias)
     projection = paddle.reshape(projection, shape=[-1, vocab_size])
     loss = layers.softmax_with_cross_entropy(
...
@@ -157,7 +157,7 @@ class EagerDeletionRecurrentOpTest1(unittest.TestCase):
             x_t = rnn.step_input(x)
             h = paddle.scale(
-                x=layers.elementwise_add(x=h_pre, y=x_t),
+                x=paddle.add(x=h_pre, y=x_t),
                 scale=self.py_rnn.scale,
             )
@@ -328,9 +328,7 @@ class EagerDeletionRecurrentOpTest2(EagerDeletionRecurrentOpTest1):
                 bias_attr=False,
             )
-            h = paddle.nn.functional.sigmoid(
-                x=layers.elementwise_add(x=temp_l, y=temp_r)
-            )
+            h = paddle.nn.functional.sigmoid(x=paddle.add(x=temp_l, y=temp_r))
             rnn.update_memory(h_pre, h)
             rnn.output(h)
@@ -504,7 +502,7 @@ class EagerDeletionRecurrentOpNoMemBootTest(EagerDeletionRecurrentOpTest1):
         with rnn.step():
             mem_pre = rnn.memory(shape=[-1, self.input_dim], batch_ref=x)
             x_t = rnn.step_input(x)
-            mem = layers.elementwise_add(x=mem_pre, y=x_t)
+            mem = paddle.add(x=mem_pre, y=x_t)
             rnn.update_memory(mem_pre, mem)
             rnn.output(mem)
@@ -584,7 +582,7 @@ class EagerDeletionTwoRecurrentOpsTest(EagerDeletionRecurrentOpTest1):
        with rnn_0.step():
             x_t = rnn_0.step_input(x)
             mem_pre = rnn_0.memory(shape=[-1, self.input_dim], batch_ref=x)
-            mem = layers.elementwise_add(x=mem_pre, y=x_t)
+            mem = paddle.add(x=mem_pre, y=x_t)
             rnn_0.update_memory(mem_pre, mem)
             rnn_0.output(mem)
@@ -594,8 +592,8 @@ class EagerDeletionTwoRecurrentOpsTest(EagerDeletionRecurrentOpTest1):
             x_t = rnn_1.step_input(x)
             last_rnn_output = rnn_0()
             last_rnn_sum = paddle.sum(last_rnn_output)
-            mem = layers.elementwise_add(x=x_t, y=last_rnn_sum)
-            y = layers.elementwise_add(x=mem_pre, y=mem)
+            mem = paddle.add(x=x_t, y=last_rnn_sum)
+            y = paddle.add(x=mem_pre, y=mem)
             rnn_1.update_memory(mem_pre, mem)
             rnn_1.output(y)
         return rnn_1()
@@ -693,7 +691,7 @@ class EagerDeletionFarwardOnlyRnnAndBackwardRnnTest(
             x_t = forward_only_rnn.step_input(x)
             h = paddle.scale(
-                x=layers.elementwise_add(x=h_pre, y=x_t),
+                x=paddle.add(x=h_pre, y=x_t),
                 scale=self.py_rnn.scale,
             )
@@ -709,7 +707,7 @@ class EagerDeletionFarwardOnlyRnnAndBackwardRnnTest(
             x_t = rnn.step_input(x)
             h = paddle.scale(
-                x=layers.elementwise_add(x=h_pre, y=x_t),
+                x=paddle.add(x=h_pre, y=x_t),
                 scale=self.py_rnn.scale,
             )
...
@@ -94,7 +94,7 @@ class TestElementwiseAddDoubleGradCheck(unittest.TestCase):
         y = layers.data('y', shape, False, dtype)
         x.persistable = True
         y.persistable = True
-        out = layers.elementwise_add(x, y)
+        out = paddle.add(x, y)
         x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
         y_arr = np.random.uniform(-1, 1, shape).astype(dtype)
@@ -155,7 +155,7 @@ class TestElementwiseSubDoubleGradCheck(unittest.TestCase):
         y = layers.data('y', shape, False, dtype)
         x.persistable = True
         y.persistable = True
-        out = layers.elementwise_sub(x, y)
+        out = paddle.subtract(x, y)
         x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
         y_arr = np.random.uniform(-1, 1, shape).astype(dtype)
@@ -291,7 +291,7 @@ class TestElementwiseAddTripleGradCheck(unittest.TestCase):
         y = layers.data('y', shape, False, dtype)
         x.persistable = True
         y.persistable = True
-        out = layers.elementwise_add(x, y)
+        out = paddle.add(x, y)
         x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
         y_arr = np.random.uniform(-1, 1, shape).astype(dtype)
...
@@ -14,6 +14,7 @@
 import unittest
+import paddle
 import paddle.fluid as fluid
 import paddle.fluid.nets as nets
 from paddle.fluid.framework import Program
@@ -81,7 +82,7 @@ class TestLayer(unittest.TestCase):
         image2 = fluid.layers.data(
             name='pixel2', shape=[3, 48, 48], dtype='float32'
         )
-        fluid.layers.elementwise_add(x=image1, y=image2, act='relu')
+        paddle.nn.functional.relu(paddle.add(x=image1, y=image2))
         print(main_program)
...
@@ -33,7 +33,7 @@ class MyLayer(fluid.Layer):
     def forward(self, inputs):
         x = fluid.layers.relu(inputs)
         self._x_for_debug = x
-        x = fluid.layers.elementwise_mul(x, x)
+        x = paddle.multiply(x, x)
         x = paddle.sum(x)
         return [x]
@@ -722,9 +722,9 @@ class TestImperative(unittest.TestCase):
             inp1 = paddle.to_tensor(np_inp1)
             inp2 = paddle.to_tensor(np_inp2)
             if np.sum(np_inp1) < np.sum(np_inp2):
-                x = fluid.layers.elementwise_add(inp1, inp2)
+                x = paddle.add(inp1, inp2)
             else:
-                x = fluid.layers.elementwise_sub(inp1, inp2)
+                x = paddle.subtract(inp1, inp2)
             dygraph_result = x.numpy()
         # static graph
@@ -750,13 +750,13 @@ class TestImperative(unittest.TestCase):
             with ie.true_block():
                 d1 = ie.input(inp_data1)
                 d2 = ie.input(inp_data2)
-                d3 = fluid.layers.elementwise_add(d1, d2)
+                d3 = paddle.add(d1, d2)
                 ie.output(d3)
             with ie.false_block():
                 d1 = ie.input(inp_data1)
                 d2 = ie.input(inp_data2)
-                d3 = fluid.layers.elementwise_sub(d1, d2)
+                d3 = paddle.subtract(d1, d2)
                 ie.output(d3)
             out = ie()
...
@@ -76,7 +76,7 @@ class DMF(fluid.Layer):
         for ul, il in zip(self._user_layers, self._item_layers):
             users = ul(users)
             items = il(items)
-        return fluid.layers.elementwise_mul(users, items)
+        return paddle.multiply(users, items)


class MLP(fluid.Layer):
...
@@ -67,7 +67,7 @@ class SimpleNet(fluid.Layer):
         projection = fluid.layers.matmul(
             x_emb, paddle.transpose(self.embedding.weight, perm=[1, 0])
         )
-        projection = fluid.layers.elementwise_add(projection, self.softmax_bias)
+        projection = paddle.add(projection, self.softmax_bias)
         projection = paddle.reshape(projection, shape=[-1, self.vocab_size])
         loss = fluid.layers.softmax_with_cross_entropy(
             logits=projection, label=label, soft_label=False
...
@@ -306,9 +306,7 @@ class SimpleAttention(fluid.dygraph.Layer):
             decoder_state_proj_reshape,
             [-1, encoder_proj.shape[1], -1],
         )
-        concated = fluid.layers.elementwise_add(
-            encoder_proj, decoder_state_expand
-        )
+        concated = paddle.add(encoder_proj, decoder_state_expand)
         concated = paddle.tanh(x=concated)
         attention_weight = self.fc_2(concated)
@@ -362,7 +360,7 @@ class GRUDecoderWithAttention(fluid.dygraph.Layer):
         )
         fc_1 = self.fc_1_layer(context)
         fc_2 = self.fc_2_layer(current_word)
-        decoder_inputs = fluid.layers.elementwise_add(x=fc_1, y=fc_2)
+        decoder_inputs = paddle.add(x=fc_1, y=fc_2)
         h, _, _ = self.gru_unit(decoder_inputs, hidden_mem)
         hidden_mem = h
...
@@ -35,7 +35,7 @@ class MyLayer(fluid.Layer):
     def forward(self, inputs):
         x = fluid.layers.relu(inputs)
-        x = fluid.layers.elementwise_mul(x, x)
+        x = paddle.multiply(x, x)
         x = paddle.sum(x)
         return [x]
...