diff --git a/python/paddle/fluid/tests/unittests/dist_fleet_simnet_bow.py b/python/paddle/fluid/tests/unittests/dist_fleet_simnet_bow.py
index 306bcb5a8e920b493c43bf25fcf8744807da759d..eb128f9be75fa564b7bfd5997733f01cae08854a 100644
--- a/python/paddle/fluid/tests/unittests/dist_fleet_simnet_bow.py
+++ b/python/paddle/fluid/tests/unittests/dist_fleet_simnet_bow.py
@@ -57,7 +57,7 @@ def get_acc(cos_q_nt, cos_q_pt, batch_size):
     cond = fluid.layers.less_than(cos_q_nt, cos_q_pt)
     cond = fluid.layers.cast(cond, dtype='float64')
     cond_3 = paddle.sum(cond)
-    acc = fluid.layers.elementwise_div(
+    acc = paddle.divide(
         cond_3,
         fluid.layers.fill_constant(
             shape=[1], value=batch_size * 1.0, dtype='float64'
@@ -68,13 +68,13 @@ def get_acc(cos_q_nt, cos_q_pt, batch_size):
 
 
 def get_loss(cos_q_pt, cos_q_nt):
-    loss_op1 = fluid.layers.elementwise_sub(
+    loss_op1 = paddle.subtract(
         fluid.layers.fill_constant_batch_size_like(
             input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32'
         ),
         cos_q_pt,
     )
-    loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt)
+    loss_op2 = paddle.add(loss_op1, cos_q_nt)
     loss_op3 = paddle.maximum(
         fluid.layers.fill_constant_batch_size_like(
             input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32'
diff --git a/python/paddle/fluid/tests/unittests/dist_se_resnext.py b/python/paddle/fluid/tests/unittests/dist_se_resnext.py
index 83befa76062d1a830454bcb8b655bb71ebb1d260..3461be8228fc7cad72daa0b657aeb1c3f0752b6e 100644
--- a/python/paddle/fluid/tests/unittests/dist_se_resnext.py
+++ b/python/paddle/fluid/tests/unittests/dist_se_resnext.py
@@ -163,7 +163,7 @@ class SE_ResNeXt:
 
         short = self.shortcut(input, num_filters * 2, stride)
 
-        return fluid.layers.elementwise_add(x=short, y=scale, act='relu')
+        return paddle.nn.functional.relu(paddle.add(x=short, y=scale))
 
     def conv_bn_layer(
         self, input, num_filters, filter_size, stride=1, groups=1, act=None
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/darknet.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/darknet.py
index db05875d2314f6590f7c31eb566788fa20fddf46..9199d0c2d96b2666f9a8a03d6f2fc53a03a14de2 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/darknet.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/darknet.py
@@ -114,7 +114,7 @@ class BasicBlock(fluid.dygraph.Layer):
     def forward(self, inputs):
         conv1 = self.conv1(inputs)
         conv2 = self.conv2(conv1)
-        out = fluid.layers.elementwise_add(x=inputs, y=conv2, act=None)
+        out = paddle.add(x=inputs, y=conv2)
         return out
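Note on the pattern above: the legacy elementwise ops accepted a fused `act`
argument, while `paddle.add` does not, so hunks like the `dist_se_resnext.py`
one split the fusion into two explicit calls. A minimal sketch of the
equivalence, with placeholder tensors that are not taken from the tests:

    import paddle

    # Placeholder inputs; the real call sites pass conv/shortcut outputs.
    short = paddle.rand([2, 64, 8, 8])
    scale = paddle.rand([2, 64, 8, 8])

    # Old (fluid) spelling fused the activation into the elementwise op:
    #     out = fluid.layers.elementwise_add(x=short, y=scale, act='relu')
    # The 2.x spelling composes two explicit ops:
    out = paddle.nn.functional.relu(paddle.add(x=short, y=scale))

Call sites that passed `act=None`, as in `darknet.py`, fused nothing, so they
map to a bare `paddle.add`.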
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/seq2seq_dygraph_model.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/seq2seq_dygraph_model.py
index bfc23a71fe571ff87210db90f6631fe0a0794661..e7af14446410f63ddb749e89582ed52bf0e8e50e 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/seq2seq_dygraph_model.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/seq2seq_dygraph_model.py
@@ -72,15 +72,13 @@ class BasicLSTMUnit(Layer):
         concat_input_hidden = layers.concat([input, pre_hidden], 1)
         gate_input = layers.matmul(x=concat_input_hidden, y=self._weight)
 
-        gate_input = layers.elementwise_add(gate_input, self._bias)
+        gate_input = paddle.add(gate_input, self._bias)
         i, j, f, o = layers.split(gate_input, num_or_sections=4, dim=-1)
-        new_cell = layers.elementwise_add(
-            layers.elementwise_mul(
+        new_cell = paddle.add(
+            paddle.multiply(
                 pre_cell, paddle.nn.functional.sigmoid(f + self._forget_bias)
             ),
-            layers.elementwise_mul(
-                paddle.nn.functional.sigmoid(i), paddle.tanh(j)
-            ),
+            paddle.multiply(paddle.nn.functional.sigmoid(i), paddle.tanh(j)),
         )
         new_hidden = paddle.tanh(new_cell) * paddle.nn.functional.sigmoid(o)
@@ -442,13 +440,12 @@ class BaseModel(fluid.dygraph.Layer):
             np.array(noend_array, dtype='float32')
         )
 
-        step_log_probs = fluid.layers.elementwise_mul(
+        step_log_probs = paddle.multiply(
             paddle.expand(
                 fluid.layers.unsqueeze(beam_finished, [2]),
                 [-1, -1, self.tar_vocab_size],
             ),
             noend_mask_tensor,
-            axis=-1,
         ) - fluid.layers.elementwise_mul(
             step_log_probs, (beam_finished - 1), axis=0
         )
@@ -693,7 +690,7 @@ class AttentionModel(fluid.dygraph.Layer):
         if mask is not None:
             attn = paddle.transpose(attn, [1, 0, 2])
-            attn = fluid.layers.elementwise_add(attn, mask * 1000000000, -1)
+            attn = paddle.add(attn, mask * 1000000000)
             attn = paddle.transpose(attn, [1, 0, 2])
         weight = fluid.layers.softmax(attn)
         weight_memory = fluid.layers.matmul(weight, memory)
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model.py
index d6589a53a0bdd3ddb42b2a5171ee76a192b5881d..b10a5dc55806c861a8e3e8de3236e1ba4812abd0 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model.py
@@ -171,7 +171,7 @@ class ElementwiseAddLayer:
         """
         operation
         """
-        add = fluid.layers.elementwise_add(x, y)
+        add = paddle.add(x, y)
         return add
 
 
@@ -190,7 +190,7 @@ class ElementwiseSubLayer:
         """
         operation
         """
-        sub = fluid.layers.elementwise_sub(x, y)
+        sub = paddle.subtract(x, y)
         return sub
 
 
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model_v2.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model_v2.py
index 03cd5e699e336935d68bca8501ff925887c44b95..06f460912b45be6d26a7df716e5860bfeaf76798 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model_v2.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model_v2.py
@@ -187,7 +187,7 @@ class ElementwiseSubLayer:
         """
         operation
         """
-        sub = paddle.fluid.layers.elementwise_sub(x, y)
+        sub = paddle.subtract(x, y)
         return sub
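The `seq2seq_dygraph_model.py` hunks above drop `axis=-1` when switching to
`paddle.multiply`. That is safe because the 2.x elementwise ops broadcast with
numpy-style trailing alignment, which is exactly what `axis=-1` requested from
the legacy ops. A sketch with made-up shapes:

    import paddle

    # Made-up stand-ins for [batch, beam, vocab] and a 1-D [vocab] mask.
    finished = paddle.ones([4, 3, 1])
    expanded = paddle.expand(finished, [4, 3, 100])
    noend_mask = paddle.rand([100])

    # elementwise_mul(expanded, noend_mask, axis=-1) aligned the 1-D mask with
    # the last axis; paddle.multiply does the same via trailing broadcasting.
    masked = paddle.multiply(expanded, noend_mask)
    print(masked.shape)  # [4, 3, 100]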
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py
index eaf37e7ea7a7539d7af4062b3fec5598120cdb31..c0f5e8c0c3ce105b2ab9a7a8dcb1c5dc72f1e4aa 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py
@@ -330,11 +330,11 @@ def bmn_loss_func(
         coef_1 = 0.5 * ratio
         epsilon = 0.000001
         # temp = fluid.layers.log(pred_score + epsilon)
-        loss_pos = fluid.layers.elementwise_mul(
+        loss_pos = paddle.multiply(
             fluid.layers.log(pred_score + epsilon), pmask
         )
         loss_pos = coef_1 * fluid.layers.reduce_mean(loss_pos)
-        loss_neg = fluid.layers.elementwise_mul(
+        loss_neg = paddle.multiply(
             fluid.layers.log(1.0 - pred_score + epsilon), (1.0 - pmask)
         )
         loss_neg = coef_0 * fluid.layers.reduce_mean(loss_neg)
@@ -348,14 +348,14 @@ def bmn_loss_func(
 
     def pem_reg_loss_func(pred_score, gt_iou_map, mask):
-        gt_iou_map = fluid.layers.elementwise_mul(gt_iou_map, mask)
+        gt_iou_map = paddle.multiply(gt_iou_map, mask)
 
         u_hmask = fluid.layers.cast(x=gt_iou_map > 0.7, dtype=DATATYPE)
         u_mmask = paddle.logical_and(gt_iou_map <= 0.7, gt_iou_map > 0.3)
         u_mmask = fluid.layers.cast(x=u_mmask, dtype=DATATYPE)
         u_lmask = paddle.logical_and(gt_iou_map <= 0.3, gt_iou_map >= 0.0)
         u_lmask = fluid.layers.cast(x=u_lmask, dtype=DATATYPE)
-        u_lmask = fluid.layers.elementwise_mul(u_lmask, mask)
+        u_lmask = paddle.multiply(u_lmask, mask)
 
         num_h = fluid.layers.cast(paddle.sum(u_hmask), dtype=DATATYPE)
         num_m = fluid.layers.cast(paddle.sum(u_mmask), dtype=DATATYPE)
@@ -367,7 +367,7 @@ def bmn_loss_func(
                 0.0, 1.0, [gt_iou_map.shape[1], gt_iou_map.shape[2]]
             ).astype(DATATYPE)
         )
-        u_smmask = fluid.layers.elementwise_mul(u_mmask, u_smmask)
+        u_smmask = paddle.multiply(u_mmask, u_smmask)
         u_smmask = fluid.layers.cast(x=(u_smmask > (1.0 - r_m)), dtype=DATATYPE)
 
         r_l = num_h / num_l
@@ -376,23 +376,23 @@ def bmn_loss_func(
                 0.0, 1.0, [gt_iou_map.shape[1], gt_iou_map.shape[2]]
             ).astype(DATATYPE)
         )
-        u_slmask = fluid.layers.elementwise_mul(u_lmask, u_slmask)
+        u_slmask = paddle.multiply(u_lmask, u_slmask)
         u_slmask = fluid.layers.cast(x=(u_slmask > (1.0 - r_l)), dtype=DATATYPE)
 
         weights = u_hmask + u_smmask + u_slmask
         weights.stop_gradient = True
 
         loss = fluid.layers.square_error_cost(pred_score, gt_iou_map)
-        loss = fluid.layers.elementwise_mul(loss, weights)
+        loss = paddle.multiply(loss, weights)
         loss = 0.5 * paddle.sum(loss) / paddle.sum(weights)
 
         return loss
 
     def pem_cls_loss_func(pred_score, gt_iou_map, mask):
-        gt_iou_map = fluid.layers.elementwise_mul(gt_iou_map, mask)
+        gt_iou_map = paddle.multiply(gt_iou_map, mask)
         gt_iou_map.stop_gradient = True
         pmask = fluid.layers.cast(x=(gt_iou_map > 0.9), dtype=DATATYPE)
         nmask = fluid.layers.cast(x=(gt_iou_map <= 0.9), dtype=DATATYPE)
-        nmask = fluid.layers.elementwise_mul(nmask, mask)
+        nmask = paddle.multiply(nmask, mask)
 
         num_positive = paddle.sum(pmask)
         num_entries = num_positive + paddle.sum(nmask)
@@ -400,11 +400,11 @@ def bmn_loss_func(
         coef_0 = 0.5 * ratio / (ratio - 1)
         coef_1 = 0.5 * ratio
         epsilon = 0.000001
-        loss_pos = fluid.layers.elementwise_mul(
+        loss_pos = paddle.multiply(
             fluid.layers.log(pred_score + epsilon), pmask
         )
         loss_pos = coef_1 * paddle.sum(loss_pos)
-        loss_neg = fluid.layers.elementwise_mul(
+        loss_neg = paddle.multiply(
             fluid.layers.log(1.0 - pred_score + epsilon), nmask
         )
         loss_neg = coef_0 * paddle.sum(loss_neg)
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py
index 17972d7798c53492da515853f28a77e289d1b015..312d716af70624176907994296a62d4a81335733 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py
@@ -91,8 +91,8 @@ class Cycle_Gan(fluid.dygraph.Layer):
         cyc_A = self.build_generator_resnet_9blocks_b(fake_B)
         cyc_B = self.build_generator_resnet_9blocks_a(fake_A)
 
-        diff_A = paddle.abs(fluid.layers.elementwise_sub(x=input_A, y=cyc_A))
-        diff_B = paddle.abs(fluid.layers.elementwise_sub(x=input_B, y=cyc_B))
+        diff_A = paddle.abs(paddle.subtract(x=input_A, y=cyc_A))
+        diff_B = paddle.abs(paddle.subtract(x=input_B, y=cyc_B))
         cyc_A_loss = fluid.layers.reduce_mean(diff_A) * lambda_A
         cyc_B_loss = fluid.layers.reduce_mean(diff_B) * lambda_B
         cyc_loss = cyc_A_loss + cyc_B_loss
@@ -106,7 +106,7 @@ class Cycle_Gan(fluid.dygraph.Layer):
         idt_A = self.build_generator_resnet_9blocks_a(input_B)
         idt_loss_A = (
             fluid.layers.reduce_mean(
-                paddle.abs(fluid.layers.elementwise_sub(x=input_B, y=idt_A))
+                paddle.abs(paddle.subtract(x=input_B, y=idt_A))
             )
             * lambda_B
             * lambda_identity
@@ -115,12 +115,12 @@ class Cycle_Gan(fluid.dygraph.Layer):
         idt_B = self.build_generator_resnet_9blocks_b(input_A)
         idt_loss_B = (
             fluid.layers.reduce_mean(
-                paddle.abs(fluid.layers.elementwise_sub(x=input_A, y=idt_B))
+                paddle.abs(paddle.subtract(x=input_A, y=idt_B))
             )
             * lambda_A
             * lambda_identity
         )
-        idt_loss = fluid.layers.elementwise_add(idt_loss_A, idt_loss_B)
+        idt_loss = paddle.add(idt_loss_A, idt_loss_B)
         g_loss = cyc_loss + G + idt_loss
         return (
             fake_A,
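In the `test_cycle_gan.py` hunks, `paddle.subtract` composed with `paddle.abs`
reproduces the old `elementwise_sub` call inside the L1 cycle-consistency
loss. A self-contained sketch with placeholder shapes and a made-up
`lambda_A`, not values from the test:

    import paddle

    # Placeholder images; reduce_mean corresponds to paddle.mean in 2.x.
    input_A = paddle.rand([1, 3, 16, 16])
    cyc_A = paddle.rand([1, 3, 16, 16])
    lambda_A = 10.0

    diff_A = paddle.abs(paddle.subtract(x=input_A, y=cyc_A))
    cyc_A_loss = paddle.mean(diff_A) * lambda_A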
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py
index 8b74bd7e9848bed240ee8fddb8770efdb00204ee..068046e00bde084ef288f7ab675e087abe081115 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mobile_net.py
@@ -326,7 +326,7 @@ class InvertedResidualUnit(fluid.dygraph.Layer):
         y = self._bottleneck_conv(y, if_act=True)
         y = self._linear_conv(y, if_act=False)
         if ifshortcut:
-            y = fluid.layers.elementwise_add(inputs, y)
+            y = paddle.add(inputs, y)
         return y
 
 
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm.py
index 1dc77a658bbd00a9afc8b8161d78d6af7b251407..fa062464d5aa965f6931cc4a7aca89a6d4232256 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm.py
@@ -96,7 +96,7 @@ class SimpleLSTMRNN(fluid.Layer):
 
             nn = fluid.layers.concat([step_input, pre_hidden], 1)
             gate_input = fluid.layers.matmul(x=nn, y=weight_1)
-            gate_input = fluid.layers.elementwise_add(gate_input, bias)
+            gate_input = paddle.add(gate_input, bias)
             i, j, f, o = fluid.layers.split(
                 gate_input, num_or_sections=4, dim=-1
             )
@@ -214,7 +214,7 @@ class PtbModel(fluid.Layer):
         )
 
         projection = fluid.layers.matmul(rnn_out, self.softmax_weight)
-        projection = fluid.layers.elementwise_add(projection, self.softmax_bias)
+        projection = paddle.add(projection, self.softmax_bias)
 
         loss = fluid.layers.softmax_with_cross_entropy(
             logits=projection, label=label, soft_label=False
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py
index f812cfef165f13055e2824b90119d1b2a41fd8e1..3b6da7e23c1c5cb76eeb27a5d897b072ffb98871 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py
@@ -124,7 +124,7 @@ def train(args, place, to_static):
             mask.stop_gradient = True
 
             loss_probs = fluid.layers.log(loss_probs)
-            loss_probs = fluid.layers.elementwise_mul(loss_probs, mask)
+            loss_probs = paddle.multiply(loss_probs, mask)
             loss_probs = paddle.sum(loss_probs, axis=-1)
 
             policy.saved_log_probs.append(loss_probs)
@@ -151,7 +151,7 @@ def train(args, place, to_static):
                 _R = -1 * R * R_numpy
                 _R = to_variable(_R)
                 _R.stop_gradient = True
-                cur_loss = fluid.layers.elementwise_mul(_R, log_prob)
+                cur_loss = paddle.multiply(_R, log_prob)
                 policy_loss.append(cur_loss)
 
             policy_loss = fluid.layers.concat(policy_loss)
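The `test_ptb_lm.py` hunks both follow the matmul-plus-bias pattern, where
`paddle.add` broadcasts a 1-D bias over the leading dimensions just as the
old `elementwise_add` default did. A sketch with illustrative shapes (smaller
than the real PTB configuration):

    import paddle

    # [batch, steps, hidden] x [hidden, vocab], bias is [vocab].
    rnn_out = paddle.rand([8, 20, 200])
    softmax_weight = paddle.rand([200, 1000])
    softmax_bias = paddle.rand([1000])

    projection = paddle.matmul(rnn_out, softmax_weight)
    # The 1-D bias broadcasts across the batch and step dimensions.
    projection = paddle.add(projection, softmax_bias)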
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet.py
index 0dca14c462044f047c347a0238dcd0a1fc636113..5851f82630569de6c8a9b02d1a38cac600e8cba6 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_resnet.py
@@ -133,7 +133,7 @@ class BottleneckBlock(fluid.dygraph.Layer):
         else:
             short = self.short(inputs)
 
-        y = fluid.layers.elementwise_add(x=short, y=conv2)
+        y = paddle.add(x=short, y=conv2)
 
         layer_helper = fluid.layer_helper.LayerHelper(
             self.full_name(), act='relu'
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_se_resnet.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_se_resnet.py
index e58555003e9e040d84fdc4792372bda89552f226..7d3b07a395c907090398b9e90ed918f76e798af9 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_se_resnet.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_se_resnet.py
@@ -215,7 +215,7 @@ class BottleneckBlock(fluid.dygraph.Layer):
         else:
             short = self.short(inputs)
 
-        y = fluid.layers.elementwise_add(x=short, y=scale, act='relu')
+        y = paddle.nn.functional.relu(paddle.add(x=short, y=scale))
 
         return y
 
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py
index e8d4bcd9fd27bdd211468271ed75823cfac8a239..d5bd239afd4ad7edd388809cbe50e02529214c4b 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py
@@ -133,7 +133,7 @@ class BottleneckBlock(fluid.dygraph.Layer):
             short = inputs
         else:
             short = self.short(inputs)
-        y = fluid.layers.elementwise_add(x=short, y=conv2, act="relu")
+        y = paddle.nn.functional.relu(paddle.add(x=short, y=conv2))
         return y
 
 
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_word2vec.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_word2vec.py
index fc1341350c1659c309f1e111a647096fbb794963..9b444aecae50c741da567827aec009802db2424a 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_word2vec.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_word2vec.py
@@ -257,9 +257,7 @@ class SkipGram(fluid.dygraph.Layer):
         # center_words_emb = [batch_size, embedding_size]
         # target_words_emb = [batch_size, embedding_size]
 
-        word_sim = fluid.layers.elementwise_mul(
-            center_words_emb, target_words_emb
-        )
+        word_sim = paddle.multiply(center_words_emb, target_words_emb)
         word_sim = paddle.sum(word_sim, axis=-1)
 
         pred = paddle.nn.functional.sigmoid(word_sim)
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_dygraph_model.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_dygraph_model.py
index c996c54d053a6909e111da7242320d0b7339fb16..d0f329b96cfb81fa4a6d919fef598af94531fe1e 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_dygraph_model.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_dygraph_model.py
@@ -756,13 +756,12 @@ class Transformer(Layer):
 
         def mask_probs(probs, finished, noend_mask_tensor):
             finished = layers.cast(finished, dtype=probs.dtype)
-            probs = layers.elementwise_mul(
+            probs = paddle.multiply(
                 paddle.expand(
                     layers.unsqueeze(finished, [2]),
                     [-1, -1, self.trg_vocab_size],
                 ),
                 noend_mask_tensor,
-                axis=-1,
             ) - layers.elementwise_mul(probs, (finished - 1), axis=0)
             return probs
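Both `seq2seq_dygraph_model.py` and `transformer_dygraph_model.py` leave the
second `elementwise_mul(..., axis=0)` call untouched: `axis=0` aligns a
2-D `[batch, beam]` tensor with the leading axes of a 3-D tensor, which has
no drop-in 2.x spelling. A hedged sketch (not from this PR) of how a fully
2.x `mask_probs` could express that with an explicit unsqueeze; all shapes
and values below are illustrative:

    import paddle

    batch, beam, vocab = 2, 3, 5
    probs = paddle.rand([batch, beam, vocab])
    finished = paddle.to_tensor([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    noend_mask = paddle.to_tensor([0.0] + [-1e9] * (vocab - 1))

    # Finished beams keep only the no-end mask row; live beams keep probs.
    kept = paddle.multiply(
        paddle.expand(paddle.unsqueeze(finished, [2]), [batch, beam, vocab]),
        noend_mask,
    )
    # elementwise_mul(probs, finished - 1, axis=0) becomes a trailing-axis
    # broadcast after unsqueezing finished to [batch, beam, 1].
    masked = kept - paddle.multiply(probs, paddle.unsqueeze(finished - 1.0, [2]))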
diff --git a/python/paddle/fluid/tests/unittests/npu/test_amp_check_finite_and_scale_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_amp_check_finite_and_scale_op_npu.py
index ff11f1e68f0ca0d23d3d44a1ef4c2bb449acdf15..43e3c44182de08ff1f967da38bc1e55fffcdcc91 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_amp_check_finite_and_scale_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_amp_check_finite_and_scale_op_npu.py
@@ -45,7 +45,7 @@ class TestCheckFiniteAndUnscale(unittest.TestCase):
                 inputs={"FloatStatus": float_status},
                 outputs={"FloatStatusOut": float_status},
             )
-            c = paddle.fluid.layers.elementwise_div(a, b)
+            c = paddle.divide(a, b)
             out, found_inf = check_finite_and_unscale(
                 [c], scale, float_status=float_status
             )
@@ -115,7 +115,7 @@ class TestCheckFiniteAndUnscaleClearFloatStatus(unittest.TestCase):
                 inputs={"FloatStatus": float_status},
                 outputs={"FloatStatusOut": float_status},
             )
-            c = paddle.fluid.layers.elementwise_div(a, b)
+            c = paddle.divide(a, b)
             out, found_inf = check_finite_and_unscale(
                 [c], scale, float_status=float_status
             )
@@ -127,7 +127,7 @@ class TestCheckFiniteAndUnscaleClearFloatStatus(unittest.TestCase):
                 inputs={"FloatStatus": float_status},
                 outputs={"FloatStatusOut": float_status},
             )
-            d = paddle.fluid.layers.elementwise_add(a, b)
+            d = paddle.add(a, b)
             out, found_inf = check_finite_and_unscale(
                 [d], scale, float_status=float_status
             )
diff --git a/python/paddle/fluid/tests/unittests/npu/test_elementwise_div_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_elementwise_div_op_npu.py
index acdb8c75db56171cbe3046ffa54778924e88d646..42460f46a1ec775a98c1de86c5d2ddab6889d174 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_elementwise_div_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_elementwise_div_op_npu.py
@@ -133,7 +133,7 @@ class TestElementwiseDivNet(unittest.TestCase):
             e = paddle.multiply(a, b)
             f = paddle.multiply(c, d)
             f.stop_gradient = True
-            g = fluid.layers.elementwise_div(e, f)
+            g = paddle.divide(e, f)
             fc_1 = fluid.layers.fc(input=g, size=128)
             prediction = fluid.layers.fc(input=fc_1, size=2, act='softmax')
diff --git a/python/paddle/fluid/tests/unittests/parallel_dygraph_sparse_embedding.py b/python/paddle/fluid/tests/unittests/parallel_dygraph_sparse_embedding.py
index f3f33d44d9646529e4fbbc99c30779ee43af5e10..824815d48aa2cc9c39ccb5b87560ec7c15a69a85 100644
--- a/python/paddle/fluid/tests/unittests/parallel_dygraph_sparse_embedding.py
+++ b/python/paddle/fluid/tests/unittests/parallel_dygraph_sparse_embedding.py
@@ -66,7 +66,7 @@ class SimpleNet(fluid.Layer):
     def forward(self, input, label):
         x_emb = self.embedding(input)
         fc = fluid.layers.matmul(x_emb, self.softmax_weight)
-        fc = fluid.layers.elementwise_add(fc, self.softmax_bias)
+        fc = paddle.add(fc, self.softmax_bias)
         projection = paddle.reshape(fc, shape=[-1, self.vocab_size])
         loss = fluid.layers.softmax_with_cross_entropy(
            logits=projection, label=label, soft_label=False
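As a final cross-check, all four replacement ops are plain numpy-style
elementwise operations. A quick self-contained sanity test (assuming a CPU
build is sufficient; the NPU tests above exercise the same ops on device):

    import numpy as np
    import paddle

    a = np.random.rand(3, 4).astype('float32')
    b = np.random.rand(3, 4).astype('float32') + 1.0  # avoid divide-by-zero

    ta, tb = paddle.to_tensor(a), paddle.to_tensor(b)
    np.testing.assert_allclose(paddle.add(ta, tb).numpy(), a + b, rtol=1e-6)
    np.testing.assert_allclose(paddle.subtract(ta, tb).numpy(), a - b, rtol=1e-6)
    np.testing.assert_allclose(paddle.multiply(ta, tb).numpy(), a * b, rtol=1e-6)
    np.testing.assert_allclose(paddle.divide(ta, tb).numpy(), a / b, rtol=1e-6)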