Unverified  Commit 2005d45a  Authored by HongyuJia, committed by GitHub

clean elem_arithmetic part3 unittest (#48462)

Parent 048e0c55
@@ -57,7 +57,7 @@ def get_acc(cos_q_nt, cos_q_pt, batch_size):
     cond = fluid.layers.less_than(cos_q_nt, cos_q_pt)
     cond = fluid.layers.cast(cond, dtype='float64')
     cond_3 = paddle.sum(cond)
-    acc = fluid.layers.elementwise_div(
+    acc = paddle.divide(
         cond_3,
         fluid.layers.fill_constant(
             shape=[1], value=batch_size * 1.0, dtype='float64'
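Note: this hunk swaps the deprecated fluid.layers.elementwise_div for paddle.divide. A minimal sketch of the equivalence, assuming Paddle 2.x dygraph mode (the tensor values here are illustrative, not from the test):

    import paddle

    cond_3 = paddle.to_tensor([12.0], dtype='float64')
    batch = paddle.full(shape=[1], fill_value=16.0, dtype='float64')
    acc = paddle.divide(cond_3, batch)   # same elementwise math as elementwise_div
    print(acc.numpy())                   # [0.75]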
@@ -68,13 +68,13 @@ def get_acc(cos_q_nt, cos_q_pt, batch_size):
 def get_loss(cos_q_pt, cos_q_nt):
-    loss_op1 = fluid.layers.elementwise_sub(
+    loss_op1 = paddle.subtract(
         fluid.layers.fill_constant_batch_size_like(
             input=cos_q_pt, shape=[-1, 1], value=margin, dtype='float32'
         ),
         cos_q_pt,
     )
-    loss_op2 = fluid.layers.elementwise_add(loss_op1, cos_q_nt)
+    loss_op2 = paddle.add(loss_op1, cos_q_nt)
     loss_op3 = paddle.maximum(
         fluid.layers.fill_constant_batch_size_like(
             input=loss_op2, shape=[-1, 1], value=0.0, dtype='float32'
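Note: get_loss builds a pairwise hinge loss, max(0, margin - cos_q_pt + cos_q_nt), now spelled with paddle.subtract / paddle.add / paddle.maximum. A self-contained sketch of the same computation with made-up similarities:

    import paddle

    margin = 0.1
    cos_q_pt = paddle.to_tensor([[0.8]])   # positive-pair similarity
    cos_q_nt = paddle.to_tensor([[0.3]])   # negative-pair similarity
    loss = paddle.maximum(
        paddle.zeros_like(cos_q_pt),
        paddle.add(paddle.subtract(paddle.full([1, 1], margin), cos_q_pt), cos_q_nt),
    )
    print(loss.numpy())                    # [[0.]] since 0.1 - 0.8 + 0.3 < 0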
......
@@ -163,7 +163,7 @@ class SE_ResNeXt:
         short = self.shortcut(input, num_filters * 2, stride)
-        return fluid.layers.elementwise_add(x=short, y=scale, act='relu')
+        return paddle.nn.functional.relu(paddle.add(x=short, y=scale))
     def conv_bn_layer(
         self, input, num_filters, filter_size, stride=1, groups=1, act=None
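Note: elementwise_add's fused act='relu' argument has no counterpart on paddle.add, so the activation becomes an explicit paddle.nn.functional.relu call. A minimal sketch of the pattern:

    import paddle
    import paddle.nn.functional as F

    short = paddle.to_tensor([-1.0, 2.0])
    scale = paddle.to_tensor([0.5, -3.0])
    out = F.relu(paddle.add(short, scale))   # was: elementwise_add(..., act='relu')
    print(out.numpy())                       # [0. 0.]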
......
@@ -114,7 +114,7 @@ class BasicBlock(fluid.dygraph.Layer):
     def forward(self, inputs):
         conv1 = self.conv1(inputs)
         conv2 = self.conv2(conv1)
-        out = fluid.layers.elementwise_add(x=inputs, y=conv2, act=None)
+        out = paddle.add(x=inputs, y=conv2)
         return out
......
@@ -72,15 +72,13 @@ class BasicLSTMUnit(Layer):
         concat_input_hidden = layers.concat([input, pre_hidden], 1)
         gate_input = layers.matmul(x=concat_input_hidden, y=self._weight)
-        gate_input = layers.elementwise_add(gate_input, self._bias)
+        gate_input = paddle.add(gate_input, self._bias)
         i, j, f, o = layers.split(gate_input, num_or_sections=4, dim=-1)
-        new_cell = layers.elementwise_add(
-            layers.elementwise_mul(
+        new_cell = paddle.add(
+            paddle.multiply(
                 pre_cell, paddle.nn.functional.sigmoid(f + self._forget_bias)
             ),
-            layers.elementwise_mul(
-                paddle.nn.functional.sigmoid(i), paddle.tanh(j)
-            ),
+            paddle.multiply(paddle.nn.functional.sigmoid(i), paddle.tanh(j)),
         )
         new_hidden = paddle.tanh(new_cell) * paddle.nn.functional.sigmoid(o)
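Note: the rewritten BasicLSTMUnit body is the standard LSTM cell update, c_t = sigmoid(f + forget_bias) * c_{t-1} + sigmoid(i) * tanh(j) and h_t = tanh(c_t) * sigmoid(o). A self-contained sketch of the same gate arithmetic (shapes and the random gate pre-activations are illustrative):

    import paddle
    import paddle.nn.functional as F

    hidden = 4
    gate_input = paddle.randn([2, 4 * hidden])   # stands in for concat(x, h) @ W + b
    pre_cell = paddle.randn([2, hidden])
    forget_bias = 1.0

    i, j, f, o = paddle.split(gate_input, num_or_sections=4, axis=-1)
    new_cell = paddle.add(
        paddle.multiply(pre_cell, F.sigmoid(f + forget_bias)),
        paddle.multiply(F.sigmoid(i), paddle.tanh(j)),
    )
    new_hidden = paddle.tanh(new_cell) * F.sigmoid(o)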
@@ -442,13 +440,12 @@ class BaseModel(fluid.dygraph.Layer):
             np.array(noend_array, dtype='float32')
         )
-        step_log_probs = fluid.layers.elementwise_mul(
+        step_log_probs = paddle.multiply(
             paddle.expand(
                 fluid.layers.unsqueeze(beam_finished, [2]),
                 [-1, -1, self.tar_vocab_size],
             ),
             noend_mask_tensor,
-            axis=-1,
         ) - fluid.layers.elementwise_mul(
             step_log_probs, (beam_finished - 1), axis=0
         )
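Note: the dropped axis=-1 argument was elementwise_mul's way of aligning a trailing dimension; paddle.multiply relies on standard numpy-style broadcasting instead, so after the expand a [vocab]-shaped mask multiplies a [batch, beam, vocab] tensor with no extra argument. A hedged sketch with toy shapes:

    import paddle

    beam_finished = paddle.to_tensor([[0.0, 1.0]])     # [batch=1, beam=2]
    noend_mask = paddle.to_tensor([0.0, -1e9, 0.0])    # [vocab=3]
    expanded = paddle.expand(
        paddle.unsqueeze(beam_finished, [2]), [1, 2, 3]
    )
    masked = paddle.multiply(expanded, noend_mask)     # [3] broadcasts over [1, 2, 3]
    print(masked.shape)                                # [1, 2, 3]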
@@ -693,7 +690,7 @@ class AttentionModel(fluid.dygraph.Layer):
             if mask is not None:
                 attn = paddle.transpose(attn, [1, 0, 2])
-                attn = fluid.layers.elementwise_add(attn, mask * 1000000000, -1)
+                attn = paddle.add(attn, mask * 1000000000)
                 attn = paddle.transpose(attn, [1, 0, 2])
             weight = fluid.layers.softmax(attn)
             weight_memory = fluid.layers.matmul(weight, memory)
......
@@ -171,7 +171,7 @@ class ElementwiseAddLayer:
         """
         operation
         """
-        add = fluid.layers.elementwise_add(x, y)
+        add = paddle.add(x, y)
         return add
@@ -190,7 +190,7 @@ class ElementwiseSubLayer:
         """
        operation
         """
-        sub = fluid.layers.elementwise_sub(x, y)
+        sub = paddle.subtract(x, y)
         return sub
......
@@ -187,7 +187,7 @@ class ElementwiseSubLayer:
         """
         operation
         """
-        sub = paddle.fluid.layers.elementwise_sub(x, y)
+        sub = paddle.subtract(x, y)
         return sub
......
@@ -330,11 +330,11 @@ def bmn_loss_func(
     coef_1 = 0.5 * ratio
     epsilon = 0.000001
     # temp = fluid.layers.log(pred_score + epsilon)
-    loss_pos = fluid.layers.elementwise_mul(
+    loss_pos = paddle.multiply(
         fluid.layers.log(pred_score + epsilon), pmask
     )
     loss_pos = coef_1 * fluid.layers.reduce_mean(loss_pos)
-    loss_neg = fluid.layers.elementwise_mul(
+    loss_neg = paddle.multiply(
         fluid.layers.log(1.0 - pred_score + epsilon), (1.0 - pmask)
     )
     loss_neg = coef_0 * fluid.layers.reduce_mean(loss_neg)
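Note: this block is a class-balanced binary logistic loss: coef_1 weights the positive term mean(pmask * log(p)) and coef_0 the negative term mean((1 - pmask) * log(1 - p)); only the multiply call changes in the hunk. A compact sketch under those assumptions (coefficients and scores are illustrative):

    import paddle

    pred_score = paddle.to_tensor([0.9, 0.2, 0.7])
    pmask = paddle.to_tensor([1.0, 0.0, 1.0])
    coef_0, coef_1, epsilon = 1.5, 0.75, 1e-6   # illustrative values

    loss_pos = coef_1 * paddle.mean(
        paddle.multiply(paddle.log(pred_score + epsilon), pmask)
    )
    loss_neg = coef_0 * paddle.mean(
        paddle.multiply(paddle.log(1.0 - pred_score + epsilon), 1.0 - pmask)
    )
    loss = -(loss_pos + loss_neg)               # balanced BCE, sign as in BMN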
@@ -348,14 +348,14 @@ def bmn_loss_func(
     def pem_reg_loss_func(pred_score, gt_iou_map, mask):
-        gt_iou_map = fluid.layers.elementwise_mul(gt_iou_map, mask)
+        gt_iou_map = paddle.multiply(gt_iou_map, mask)
         u_hmask = fluid.layers.cast(x=gt_iou_map > 0.7, dtype=DATATYPE)
         u_mmask = paddle.logical_and(gt_iou_map <= 0.7, gt_iou_map > 0.3)
         u_mmask = fluid.layers.cast(x=u_mmask, dtype=DATATYPE)
         u_lmask = paddle.logical_and(gt_iou_map <= 0.3, gt_iou_map >= 0.0)
         u_lmask = fluid.layers.cast(x=u_lmask, dtype=DATATYPE)
-        u_lmask = fluid.layers.elementwise_mul(u_lmask, mask)
+        u_lmask = paddle.multiply(u_lmask, mask)
         num_h = fluid.layers.cast(paddle.sum(u_hmask), dtype=DATATYPE)
         num_m = fluid.layers.cast(paddle.sum(u_mmask), dtype=DATATYPE)
@@ -367,7 +367,7 @@ def bmn_loss_func(
                 0.0, 1.0, [gt_iou_map.shape[1], gt_iou_map.shape[2]]
             ).astype(DATATYPE)
         )
-        u_smmask = fluid.layers.elementwise_mul(u_mmask, u_smmask)
+        u_smmask = paddle.multiply(u_mmask, u_smmask)
         u_smmask = fluid.layers.cast(x=(u_smmask > (1.0 - r_m)), dtype=DATATYPE)
         r_l = num_h / num_l
@@ -376,23 +376,23 @@ def bmn_loss_func(
                 0.0, 1.0, [gt_iou_map.shape[1], gt_iou_map.shape[2]]
             ).astype(DATATYPE)
         )
-        u_slmask = fluid.layers.elementwise_mul(u_lmask, u_slmask)
+        u_slmask = paddle.multiply(u_lmask, u_slmask)
         u_slmask = fluid.layers.cast(x=(u_slmask > (1.0 - r_l)), dtype=DATATYPE)
         weights = u_hmask + u_smmask + u_slmask
         weights.stop_gradient = True
         loss = fluid.layers.square_error_cost(pred_score, gt_iou_map)
-        loss = fluid.layers.elementwise_mul(loss, weights)
+        loss = paddle.multiply(loss, weights)
         loss = 0.5 * paddle.sum(loss) / paddle.sum(weights)
         return loss
     def pem_cls_loss_func(pred_score, gt_iou_map, mask):
-        gt_iou_map = fluid.layers.elementwise_mul(gt_iou_map, mask)
+        gt_iou_map = paddle.multiply(gt_iou_map, mask)
         gt_iou_map.stop_gradient = True
         pmask = fluid.layers.cast(x=(gt_iou_map > 0.9), dtype=DATATYPE)
         nmask = fluid.layers.cast(x=(gt_iou_map <= 0.9), dtype=DATATYPE)
-        nmask = fluid.layers.elementwise_mul(nmask, mask)
+        nmask = paddle.multiply(nmask, mask)
         num_positive = paddle.sum(pmask)
         num_entries = num_positive + paddle.sum(nmask)
@@ -400,11 +400,11 @@ def bmn_loss_func(
         coef_0 = 0.5 * ratio / (ratio - 1)
         coef_1 = 0.5 * ratio
         epsilon = 0.000001
-        loss_pos = fluid.layers.elementwise_mul(
+        loss_pos = paddle.multiply(
             fluid.layers.log(pred_score + epsilon), pmask
         )
         loss_pos = coef_1 * paddle.sum(loss_pos)
-        loss_neg = fluid.layers.elementwise_mul(
+        loss_neg = paddle.multiply(
             fluid.layers.log(1.0 - pred_score + epsilon), nmask
         )
         loss_neg = coef_0 * paddle.sum(loss_neg)
......
@@ -91,8 +91,8 @@ class Cycle_Gan(fluid.dygraph.Layer):
         cyc_A = self.build_generator_resnet_9blocks_b(fake_B)
         cyc_B = self.build_generator_resnet_9blocks_a(fake_A)
-        diff_A = paddle.abs(fluid.layers.elementwise_sub(x=input_A, y=cyc_A))
-        diff_B = paddle.abs(fluid.layers.elementwise_sub(x=input_B, y=cyc_B))
+        diff_A = paddle.abs(paddle.subtract(x=input_A, y=cyc_A))
+        diff_B = paddle.abs(paddle.subtract(x=input_B, y=cyc_B))
         cyc_A_loss = fluid.layers.reduce_mean(diff_A) * lambda_A
         cyc_B_loss = fluid.layers.reduce_mean(diff_B) * lambda_B
         cyc_loss = cyc_A_loss + cyc_B_loss
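Note: the cycle-consistency term is the mean L1 distance between an input and its round-trip reconstruction, weighted by lambda_A / lambda_B; only the subtraction call changes here. A minimal sketch with random stand-ins for the generator outputs:

    import paddle

    lambda_A = 10.0
    input_A = paddle.rand([1, 3, 8, 8])
    cyc_A = paddle.rand([1, 3, 8, 8])   # stand-in for G_B(G_A(input_A))

    diff_A = paddle.abs(paddle.subtract(input_A, cyc_A))
    cyc_A_loss = paddle.mean(diff_A) * lambda_A   # reduce_mean -> paddle.mean in 2.x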
@@ -106,7 +106,7 @@ class Cycle_Gan(fluid.dygraph.Layer):
         idt_A = self.build_generator_resnet_9blocks_a(input_B)
         idt_loss_A = (
             fluid.layers.reduce_mean(
-                paddle.abs(fluid.layers.elementwise_sub(x=input_B, y=idt_A))
+                paddle.abs(paddle.subtract(x=input_B, y=idt_A))
             )
             * lambda_B
             * lambda_identity
@@ -115,12 +115,12 @@ class Cycle_Gan(fluid.dygraph.Layer):
         idt_B = self.build_generator_resnet_9blocks_b(input_A)
         idt_loss_B = (
             fluid.layers.reduce_mean(
-                paddle.abs(fluid.layers.elementwise_sub(x=input_A, y=idt_B))
+                paddle.abs(paddle.subtract(x=input_A, y=idt_B))
             )
             * lambda_A
             * lambda_identity
         )
-        idt_loss = fluid.layers.elementwise_add(idt_loss_A, idt_loss_B)
+        idt_loss = paddle.add(idt_loss_A, idt_loss_B)
         g_loss = cyc_loss + G + idt_loss
         return (
             fake_A,
......
@@ -326,7 +326,7 @@ class InvertedResidualUnit(fluid.dygraph.Layer):
         y = self._bottleneck_conv(y, if_act=True)
         y = self._linear_conv(y, if_act=False)
         if ifshortcut:
-            y = fluid.layers.elementwise_add(inputs, y)
+            y = paddle.add(inputs, y)
         return y
......
@@ -96,7 +96,7 @@ class SimpleLSTMRNN(fluid.Layer):
             nn = fluid.layers.concat([step_input, pre_hidden], 1)
             gate_input = fluid.layers.matmul(x=nn, y=weight_1)
-            gate_input = fluid.layers.elementwise_add(gate_input, bias)
+            gate_input = paddle.add(gate_input, bias)
             i, j, f, o = fluid.layers.split(
                 gate_input, num_or_sections=4, dim=-1
             )
@@ -214,7 +214,7 @@ class PtbModel(fluid.Layer):
         )
         projection = fluid.layers.matmul(rnn_out, self.softmax_weight)
-        projection = fluid.layers.elementwise_add(projection, self.softmax_bias)
+        projection = paddle.add(projection, self.softmax_bias)
         loss = fluid.layers.softmax_with_cross_entropy(
             logits=projection, label=label, soft_label=False
......
@@ -124,7 +124,7 @@ def train(args, place, to_static):
         mask.stop_gradient = True
         loss_probs = fluid.layers.log(loss_probs)
-        loss_probs = fluid.layers.elementwise_mul(loss_probs, mask)
+        loss_probs = paddle.multiply(loss_probs, mask)
         loss_probs = paddle.sum(loss_probs, axis=-1)
         policy.saved_log_probs.append(loss_probs)
@@ -151,7 +151,7 @@ def train(args, place, to_static):
             _R = -1 * R * R_numpy
             _R = to_variable(_R)
             _R.stop_gradient = True
-            cur_loss = fluid.layers.elementwise_mul(_R, log_prob)
+            cur_loss = paddle.multiply(_R, log_prob)
             policy_loss.append(cur_loss)
         policy_loss = fluid.layers.concat(policy_loss)
......
@@ -133,7 +133,7 @@ class BottleneckBlock(fluid.dygraph.Layer):
         else:
             short = self.short(inputs)
-        y = fluid.layers.elementwise_add(x=short, y=conv2)
+        y = paddle.add(x=short, y=conv2)
         layer_helper = fluid.layer_helper.LayerHelper(
             self.full_name(), act='relu'
......
@@ -215,7 +215,7 @@ class BottleneckBlock(fluid.dygraph.Layer):
         else:
             short = self.short(inputs)
-        y = fluid.layers.elementwise_add(x=short, y=scale, act='relu')
+        y = paddle.nn.functional.relu(paddle.add(x=short, y=scale))
         return y
......
@@ -133,7 +133,7 @@ class BottleneckBlock(fluid.dygraph.Layer):
             short = inputs
         else:
             short = self.short(inputs)
-        y = fluid.layers.elementwise_add(x=short, y=conv2, act="relu")
+        y = paddle.nn.functional.relu(paddle.add(x=short, y=conv2))
         return y
......
@@ -257,9 +257,7 @@ class SkipGram(fluid.dygraph.Layer):
         # center_words_emb = [batch_size, embedding_size]
         # target_words_emb = [batch_size, embedding_size]
-        word_sim = fluid.layers.elementwise_mul(
-            center_words_emb, target_words_emb
-        )
+        word_sim = paddle.multiply(center_words_emb, target_words_emb)
         word_sim = paddle.sum(word_sim, axis=-1)
         pred = paddle.nn.functional.sigmoid(word_sim)
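Note: the skip-gram score is a row-wise dot product, written as an elementwise multiply followed by a sum over the embedding axis. A minimal sketch:

    import paddle

    center = paddle.rand([4, 16])   # [batch_size, embedding_size]
    target = paddle.rand([4, 16])

    word_sim = paddle.sum(paddle.multiply(center, target), axis=-1)  # shape [4]
    pred = paddle.nn.functional.sigmoid(word_sim)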
......
@@ -756,13 +756,12 @@ class Transformer(Layer):
         def mask_probs(probs, finished, noend_mask_tensor):
             finished = layers.cast(finished, dtype=probs.dtype)
-            probs = layers.elementwise_mul(
+            probs = paddle.multiply(
                 paddle.expand(
                     layers.unsqueeze(finished, [2]),
                     [-1, -1, self.trg_vocab_size],
                 ),
                 noend_mask_tensor,
-                axis=-1,
             ) - layers.elementwise_mul(probs, (finished - 1), axis=0)
             return probs
......
@@ -45,7 +45,7 @@ class TestCheckFiniteAndUnscale(unittest.TestCase):
             inputs={"FloatStatus": float_status},
             outputs={"FloatStatusOut": float_status},
         )
-        c = paddle.fluid.layers.elementwise_div(a, b)
+        c = paddle.divide(a, b)
         out, found_inf = check_finite_and_unscale(
             [c], scale, float_status=float_status
         )
@@ -115,7 +115,7 @@ class TestCheckFiniteAndUnscaleClearFloatStatus(unittest.TestCase):
             inputs={"FloatStatus": float_status},
             outputs={"FloatStatusOut": float_status},
         )
-        c = paddle.fluid.layers.elementwise_div(a, b)
+        c = paddle.divide(a, b)
         out, found_inf = check_finite_and_unscale(
             [c], scale, float_status=float_status
         )
@@ -127,7 +127,7 @@ class TestCheckFiniteAndUnscaleClearFloatStatus(unittest.TestCase):
             inputs={"FloatStatus": float_status},
             outputs={"FloatStatusOut": float_status},
         )
-        d = paddle.fluid.layers.elementwise_add(a, b)
+        d = paddle.add(a, b)
        out, found_inf = check_finite_and_unscale(
             [d], scale, float_status=float_status
         )
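Note: these tests feed the result of a divide (or add) into the internal check_finite_and_unscale op to exercise its found_inf flag. A hedged sketch of the underlying idea using only public APIs, not the internal op itself:

    import paddle

    a = paddle.to_tensor([1.0, 2.0])
    b = paddle.to_tensor([0.0, 2.0])
    c = paddle.divide(a, b)                      # first entry overflows to inf
    found_inf = not bool(paddle.all(paddle.isfinite(c)))
    print(found_inf)                             # True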
......
@@ -133,7 +133,7 @@ class TestElementwiseDivNet(unittest.TestCase):
         e = paddle.multiply(a, b)
         f = paddle.multiply(c, d)
         f.stop_gradient = True
-        g = fluid.layers.elementwise_div(e, f)
+        g = paddle.divide(e, f)
         fc_1 = fluid.layers.fc(input=g, size=128)
         prediction = fluid.layers.fc(input=fc_1, size=2, act='softmax')
......
@@ -66,7 +66,7 @@ class SimpleNet(fluid.Layer):
     def forward(self, input, label):
         x_emb = self.embedding(input)
         fc = fluid.layers.matmul(x_emb, self.softmax_weight)
-        fc = fluid.layers.elementwise_add(fc, self.softmax_bias)
+        fc = paddle.add(fc, self.softmax_bias)
         projection = paddle.reshape(fc, shape=[-1, self.vocab_size])
         loss = fluid.layers.softmax_with_cross_entropy(
             logits=projection, label=label, soft_label=False
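Note: this language-model head is matmul + bias-add + reshape followed by softmax cross-entropy; only the bias-add changes in the hunk. A sketch of the same head written entirely against the 2.x API (paddle.nn.functional.cross_entropy here stands in for the legacy softmax_with_cross_entropy; shapes are illustrative):

    import paddle
    import paddle.nn.functional as F

    vocab_size, hidden = 10, 8
    x_emb = paddle.randn([2, hidden])
    softmax_weight = paddle.randn([hidden, vocab_size])
    softmax_bias = paddle.randn([vocab_size])
    label = paddle.to_tensor([3, 7])

    fc = paddle.add(paddle.matmul(x_emb, softmax_weight), softmax_bias)
    projection = paddle.reshape(fc, shape=[-1, vocab_size])
    loss = F.cross_entropy(projection, label)   # hard labels, as soft_label=False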
......