Unverified commit 2ca53fa6 authored by chentianyu03, committed by GitHub

change paddle.fluid.layers.reduce_sum to paddle.sum in sample codes (#27998) (#28017)

* change paddle.fluid.layers.reduce_sum to paddle.sum in sample codes

* format codes
Parent 4316bd4d
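For reference, a minimal sketch of the migration this commit applies, assuming Paddle 2.0+; the tensor values are invented for illustration:

    import paddle

    x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
    # Legacy 1.x call being removed from the sample codes:
    #   s = paddle.fluid.layers.reduce_sum(x)
    # Paddle 2.0 replacement used throughout this commit:
    s_all = paddle.sum(x)           # sum over every element -> 10.0
    s_rows = paddle.sum(x, axis=1)  # per-row sums -> [3.0, 7.0]
    print(s_all.numpy(), s_rows.numpy())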
@@ -712,7 +712,7 @@ void BindImperative(py::module *m_ptr) {
         tmp.stop_gradient=False
         inputs.append(tmp)
     ret = paddle.sums(inputs2)
-    loss = paddle.fluid.layers.reduce_sum(ret)
+    loss = paddle.sum(ret)
     loss.backward()
     print("Before clear_gradient {}".format(loss.grad))
     loss.clear_gradient()
...
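The hunk above quotes only part of the clear_gradient docstring sample; the list construction falls outside the diff context. A self-contained approximation, assuming dygraph mode and substituting paddle.add_n for the legacy paddle.sums alias:

    import paddle

    inputs = []
    for _ in range(10):
        tmp = paddle.to_tensor([1.0, 2.0, 3.0])
        tmp.stop_gradient = False
        inputs.append(tmp)
    ret = paddle.add_n(inputs)   # elementwise sum of the list of tensors
    loss = paddle.sum(ret)
    loss.backward()
    print("Before clear_gradient {}".format(loss.grad))
    loss.clear_gradient()        # drops the gradient buffer held by the tensor
    print("After clear_gradient {}".format(loss.grad))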
@@ -305,13 +305,15 @@ def binary_cross_entropy_with_logits(logit,
     out = paddle.fluid.layers.sigmoid_cross_entropy_with_logits(
         logit, label, name=sigmoid_name)
-    one = paddle.fluid.layers.fill_constant(shape=[1], value=1.0, dtype=logit.dtype)
+    one = paddle.fluid.layers.fill_constant(
+        shape=[1], value=1.0, dtype=logit.dtype)
     if pos_weight is not None:
         fluid.data_feeder.check_variable_and_dtype(
             pos_weight, 'pos_weight', ['float32', 'float64'],
             'binary_cross_entropy_with_logits')
         log_weight = paddle.add(
-            paddle.multiply(label, paddle.fluid.layers.elementwise_sub(pos_weight, one)),
+            paddle.multiply(
+                label, paddle.fluid.layers.elementwise_sub(pos_weight, one)),
             one)
         pos_weight_name = name if reduction == 'none' and weight is None else None
         out = paddle.multiply(out, log_weight, name=pos_weight_name)
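For context, a usage sketch of the pos_weight branch touched above; the values are invented, and the comment restates the scaling that branch computes:

    import paddle
    import paddle.nn.functional as F

    logit = paddle.to_tensor([5.0, 1.0, 3.0])
    label = paddle.to_tensor([1.0, 0.0, 1.0])
    pos_weight = paddle.to_tensor([2.0])
    # Internally the branch scales each element's loss by
    # log_weight = 1 + label * (pos_weight - 1).
    out = F.binary_cross_entropy_with_logits(
        logit, label, pos_weight=pos_weight, reduction='none')
    print(out.numpy())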
@@ -618,7 +620,8 @@ def margin_ranking_loss(input,
     if margin != 0.0:
         margin_var = out.block.create_var(dtype=out.dtype)
-        paddle.fluid.layers.fill_constant([1], out.dtype, margin, out=margin_var)
+        paddle.fluid.layers.fill_constant(
+            [1], out.dtype, margin, out=margin_var)
         out = paddle.add(out, margin_var)
     result_out = helper.create_variable_for_type_inference(input.dtype)
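A usage sketch exercising the nonzero-margin path reformatted above; inputs are invented:

    import paddle
    import paddle.nn.functional as F

    input = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
    other = paddle.to_tensor([[2.0, 1.0], [2.0, 4.0]])
    label = paddle.to_tensor([[1.0, -1.0], [-1.0, -1.0]])
    # margin != 0.0 triggers the fill_constant/add path shown in the hunk.
    loss = F.margin_ranking_loss(input, other, label, margin=0.1)
    print(loss.numpy())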
@@ -729,7 +732,8 @@ def l1_loss(input, label, reduction='mean', name=None):
         unreduced = paddle.fluid.layers.elementwise_sub(input, label, act='abs')
         return paddle.mean(unreduced, name=name)
     else:
-        return paddle.fluid.layers.elementwise_sub(input, label, act='abs', name=name)
+        return paddle.fluid.layers.elementwise_sub(
+            input, label, act='abs', name=name)

 def nll_loss(input,
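A usage sketch covering both l1_loss branches shown above, the 'mean' reduction and the elementwise 'none' path; inputs are invented:

    import paddle
    import paddle.nn.functional as F

    input = paddle.to_tensor([[1.5, 0.8], [0.2, 1.3]])
    label = paddle.to_tensor([[1.7, 1.0], [0.4, 0.5]])
    print(F.l1_loss(input, label).numpy())                    # mean of |input - label|
    print(F.l1_loss(input, label, reduction='none').numpy())  # elementwise |input - label|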
@@ -1342,7 +1346,7 @@ def sigmoid_focal_loss(logit,
     label = paddle.to_tensor([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], dtype='float32')
     one = paddle.to_tensor([1.], dtype='float32')
     fg_label = paddle.greater_equal(label, one)
-    fg_num = paddle.fluid.layers.reduce_sum(paddle.cast(fg_label, dtype='float32'))
+    fg_num = paddle.sum(paddle.cast(fg_label, dtype='float32'))
     output = paddle.nn.functional.sigmoid_focal_loss(logit, label, normalizer=fg_num)
     print(output.numpy())  # [0.65782464]
...
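A self-contained version of the sigmoid_focal_loss docstring sample; the logit values are assumed here, since they fall outside the quoted diff context:

    import paddle
    import paddle.nn.functional as F

    logit = paddle.to_tensor([[0.97, 0.91, 0.03], [0.55, 0.43, 0.71]], dtype='float32')
    label = paddle.to_tensor([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], dtype='float32')
    one = paddle.to_tensor([1.], dtype='float32')
    fg_label = paddle.greater_equal(label, one)                   # foreground mask
    fg_num = paddle.sum(paddle.cast(fg_label, dtype='float32'))   # count used as normalizer
    output = F.sigmoid_focal_loss(logit, label, normalizer=fg_num)
    print(output.numpy())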