BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit bf4a4636 (unverified)
Authored Aug 24, 2020 by Zhong Hui; committed by GitHub on Aug 24, 2020
change to use bce_loss op, add shape check for bce_loss
change to use bce_loss op, add numel check for bce_loss.
Parent: 0e816260
Showing 5 changed files with 73 additions and 81 deletions (+73 −81)
paddle/fluid/operators/bce_loss_op.cc                  +41 −23
paddle/fluid/operators/bce_loss_op.cu                   +3  −2
paddle/fluid/operators/bce_loss_op.h                    +2  −2
python/paddle/fluid/tests/unittests/test_bce_loss.py    +0 −14
python/paddle/nn/functional/loss.py                    +27 −40
paddle/fluid/operators/bce_loss_op.cc
@@ -32,22 +32,29 @@ class BCELossOp : public framework::OperatorWithKernel {
     OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "BCELoss");

     auto x_dims = ctx->GetInputDim("X");
-    auto label_dims = ctx->GetInputDim("Label");
-    PADDLE_ENFORCE_EQ(x_dims.size(), label_dims.size(),
-                      platform::errors::InvalidArgument(
-                          "Input(X) and Input(Label) shall have the same shape."));
-
-    bool contain_unknown_dim = framework::contain_unknown_dim(x_dims) ||
-                               framework::contain_unknown_dim(label_dims);
-    bool check = ctx->IsRuntime() || !contain_unknown_dim;
+    auto labels_dims = ctx->GetInputDim("Label");
+
+    int rank = x_dims.size();
+    PADDLE_ENFORCE_EQ(rank, labels_dims.size(),
+                      platform::errors::InvalidArgument(
+                          "Input(X) and Input(Label) shall have the same rank."
+                          "But received: the rank of Input(X) is [%d], "
+                          "the rank of Input(Label) is [%d].",
+                          rank, labels_dims.size()));
+
+    bool check = true;
+    if ((!ctx->IsRuntime()) && (framework::product(x_dims) <= 0 ||
+                                framework::product(labels_dims) <= 0)) {
+      check = false;
+    }
+
     if (check) {
-      PADDLE_ENFORCE_EQ(x_dims, label_dims,
+      PADDLE_ENFORCE_EQ(x_dims, labels_dims,
                         platform::errors::InvalidArgument(
-                            "ShapeError: Input(X) and Input(Label) shall have the same shape "
-                            "But received: the shape of Input(X) is [%s], the shape of "
-                            "Input(Label) is [%s].",
-                            x_dims, label_dims));
+                            "Input(X) and Input(Label) shall have the same "
+                            "shape. But received: the shape of Input(X) is "
+                            "[%s], the shape of Input(Label) is [%s].",
+                            x_dims, labels_dims));
     }

     ctx->ShareDim("X", "Out");
@@ -76,20 +83,31 @@ class BCELossGradOp : public framework::OperatorWithKernel {
                    framework::GradVarName("X"), "BCELossGrad");

     auto x_dims = ctx->GetInputDim("X");
+    auto labels_dims = ctx->GetInputDim("Label");
     auto dout_dims = ctx->GetInputDim(framework::GradVarName("Out"));
-    bool contain_unknown_dim = framework::contain_unknown_dim(x_dims) ||
-                               framework::contain_unknown_dim(dout_dims);
-    bool check = ctx->IsRuntime() || !contain_unknown_dim;
+
+    bool check = true;
+    if ((!ctx->IsRuntime()) && (framework::product(x_dims) <= 0 ||
+                                framework::product(labels_dims) <= 0)) {
+      check = false;
+    }
+
     if (check) {
+      PADDLE_ENFORCE_EQ(x_dims, labels_dims,
+                        platform::errors::InvalidArgument(
+                            "Input(X) and Input(Label) shall have the same "
+                            "shape. But received: the shape of Input(X) is "
+                            "[%s], the shape of Input(Label) is [%s].",
+                            x_dims, labels_dims));
+
       PADDLE_ENFORCE_EQ(x_dims, dout_dims,
                         platform::errors::InvalidArgument(
-                            "ShapeError:The Input(X) and Input(Out@Grad) "
-                            "should have the same "
-                            "shape, But received: the shape of Input(X) is "
-                            "[%s], the shape of "
-                            "Input(Out@GRAD) is [%s].",
+                            "Input(X) and Input(Out@Grad) shall have the same "
+                            "shape. But received: the shape of Input(X) is "
+                            "[%s], the shape of Input(Out@Grad) is [%s].",
                             x_dims, dout_dims));
     }
+
     ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
     ctx->ShareLoD("X", framework::GradVarName("X"));
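Taken together, the two hunks swap the unknown-dim guard for an explicit rank check plus a compile-time numel guard, and make the grad op compare X against Label as well. A minimal sketch of the user-visible effect, in the style of the unit test further down (fluid.dygraph.to_variable and the exact exception type are assumptions; the printed message wording is illustrative):

    import numpy as np
    import paddle
    import paddle.fluid as fluid
    import paddle.nn.functional as F

    paddle.disable_static()
    # Shapes deliberately differ: (2, 3, 4, 10) vs. (3, 4, 10).
    input_np = np.random.uniform(0.1, 0.8, size=(2, 3, 4, 10)).astype(np.float64)
    label_np = np.random.randint(0, 2, size=(3, 4, 10)).astype(np.float64)

    try:
        F.binary_cross_entropy(
            fluid.dygraph.to_variable(input_np),
            fluid.dygraph.to_variable(label_np))
    except Exception as err:
        # The new rank check reports the ranks of Input(X) and Input(Label).
        print(type(err).__name__)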
paddle/fluid/operators/bce_loss_op.cu
@@ -67,7 +67,8 @@ class BCELossCUDAKernel : public framework::OpKernel<T> {
     auto x_data = x->data<T>();
     auto out_data = out->mutable_data<T>(ctx.GetPlace());
-    int x_numel = x->numel();
+    auto x_numel = x->numel();
+
     platform::GpuLaunchConfig config =
         platform::getGpuLaunchConfig(x_numel, ctx);
@@ -75,7 +76,7 @@ class BCELossCUDAKernel : public framework::OpKernel<T> {
       framework::TensorCopy(*x, platform::CPUPlace(), &x_cpu);
       T* x_cpu_data = x_cpu.data<T>();
-      for (int i = 0; i < x_numel; ++i) {
+      for (int64_t i = 0; i < x_numel; ++i) {
         PADDLE_ENFORCE_GE(
             x_cpu_data[i], static_cast<T>(0),
             platform::errors::InvalidArgument(
paddle/fluid/operators/bce_loss_op.h
@@ -34,11 +34,11 @@ class BCELossOpKernel : public framework::OpKernel<T> {
     auto x_data = x->data<T>();
     auto label_data = labels->data<T>();
     auto out_data = out->mutable_data<T>(ctx.GetPlace());
-    int x_numel = x->numel();
+    auto x_numel = x->numel();

     // out = -(label * ln(x) + (1 - label) * ln(1 - x)) = (label - 1) * ln(1 -
     // x) - label * ln(x)
-    for (int i = 0; i < x_numel; ++i) {
+    for (int64_t i = 0; i < x_numel; ++i) {
       PADDLE_ENFORCE_GE(
           x_data[i], static_cast<T>(0),
           platform::errors::InvalidArgument(
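Both the CUDA and CPU kernels get the same fix: Tensor::numel() returns int64_t, so `auto x_numel` now deduces the wider type and the element loop indexes with int64_t as well, keeping the range checks correct for tensors holding more than 2^31 − 1 elements.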
python/paddle/fluid/tests/unittests/test_bce_loss.py
@@ -189,20 +189,6 @@ class TestBCELoss(unittest.TestCase):
         self.assertTrue(np.allclose(static_functional, dy_functional))
         self.assertTrue(np.allclose(dy_functional, expected))

-    def test_BCELoss_boardcast(self):
-        input_np = np.random.uniform(
-            0.1, 0.8, size=(2, 3, 4, 10)).astype(np.float64)
-        label_np = np.random.randint(0, 2, size=(3, 4, 10)).astype(np.float64)
-        place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
-        ) else fluid.CPUPlace()
-
-        static_result = test_static_layer(place, input_np, label_np)
-        dy_result = test_dygraph_layer(place, input_np, label_np)
-        expected = calc_bceloss(input_np, label_np)
-        self.assertTrue(np.allclose(static_result, expected))
-        self.assertTrue(np.allclose(static_result, dy_result))
-        self.assertTrue(np.allclose(dy_result, expected))
-
     def test_BCELoss_error(self):
         paddle.disable_static()
         self.assertRaises(
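The deleted test_BCELoss_boardcast case fed a (2, 3, 4, 10) input with a (3, 4, 10) label. Both execution paths now dispatch to the bce_loss op, whose InferShape (see bce_loss_op.cc above) requires X and Label to have identical shapes, so broadcasting between input and label is no longer supported and the case is removed rather than updated.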
python/paddle/nn/functional/loss.py
@@ -157,19 +157,7 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean',
             reduction)

     if in_dygraph_mode():
-        one = _varbase_creator(dtype=input.dtype)
-        core.ops.fill_constant(one, 'value',
-                               float(1.0), 'force_cpu', False, 'dtype',
-                               one.dtype, 'str_value', '1.0', 'shape', [1])
-        one.stop_gradient = True
-        label_minus = core.ops.elementwise_sub(label, one)
-        input_minus = core.ops.elementwise_sub(one, input)
-        input_minus_log = core.ops.log(input_minus)
-        input_log = core.ops.log(input)
-        loss_1 = core.ops.elementwise_mul(label_minus, input_minus_log)
-        loss_2 = core.ops.elementwise_mul(label, input_log)
-        out = core.ops.elementwise_sub(loss_1, loss_2)
+        out = core.ops.bce_loss(input, label)
         if weight is not None:
             out = core.ops.elementwise_mul(out, weight, 'axis', -1)
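The deleted dygraph branch assembled -(label * ln(x) + (1 - label) * ln(1 - x)) from fill_constant, elementwise_sub, log, and elementwise_mul calls; the single core.ops.bce_loss call replaces that chain of kernel launches and leaves the backward pass to bce_loss_grad. The static-graph branch below gets the same treatment: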
@@ -187,17 +175,16 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean',
     fluid.data_feeder.check_variable_and_dtype(
         label, 'label', ['float32', 'float64'], 'binary_cross_entropy')

-    one = paddle.fill_constant(shape=[1], value=1.0, dtype=input.dtype)
-    one.stop_gradient = True
-    label_minus = paddle.elementwise_sub(label, one)
-    input_minus = paddle.elementwise_sub(one, input)
-    input_minus_log = paddle.log(input_minus)
-    input_log = paddle.log(input)
-    loss_1 = paddle.multiply(label_minus, input_minus_log)
-    loss_2 = paddle.multiply(label, input_log)
-
     sub_name = name if weight is None and reduction is 'none' else None
-    out = paddle.elementwise_sub(loss_1, loss_2, name=sub_name)
+    helper = LayerHelper("binary_cross_entropy", name=sub_name)
+    out = helper.create_variable_for_type_inference(dtype=input.dtype)
+    helper.append_op(
+        type='bce_loss',
+        inputs={
+            'X': [input],
+            'Label': [label],
+        },
+        outputs={'Out': [out]})

     if weight is not None:
         if isinstance(weight, paddle.framework.Variable):
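For reference, a happy-path call after this change; a hedged sketch mirroring test_bce_loss.py (the float64 dtype and the fluid.dygraph.to_variable helper are taken from that file; their availability at this commit is assumed):

    import numpy as np
    import paddle
    import paddle.fluid as fluid
    import paddle.nn.functional as F

    paddle.disable_static()
    input_np = np.random.uniform(0.1, 0.8, size=(20, 30)).astype(np.float64)
    label_np = np.random.randint(0, 2, size=(20, 30)).astype(np.float64)

    # Equal shapes: the call lowers to one fused bce_loss op,
    # then applies the 'mean' reduction.
    loss = F.binary_cross_entropy(
        fluid.dygraph.to_variable(input_np),
        fluid.dygraph.to_variable(label_np),
        reduction='mean')
    print(loss.numpy())  # a single scalar BCE value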