BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 6b3e9ccb
Authored Sep 21, 2017 by Yibing Liu

pass unit test for margin_rank_loss_op

Parent: 2f122561
Showing 4 changed files with 45 additions and 45 deletions.
paddle/operators/margin_rank_loss_op.cc    +24 -25
paddle/operators/margin_rank_loss_op.cu    +6 -4
paddle/operators/margin_rank_loss_op.h     +5 -5
python/paddle/v2/framework/tests/test_margin_rank_loss_op.py    +10 -11
paddle/operators/margin_rank_loss_op.cc
@@ -19,11 +19,7 @@ namespace operators {
 class MarginRankLossOp : public framework::OperatorWithKernel {
  public:
-  MarginRankLossOp(const std::string &type,
-                   const framework::VariableNameMap &inputs,
-                   const framework::VariableNameMap &outputs,
-                   const framework::AttributeMap &attrs)
-      : OperatorWithKernel(type, inputs, outputs, attrs) {}
+  using framework::OperatorWithKernel::OperatorWithKernel;

  protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
@@ -35,13 +31,11 @@ class MarginRankLossOp : public framework::OperatorWithKernel {
     auto label_dims = ctx.Input<framework::Tensor>("Label")->dims();
     auto x1_dims = ctx.Input<framework::Tensor>("X1")->dims();
     auto x2_dims = ctx.Input<framework::Tensor>("X2")->dims();
-    PADDLE_ENFORCE((label_dims.size() == 1) && (x1_dims.size() == 1) &&
-                       (x2_dims.size() == 1),
-                   "The rank of all inputs must be 1.");
-    PADDLE_ENFORCE((label_dims == x1_dims) && (x1_dims == x2_dims),
-                   "All inputs must have the same size");
-    ctx.Output<framework::LoDTensor>("Out")->Resize(label_dims);
+    PADDLE_ENFORCE((label_dims == x1_dims) && (x1_dims == x2_dims) &&
+                       (label_dims.size() == 2) && (label_dims[1] == 1),
+                   "All inputs must be vector with the same size");
+    ctx.Output<framework::LoDTensor>("Activated")->Resize(label_dims);
+    ctx.Output<framework::LoDTensor>("Out")->Resize(label_dims);
   }
 };
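In NumPy terms, the tightened check above moves from rank-1 vectors to column vectors: every input must have the same (batch_size, 1) shape. A minimal sketch of the new constraint (values hypothetical, not Paddle API):

import numpy as np

batch_size = 5
label = np.zeros((batch_size, 1), dtype="float32")  # hypothetical input
x1 = np.zeros_like(label)
x2 = np.zeros_like(label)

# mirrors the new PADDLE_ENFORCE: equal shapes, rank 2, trailing dim of 1
assert label.shape == x1.shape == x2.shape
assert label.ndim == 2 and label.shape[1] == 1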
@@ -51,18 +45,27 @@ class MarginRankLossOpMaker : public framework::OpProtoAndCheckerMaker {
   MarginRankLossOpMaker(framework::OpProto *proto,
                         framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("Label", "The label indicating X1 ranked higher than X2 or not.");
-    AddInput("X1", "The first input of MarginRankLossOp.");
-    AddInput("X2", "The second input of MarginRankLossOp");
-    AddAttr<AttrType>("margin", "Margin for MarginRankLossOp").SetDefault(0);
-    AddOutput("Out", "The output loss of MarginRankLoss operator");
+    AddInput("X1", "The first input of MarginRankLossOp, row vector.");
+    AddInput("X2", "The second input of MarginRankLossOp, row vector.");
+    AddInput("Label",
+             "The label indicating X1 ranked higher than X2 "
+             "or not, row vector.");
+    AddAttr<AttrType>("margin", "Margin for MarginRankLossOp, scalar.")
+        .SetDefault(0);
     AddOutput("Activated",
-              "Intermediate tensor to indicate "
-              "whether Output(Out) is activated")
+              "Intermediate tensor to indicate whether each element of "
+              "Output(Out) is activated")
         .AsIntermediate();
-    AddComment(R"DOC(MarginRankLoss operator
+    AddOutput("Out", "The output loss of MarginRankLoss operator");
+    AddComment(R"DOC(
+MarginRankLoss operator measures the loss given a pair of input {`X1`, `X2`}
+and `Label` with attribute `margin`, where `Label == 1` indicates `X1` is
+ranked higher than `X2`; otherwise `Label == -1`. The loss turns out to be

-loss(X1, X2, Label) = max(0, -Label * (X1-X2) + margin)
+loss(x1, x2, y) = max(0, -label * (x1-x2) + margin)
+
+For batch input, `X1`, `X2` and `Label` all have the same size batch_size x 1.

 )DOC");
   }
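As a quick check of the formula in the DOC string, here is a minimal NumPy sketch of the per-pair loss (sample values assumed; `np.maximum` plays the role of max):

import numpy as np

margin = 0.1
label = np.array([[1.], [-1.]], dtype="float32")  # 1: X1 ranks higher, -1: X2 does
x1 = np.array([[0.9], [0.2]], dtype="float32")
x2 = np.array([[0.3], [0.7]], dtype="float32")

# loss(x1, x2, y) = max(0, -label * (x1-x2) + margin)
loss = np.maximum(0., -label * (x1 - x2) + margin)
print(loss.ravel())  # both pairs correctly ranked by at least the margin -> [0. 0.]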
@@ -70,11 +73,7 @@ loss(x1, x2, y) = max(0, -label * (x1-x2) + margin)
 class MarginRankLossGradOp : public framework::OperatorWithKernel {
  public:
-  MarginRankLossGradOp(const std::string &type,
-                       const framework::VariableNameMap &inputs,
-                       const framework::VariableNameMap &outputs,
-                       const framework::AttributeMap &attrs)
-      : OperatorWithKernel(type, inputs, outputs, attrs) {}
+  using framework::OperatorWithKernel::OperatorWithKernel;

  protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
paddle/operators/margin_rank_loss_op.cu

@@ -14,9 +14,11 @@
 #include "paddle/operators/margin_rank_loss_op.h"

 namespace ops = paddle::operators;

-REGISTER_OP_GPU_KERNEL(margin_rank_loss,
-                       paddle::operators::MarginRankLossKernel<
-                           paddle::platform::GPUPlace, float>);
-REGISTER_OP_GPU_KERNEL(margin_rank_loss_grad,
-                       paddle::operators::MarginRankLossGradKernel<
-                           paddle::platform::GPUPlace, float>);
+REGISTER_OP_GPU_KERNEL(
+    margin_rank_loss,
+    ops::MarginRankLossKernel<paddle::platform::GPUPlace, float>);
+REGISTER_OP_GPU_KERNEL(
+    margin_rank_loss_grad,
+    ops::MarginRankLossGradKernel<paddle::platform::GPUPlace, float>);
paddle/operators/margin_rank_loss_op.h
@@ -46,8 +46,8 @@ template <typename Place, typename T, typename AttrType = T>
 class MarginRankLossKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext &ctx) const {
-    auto* out_t = ctx.Output<framework::LoDTensor>("Out");
-    auto* act_t = ctx.Output<framework::LoDTensor>("Activated");
+    auto* out_t = ctx.Output<framework::Tensor>("Out");
+    auto* act_t = ctx.Output<framework::Tensor>("Activated");
     auto* label_t = ctx.Input<framework::Tensor>("Label");
     auto* x1_t = ctx.Input<framework::Tensor>("X1");
@@ -65,8 +65,8 @@ class MarginRankLossKernel : public framework::OpKernel {
     auto x2 = framework::EigenVector<T>::Flatten(*x2_t);
     auto& dev = ctx.GetEigenDevice<Place>();
-    act.device(dev) = (-label * (x1 - x2) + margin).unaryExpr(Heaviside<T>());
     out.device(dev) = (-label * (x1 - x2) + margin).unaryExpr(ReLU<T>());
+    act.device(dev) = out.unaryExpr(Heaviside<T>());
   }
 };
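The reordering above derives `Activated` from the already-clipped `Out` (a step function on the loss) instead of recomputing the margin expression. A NumPy sketch of the forward pass, not the Eigen code itself:

import numpy as np

def margin_rank_loss_forward(x1, x2, label, margin):
    # out = ReLU(-label * (x1 - x2) + margin), as in the Eigen expression
    out = np.maximum(0., -label * (x1 - x2) + margin)
    # act = Heaviside(out): 1 where the loss is active, 0 elsewhere
    act = (out > 0).astype(x1.dtype)
    return out, act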
@@ -78,15 +78,15 @@ class MarginRankLossGradKernel : public framework::OpKernel {
         ctx.Output<framework::LoDTensor>(framework::GradVarName("X1"));
     auto* d_x2_t =
         ctx.Output<framework::LoDTensor>(framework::GradVarName("X2"));
-    auto* act_t = ctx.Output<framework::LoDTensor>("Activated");
+    auto* act_t = ctx.Input<framework::Tensor>("Activated");
     auto* d_out_t = ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
     auto* label_t = ctx.Input<framework::Tensor>("Label");
-    auto& dev = ctx.GetEigenDevice<Place>();
     auto d_out = framework::EigenVector<T>::Flatten(*d_out_t);
     auto act = framework::EigenVector<T>::Flatten(*act_t);
     auto label = framework::EigenVector<T>::Flatten(*label_t);
+    auto& dev = ctx.GetEigenDevice<Place>();

     // compute d_x1
     if (d_x1_t) {
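The gradient body is collapsed here, but it follows from differentiating loss = max(0, -label * (x1 - x2) + margin): wherever the loss is active, the derivative w.r.t. `X1` is -label and w.r.t. `X2` is +label, which is why the kernel now reads the saved `Activated` mask as an input. A hedged NumPy sketch (function name hypothetical):

import numpy as np

def margin_rank_loss_grad(d_out, act, label):
    d_x1 = -label * act * d_out  # d loss / d x1 = -label on active elements
    d_x2 = label * act * d_out   # d loss / d x2 = +label on active elements
    return d_x1, d_x2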
python/paddle/v2/framework/tests/test_margin_rank_loss_op.py

@@ -8,23 +8,23 @@ class TestMarginRankLossOp(OpTest):
         self.op_type = "margin_rank_loss"
         batch_size = 5
         margin = 0.1
-        # labels_{i} = {0, 1.0} or {0, 0.5, 1.0}
-        label = np.random.randint(0, 2, size=(batch_size, )).astype("float32")
-        x1 = np.random.random((batch_size, )).astype("float32")
-        x2 = np.random.random((batch_size, )).astype("float32")
+        # labels_{i} = {-1, 1}
+        label = 2 * np.random.randint(
+            0, 2, size=(batch_size, 1)).astype("float32") - 1
+        x1 = np.random.random((batch_size, 1)).astype("float32")
+        x2 = np.random.random((batch_size, 1)).astype("float32")
         # loss = max(0, -label * (x1 - x2) + margin)
-        loss = [
-            max(0, -label[i] * (x1[i] - x2[i]) + margin)
-            for i in range(batch_size)
-        ]
+        loss = -label * (x1 - x2) + margin
+        loss = np.where(loss > 0, loss, 0)
+        act = np.where(loss > 0, 1., 0.)
         self.attrs = {'margin': margin}
         self.inputs = {'Label': label, 'X1': x1, 'X2': x2}
-        self.outputs = {'Out': loss}
+        self.outputs = {'Activated': act, 'Out': loss}

     def test_check_output(self):
         self.check_output()

-    """
     def test_check_grad(self):
         self.check_grad(["X1", "X2"], "Out")
@@ -33,7 +33,6 @@ class TestMarginRankLossOp(OpTest):
     def test_check_grad_ignore_x2(self):
         self.check_grad(["X1"], "Out", no_grad_set=set('X2'))
-    """

 if __name__ == '__main__':
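To see the test's reference computation on concrete numbers (values assumed for illustration, not taken from the test):

import numpy as np

margin = 0.1
label = np.array([[1.], [-1.], [1.]], dtype="float32")
x1 = np.array([[0.8], [0.3], [0.5]], dtype="float32")
x2 = np.array([[0.2], [0.6], [0.9]], dtype="float32")

loss = -label * (x1 - x2) + margin
loss = np.where(loss > 0, loss, 0)
act = np.where(loss > 0, 1., 0.)
# rows 1-2 are ranked correctly by more than the margin -> loss 0, inactive
# row 3 has label == 1 but x1 < x2 -> loss = 0.4 + 0.1 = 0.5, active
print(loss.ravel(), act.ravel())  # [0.  0.  0.5] [0. 0. 1.]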