s920243400 / PaddleDetection (forked from PaddlePaddle / PaddleDetection)

Commit f18e8a7a
Authored Dec 20, 2018 by heqiaozhi

remove some comments & refine doc & put template class in .h

test=develop

Parent 754a5f88
Showing 3 changed files with 101 additions and 113 deletions.
paddle/fluid/operators/teacher_student_sigmoid_loss_op.cc   +8   -102
paddle/fluid/operators/teacher_student_sigmoid_loss_op.h    +93  -0
python/paddle/fluid/tests/unittests/test_teacher_student_sigmoid_loss_op.py   +0  -11
paddle/fluid/operators/teacher_student_sigmoid_loss_op.cc
@@ -115,18 +115,22 @@ class TeacherStudentSigmoidLossOpMaker
     AddOutput("Y",
               "(Tensor, default Tensor<float>), a 2-D tensor with shape "
               "[N x 1]. The teacher student sigmoid loss.");
-    AddAttr<float>("soft_max_up_bound", "fp32, default 15.0").SetDefault(15.0);
-    AddAttr<float>("soft_max_lower_bound", "fp32, default -15.0")
+    AddAttr<float>(
+        "soft_max_up_bound",
+        "fp32, if input > soft_max_up_bound, will be bound, default 15.0")
+        .SetDefault(15.0);
+    AddAttr<float>(
+        "soft_max_lower_bound",
+        "fp32, if input < soft_max_lower_bound, will be bound, default -15.0")
         .SetDefault(-15.0);
     AddComment(R"DOC(
 TeacherStudentSigmoidLoss Operator.

 It's similarity to SigmoidCrossEntropyWithLogits Operator. The difference is that
 we add another label(z') to original.
     loss = max(x, 0) - x * z + log(1 + exp(-abs(x))) + max(x, 0) - x * z' + log(1 + exp(-abs(x)))
     z is click or not
-    z' is value q of feed_fine
+    z' is teacher value
     label = {-2, -1, [0, 2]}
     when z' is not exist, clk = 0 : label = -2;
     when z' is not exist, clk = 1 : label = -1;
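Each term of the documented loss, max(x, 0) - x * z + log(1 + exp(-abs(x))), is the numerically stable way to evaluate log(1 + exp(x)) - x * z, i.e. one sigmoid cross entropy with logits x against target z, so the operator stacks a click term and a teacher term on the same logit. A minimal NumPy sketch of the per-element forward rule, decoding the four label cases the same way the kernel below does (the function name and scalar signature are illustrative, not part of the operator):

import numpy as np

def teacher_student_sigmoid_loss(x, label):
    # softplus(x) = max(x, 0) + log(1 + exp(-|x|)) = log(1 + exp(x)), stable form
    softplus = max(x, 0.0) + np.log1p(np.exp(-abs(x)))
    if label < -1.0:      # no teacher value, clk = 0  (z = 0)
        return softplus
    elif label < 0.0:     # no teacher value, clk = 1  (z = 1)
        return softplus - x
    elif label < 1.0:     # teacher value z' = label, clk = 0
        return softplus + (softplus - x * label)
    else:                 # teacher value z' = label - 1, clk = 1
        return (softplus - x) + (softplus - x * (label - 1.0))

For instance, teacher_student_sigmoid_loss(0.5, 1.3) takes the last branch with z' = 0.3.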
@@ -137,104 +141,6 @@ we add another label(z') to original.
   }
 };

-// template <typename DeviceContext, typename T>
-template <typename T>
-class TeacherStudentSigmoidLossOpKernel : public framework::OpKernel<T> {
- public:
-  void Compute(const framework::ExecutionContext& context) const override {
-    PADDLE_ENFORCE(platform::is_cpu_place(context.GetPlace()),
-                   "This kernel only runs on CPU.");
-    Tensor* y = context.Output<Tensor>("Y");
-    const Tensor* x = context.Input<Tensor>("X");
-    const Tensor* labels = context.Input<Tensor>("Label");
-    T* y_data = y->mutable_data<T>(context.GetPlace());
-    const T* x_data = x->data<T>();
-    const T* label_data = labels->data<T>();
-    int64_t batch_size = x->dims()[0];
-    // loss = max(x, 0) - x * z + log(1 + exp(-abs(x))) + max(x, 0) - x * z' +
-    // log(1 + exp(-abs(x)))
-    // z is click or not
-    // z' is value q of feed_fine
-    // label = {-2, -1, [0, 2]}
-    // when z' is not exist, clk = 0 : label = -2;
-    // when z' is not exist, clk = 1 : label = -1;
-    // when z' is exist , clk = 0 : label = 0 + z';
-    // when z' is exist , clk = 1 : label = 1 + z';
-    for (int i = 0; i < batch_size; ++i) {
-      if (label_data[i] < -1.0) {
-        y_data[i] = (x_data[i] > 0 ? x_data[i] : 0.0) +
-                    log(1.0 + exp(-fabs(x_data[i])));
-      } else if (label_data[i] < 0.0) {
-        y_data[i] = (x_data[i] > 0 ? x_data[i] : 0.0) - x_data[i] +
-                    log(1.0 + exp(-fabs(x_data[i])));
-      } else if (label_data[i] < 1.0) {
-        y_data[i] = (x_data[i] > 0 ? x_data[i] : 0.0) +
-                    log(1.0 + exp(-fabs(x_data[i]))) +
-                    (x_data[i] > 0 ? x_data[i] : 0.0) -
-                    x_data[i] * label_data[i] +
-                    log(1.0 + exp(-fabs(x_data[i])));
-      } else {
-        y_data[i] = (x_data[i] > 0 ? x_data[i] : 0.0) - x_data[i] +
-                    log(1.0 + exp(-fabs(x_data[i]))) +
-                    (x_data[i] > 0 ? x_data[i] : 0.0) -
-                    x_data[i] * (label_data[i] - 1.0) +
-                    log(1.0 + exp(-fabs(x_data[i])));
-      }
-    }
-  }
-};
-
-template <typename T>
-class TeacherStudentSigmoidLossGradOpKernel : public framework::OpKernel<T> {
- public:
-  void Compute(const framework::ExecutionContext& context) const override {
-    const Tensor* x = context.Input<Tensor>("X");
-    const T* x_data = x->data<T>();
-    Tensor* dx = context.Output<Tensor>(framework::GradVarName("X"));
-    T* dx_data = dx->mutable_data<T>(context.GetPlace());
-    const Tensor* labels = context.Input<Tensor>("Label");
-    const T* label_data = labels->data<T>();
-    T soft_max_up_bound =
-        static_cast<T>(context.Attr<float>("soft_max_up_bound"));
-    T soft_max_lower_bound =
-        static_cast<T>(context.Attr<float>("soft_max_lower_bound"));
-    int64_t batch_size = x->dims()[0];
-    const framework::Tensor* dOut =
-        context.Input<framework::Tensor>(framework::GradVarName("Y"));
-    const T* dout_data = dOut->data<T>();
-    for (int i = 0; i < batch_size; ++i) {
-      T sum_val = x_data[i];
-      if (sum_val > soft_max_up_bound) {
-        sum_val = soft_max_up_bound;
-      } else {
-        if (sum_val < soft_max_lower_bound) {
-          sum_val = soft_max_lower_bound;
-        }
-      }
-      T pred = 1.0 / (1.0 + exp(-sum_val));
-      if (label_data[i] < -1.0) {
-        dx_data[i] = 0.0 - pred;
-      } else if (label_data[i] < 0.0) {
-        dx_data[i] = 1.0 - pred;
-      } else {
-        dx_data[i] = label_data[i] - 2.0 * pred;
-      }
-      if (sum_val >= soft_max_up_bound || sum_val <= soft_max_lower_bound) {
-        dx_data[i] = 0;
-      }
-      dx_data[i] *= dout_data[i] * -1;
-    }
-  }
-};
-
 }  // namespace operators
 }  // namespace paddle
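In both teacher branches the label stores the sum z + z' (clk = 0 keeps 0 + z', clk = 1 keeps 1 + z'), so the derivative of the two stacked cross-entropy terms collapses to 2 * sigmoid(x) - label; the kernel computes the negated value and flips the sign when it multiplies in the upstream gradient. A hedged NumPy sketch of the same per-element backward rule, including the bound-and-zero behaviour on the clipped logit (names and scalar signature are illustrative):

import numpy as np

def teacher_student_sigmoid_loss_grad(x, label, dout,
                                      up_bound=15.0, lower_bound=-15.0):
    s = min(max(x, lower_bound), up_bound)  # clip the logit, like sum_val
    pred = 1.0 / (1.0 + np.exp(-s))         # sigmoid of the clipped logit
    if label < -1.0:                        # z = 0, single term
        dx = 0.0 - pred
    elif label < 0.0:                       # z = 1, single term
        dx = 1.0 - pred
    else:                                   # label = z + z', two terms
        dx = label - 2.0 * pred
    if s >= up_bound or s <= lower_bound:   # saturated logits get no gradient
        dx = 0.0
    return dx * dout * -1.0                 # final sign flip, as in the kernel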
paddle/fluid/operators/teacher_student_sigmoid_loss_op.h
@@ -20,6 +20,99 @@ namespace paddle {
 namespace operators {

 using Tensor = framework::Tensor;

+template <typename T>
+class TeacherStudentSigmoidLossOpKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& context) const override {
+    Tensor* y = context.Output<Tensor>("Y");
+    const Tensor* x = context.Input<Tensor>("X");
+    const Tensor* labels = context.Input<Tensor>("Label");
+    T* y_data = y->mutable_data<T>(context.GetPlace());
+    const T* x_data = x->data<T>();
+    const T* label_data = labels->data<T>();
+    int64_t batch_size = x->dims()[0];
+    // loss = max(x, 0) - x * z + log(1 + exp(-abs(x))) + max(x, 0) - x * z' +
+    // log(1 + exp(-abs(x)))
+    // z is click or not
+    // z' is value q of feed_fine
+    // label = {-2, -1, [0, 2]}
+    // when z' is not exist, clk = 0 : label = -2;
+    // when z' is not exist, clk = 1 : label = -1;
+    // when z' is exist , clk = 0 : label = 0 + z';
+    // when z' is exist , clk = 1 : label = 1 + z';
+    for (int i = 0; i < batch_size; ++i) {
+      if (label_data[i] < -1.0) {
+        y_data[i] = (x_data[i] > 0 ? x_data[i] : 0.0) +
+                    log(1.0 + exp(-fabs(x_data[i])));
+      } else if (label_data[i] < 0.0) {
+        y_data[i] = (x_data[i] > 0 ? x_data[i] : 0.0) - x_data[i] +
+                    log(1.0 + exp(-fabs(x_data[i])));
+      } else if (label_data[i] < 1.0) {
+        y_data[i] = (x_data[i] > 0 ? x_data[i] : 0.0) +
+                    log(1.0 + exp(-fabs(x_data[i]))) +
+                    (x_data[i] > 0 ? x_data[i] : 0.0) -
+                    x_data[i] * label_data[i] +
+                    log(1.0 + exp(-fabs(x_data[i])));
+      } else {
+        y_data[i] = (x_data[i] > 0 ? x_data[i] : 0.0) - x_data[i] +
+                    log(1.0 + exp(-fabs(x_data[i]))) +
+                    (x_data[i] > 0 ? x_data[i] : 0.0) -
+                    x_data[i] * (label_data[i] - 1.0) +
+                    log(1.0 + exp(-fabs(x_data[i])));
+      }
+    }
+  }
+};
+
+template <typename T>
+class TeacherStudentSigmoidLossGradOpKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& context) const override {
+    const Tensor* x = context.Input<Tensor>("X");
+    const T* x_data = x->data<T>();
+    Tensor* dx = context.Output<Tensor>(framework::GradVarName("X"));
+    T* dx_data = dx->mutable_data<T>(context.GetPlace());
+    const Tensor* labels = context.Input<Tensor>("Label");
+    const T* label_data = labels->data<T>();
+    T soft_max_up_bound =
+        static_cast<T>(context.Attr<float>("soft_max_up_bound"));
+    T soft_max_lower_bound =
+        static_cast<T>(context.Attr<float>("soft_max_lower_bound"));
+    int64_t batch_size = x->dims()[0];
+    const framework::Tensor* dOut =
+        context.Input<framework::Tensor>(framework::GradVarName("Y"));
+    const T* dout_data = dOut->data<T>();
+    for (int i = 0; i < batch_size; ++i) {
+      T sum_val = x_data[i];
+      if (sum_val > soft_max_up_bound) {
+        sum_val = soft_max_up_bound;
+      } else {
+        if (sum_val < soft_max_lower_bound) {
+          sum_val = soft_max_lower_bound;
+        }
+      }
+      T pred = 1.0 / (1.0 + exp(-sum_val));
+      if (label_data[i] < -1.0) {
+        dx_data[i] = 0.0 - pred;
+      } else if (label_data[i] < 0.0) {
+        dx_data[i] = 1.0 - pred;
+      } else {
+        dx_data[i] = label_data[i] - 2.0 * pred;
+      }
+      if (sum_val >= soft_max_up_bound || sum_val <= soft_max_lower_bound) {
+        dx_data[i] = 0;
+      }
+      dx_data[i] *= dout_data[i] * -1;
+    }
+  }
+};
+
 }  // namespace operators
 }  // namespace paddle
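Putting the templated kernels in the header is what lets other translation units, in particular the op registration in the .cc, instantiate TeacherStudentSigmoidLossOpKernel<T> per element type; the bodies move over unchanged except that the CPU-place PADDLE_ENFORCE check from the old .cc copy is dropped. As a quick sanity check, the two sketches above can be compared with a central finite difference, which is in spirit what the unit test's check_grad call does (this assumes the sketch functions defined earlier are in scope):

# Central-difference check of the backward sketch against the forward sketch.
delta = 5e-3
for x, label in [(0.7, -2.0), (-1.2, -1.0), (0.3, 0.4), (2.1, 1.8)]:
    numeric = (teacher_student_sigmoid_loss(x + delta, label) -
               teacher_student_sigmoid_loss(x - delta, label)) / (2 * delta)
    analytic = teacher_student_sigmoid_loss_grad(x, label, dout=1.0)
    assert abs(numeric - analytic) < 1e-4, (x, label, numeric, analytic)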
python/paddle/fluid/tests/unittests/test_teacher_student_sigmoid_loss_op.py
@@ -27,9 +27,6 @@ class TestTeacherStudentSigmoidLossOp(OpTest):
     """

     def setUp(self):
-        """
-        ut
-        """
        self.op_type = "teacher_student_sigmoid_loss"
        batch_size = 16
        num_classes = 1
@@ -50,21 +47,13 @@
            elif label < 1.0:
                outs.append(max(x, 0.0) + log(1.0 + exp(-abs(x))) + \
                            max(x, 0.0) - x * label + log(1.0 + exp(-abs(x))))
-                #print "33 python x:", x, "python label:", label, "term1:", max(x, 0.0) + log(1.0 + exp(-abs(x))), "term2:", max(x, 0.0) - x * label + log(1.0 + exp(-abs(x)))
            else:
                outs.append(max(x, 0.0) - x + log(1.0 + exp(-abs(x))) + \
                            max(x, 0.0) - x * (label - 1.0) + log(1.0 + exp(-abs(x))))
-                #print "44 python x:", x, "python label:", label, "term1:", max(x, 0.0) - x + log(1.0 + exp(-abs(x))), "term2:", max(x, 0.0) - x * (label - 1.0) + log(1.0 + exp(-abs(x)))

        self.outputs = {'Y': np.array(outs)}

    def test_check_output(self):
-        """
-        ut
-        """
        self.check_output()

    def test_check_grad(self):
-        """
-        ut
-        """
        self.check_grad(["X"], "Y", numeric_grad_delta=0.005)
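The folds hide how the test draws its inputs, but the documented encoding fixes what valid labels look like; a hedged sketch of generating a batch of labels that exercises all four branches (variable names are made up, not the test's own):

import numpy as np

rng = np.random.RandomState(0)
clk = rng.randint(0, 2, size=16)                       # z: click or not
has_teacher = rng.randint(0, 2, size=16).astype(bool)  # is z' present
q = rng.uniform(0.0, 1.0, size=16)                     # teacher value z'
# without z': clk = 0 -> -2, clk = 1 -> -1 ; with z': clk = 0 -> q, clk = 1 -> 1 + q
label = np.where(has_teacher, clk + q, clk - 2.0)      # values in {-2, -1} or [0, 2)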