BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 83537c7a
Authored Dec 06, 2017 by wanghaoshuang
Fix warning about comparison of integers of different signs
Parent: 229c2e78
Showing 1 changed file with 11 additions and 11 deletions.

paddle/operators/nce_op.h (+11 / -11)
@@ -49,7 +49,7 @@ void PrepareSamples(const framework::ExecutionContext& context) {
   int num_label = label_dims.size() == 2 ? label_dims[1] : 1;
   int index = 0;
-  for (size_t i = 0; i < label_dims[0]; ++i) {
+  for (int64_t i = 0; i < label_dims[0]; ++i) {
     int j = 0;
     for (; j < num_label; ++j) {
       sample_labels_data[index++] = label_data[i * num_label + j];
@@ -86,7 +86,7 @@ class NCEKernel : public framework::OpKernel<T> {
     T* out_data = out->mutable_data<T>(context.GetPlace());
     int num_neg_samples = context.Attr<int>("num_neg_samples");
     int num_total_classes = context.Attr<int>("num_total_classes");
-    int num_true_class = 1;
+    int64_t num_true_class = 1;
     if (label != nullptr) {
       num_true_class = label->dims()[1];
     }
@@ -95,18 +95,18 @@ class NCEKernel : public framework::OpKernel<T> {
     auto bias = context.Input<Tensor>("Bias");
     if (bias != nullptr) {
       const T* bias_data = bias->data<T>();
-      for (size_t i = 0; i < sample_labels->numel(); ++i) {
+      for (int64_t i = 0; i < sample_labels->numel(); ++i) {
         sample_out_data[i] = bias_data[sample_labels_data[i]];
       }
     } else {
-      for (size_t i = 0; i < sample_labels->numel(); ++i) {
+      for (int64_t i = 0; i < sample_labels->numel(); ++i) {
         sample_out_data[i] = 0;
       }
     }
     // forward mul
     auto input_mat = EigenMatrix<T>::From(*(context.Input<Tensor>("Input")));
     auto weight_mat = EigenMatrix<T>::From(*(context.Input<Tensor>("Weight")));
-    for (size_t i = 0; i < sample_labels->numel(); ++i) {
+    for (int64_t i = 0; i < sample_labels->numel(); ++i) {
       Eigen::Tensor<T, 0, Eigen::RowMajor, Eigen::DenseIndex> result =
           (input_mat.chip((int)(i / sample_labels->dims()[1]), 0) *
            weight_mat.chip(sample_labels_data[i], 0))
@@ -115,8 +115,8 @@ class NCEKernel : public framework::OpKernel<T> {
       sample_out_data[i] = (1. / (1. + exp(-sample_out_data[i])));
     }
     // forward cost
-    for (size_t i = 0; i < sample_labels->dims()[0]; ++i) {
-      size_t j = 0;
+    for (int64_t i = 0; i < sample_labels->dims()[0]; ++i) {
+      int64_t j = 0;
       out_data[i] = 0;
       T w = sample_weight == nullptr ? 1. : sample_weight_data[i];
       // for true classes
@@ -162,7 +162,7 @@ class NCEGradKernel : public framework::OpKernel<T> {
     T* sample_grad_data =
         sample_grad.mutable_data<T>(sample_labels->dims(), context.GetPlace());
     // backward cost
-    for (size_t i = 0; i < sample_labels->numel(); ++i) {
+    for (int64_t i = 0; i < sample_labels->numel(); ++i) {
       T o = sample_out_data[i];
       T w = sample_weight == nullptr ? 1
@@ -177,7 +177,7 @@ class NCEGradKernel : public framework::OpKernel<T> {
     if (d_bias != nullptr) {
       T* d_bias_data = d_bias->mutable_data<T>(context.GetPlace());
       std::fill(d_bias_data, d_bias_data + d_bias->numel(), 0.0);
-      for (size_t i = 0; i < sample_labels->numel(); ++i) {
+      for (int64_t i = 0; i < sample_labels->numel(); ++i) {
         d_bias_data[sample_labels_data[i]] += sample_grad_data[i];
       }
     }
@@ -188,7 +188,7 @@ class NCEGradKernel : public framework::OpKernel<T> {
       std::fill(d_w_data, d_w_data + d_w->numel(), 0.0);
       auto d_w_matrix = EigenMatrix<T>::From(*d_w);
       auto x_matrix = EigenMatrix<T>::From(*(context.Input<Tensor>("Input")));
-      for (size_t i = 0; i < sample_labels->numel(); ++i) {
+      for (int64_t i = 0; i < sample_labels->numel(); ++i) {
         d_w_matrix.chip(sample_labels_data[i], 0) +=
             x_matrix.chip((int)(i / sample_labels->dims()[1]), 0) *
             sample_grad_data[i];
@@ -200,7 +200,7 @@ class NCEGradKernel : public framework::OpKernel<T> {
       d_x->mutable_data<T>(context.GetPlace());
       auto d_x_matrix = EigenMatrix<T>::From(*d_x);
       auto w_matrix = EigenMatrix<T>::From(*(context.Input<Tensor>("Weight")));
-      for (size_t i = 0; i < sample_labels->numel(); ++i) {
+      for (int64_t i = 0; i < sample_labels->numel(); ++i) {
         d_x_matrix.chip((int)(i / sample_labels->dims()[1]), 0) +=
             w_matrix.chip(sample_labels_data[i], 0) * sample_grad_data[i];
       }
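
For context, a minimal standalone sketch (not Paddle code) of the warning this commit silences: the quantities driving these loops, such as Tensor::numel() and the DDim entries, are signed 64-bit values, so indexing with size_t makes the loop condition compare a signed and an unsigned integer, which compilers report as "comparison of integers of different signs" (-Wsign-compare). The numel() stub below is a hypothetical stand-in used only for illustration.

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for Tensor::numel(); the real method returns int64_t.
int64_t numel() { return 8; }

int main() {
  // for (size_t i = 0; i < numel(); ++i) {  // warning: comparison of integers
  //                                         // of different signs
  for (int64_t i = 0; i < numel(); ++i) {    // signed index matches numel()
    std::printf("%lld\n", static_cast<long long>(i));
  }
  return 0;
}

Compiling the commented-out size_t variant with -Wall (or -Wsign-compare) reproduces the diagnostic; the int64_t version compiles cleanly, which is the change applied throughout nce_op.h above.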