Commit 57f9723d in Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)
Authored Sep 06, 2017 by yangyaming
Parent: 9802c425

Using EigenVector to replace EigenMatrix for some variables.

Showing 1 changed file with 13 additions and 10 deletions.

paddle/operators/squared_l2_distance_op.h  (+13, -10)
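The intent of the change is easiest to see from the shape flow of the forward kernel: X and Y are viewed as N x D matrices, sub_result = X - Y is N x D, and the per-row squared distance (the sum of sub_result squared over axis 1) is rank 1 with length N. The N-element output buffer is therefore most naturally mapped as a rank-1 Eigen view, which is what EigenVector<T>::Flatten provides, rather than as a rank-2 EigenMatrix. The following is a standalone sketch in plain Eigen rather than Paddle's framework wrappers; buffer names and sizes are illustrative.

#include <unsupported/Eigen/CXX11/Tensor>
#include <cstdio>

int main() {
  const int N = 3, D = 4;
  float x_buf[N * D], y_buf[N * D], out_buf[N];
  for (int i = 0; i < N * D; ++i) {
    x_buf[i] = static_cast<float>(i);
    y_buf[i] = 1.0f;
  }

  using Mat = Eigen::TensorMap<Eigen::Tensor<float, 2, Eigen::RowMajor>>;
  using Vec = Eigen::TensorMap<Eigen::Tensor<float, 1, Eigen::RowMajor>>;
  Mat x(x_buf, N, D), y(y_buf, N, D);
  Vec z(out_buf, N);  // rank-1 view of the N-element output buffer

  // sub_result is N x D; summing the squares over axis 1 yields a rank-1
  // tensor of length N, which matches z's rank, so no reshape is needed.
  Eigen::Tensor<float, 2, Eigen::RowMajor> sub_result = x - y;
  Eigen::array<int, 1> along_cols = {{1}};
  z = (sub_result * sub_result).sum(along_cols);

  for (int i = 0; i < N; ++i) std::printf("z(%d) = %g\n", i, out_buf[i]);
  return 0;
}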
@@ -20,6 +20,9 @@ namespace paddle {
 namespace operators {

 using Tensor = framework::Tensor;
+template <typename T, int MajorType = Eigen::RowMajor,
+          typename IndexType = Eigen::DenseIndex>
+using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
 template <typename T, int MajorType = Eigen::RowMajor,
           typename IndexType = Eigen::DenseIndex>
 using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
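For context, a minimal sketch of what aliases like these boil down to: Paddle's framework::EigenVector and framework::EigenMatrix expose a tensor's data through an Eigen::TensorMap of rank 1 or rank 2 (the real wrappers also carry From/Flatten helpers). The alias names below are hypothetical and only mirror the type shape.

#include <unsupported/Eigen/CXX11/Tensor>

template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenVectorMap =
    Eigen::TensorMap<Eigen::Tensor<T, 1, MajorType, IndexType>>;

template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenMatrixMap =
    Eigen::TensorMap<Eigen::Tensor<T, 2, MajorType, IndexType>>;

int main() {
  float buf[6] = {0, 1, 2, 3, 4, 5};
  EigenMatrixMap<float> as_matrix(buf, 2, 3);  // rank-2 view: 2 x 3
  EigenVectorMap<float> as_vector(buf, 6);     // rank-1 view: length 6
  // Both views alias the same storage; buf[5] is as_matrix(1, 2) and as_vector(5).
  return static_cast<int>(as_matrix(1, 2)) - static_cast<int>(as_vector(5));
}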
@@ -46,7 +49,7 @@ class SquaredL2DistanceKernel : public framework::OpKernel {
     out0->mutable_data<T>(context.GetPlace());
     out1->mutable_data<T>(context.GetPlace());
     auto sub_result = EigenMatrix<T>::From(*out0);
-    auto z = EigenMatrix<T>::From(*out1);
+    auto z = EigenVector<T>::Flatten(*out1);

     auto place = context.GetEigenDevice<Place>();
     auto x_dims = x.dimensions();
@@ -55,13 +58,12 @@ class SquaredL2DistanceKernel : public framework::OpKernel {
     if (y_dims[0] == 1 && x_dims[0] > y_dims[0]) {
       sub_result.device(place) =
           x -
-          y.broadcast(Eigen::array<int, 2>({static_cast<int>(x_dims[0]), 1}));
+          y.broadcast(Eigen::array<int, 2>({{static_cast<int>(x_dims[0]), 1}}));
     } else {
       sub_result.device(place) = x - y;
     }
     auto sub_res_pow2 = sub_result * sub_result;
-    // z is TensorMap, no need reshape
-    z.device(place) = sub_res_pow2.sum(Eigen::array<int, 1>({1}));
+    z.device(place) = sub_res_pow2.sum(Eigen::array<int, 1>({{1}}));
   }
 };
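Besides switching z to a rank-1 view, this hunk and the ones that follow change the Eigen::array initializers from single to double braces. A hedged side note on why: under C++11, and unless EIGEN_AVOID_STL_ARRAY is defined, Eigen::array is std::array, an aggregate that wraps a built-in array, so the fully braced form has an outer brace for the aggregate and an inner brace for the wrapped array. The single-brace form relies on brace elision and draws -Wmissing-braces on some compilers, which is presumably the motivation here. A standalone illustration, not part of the patch:

#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  // Single braces rely on brace elision; the fully braced form is warning-free.
  Eigen::array<int, 2> elided = {1, 3};
  Eigen::array<int, 2> full = {{1, 3}};
  return (elided[0] == full[0] && elided[1] == full[1]) ? 0 : 1;
}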
@@ -82,8 +84,9 @@ class SquaredL2DistanceGradKernel : public framework::OpKernel {
     int cols = framework::product(x_dims) / x_dims[0];
     // calculate gradient
-    auto grad_mat =
-        2 * (out_grad.broadcast(Eigen::array<int, 2>({1, cols}))) * sub_result;
+    auto grad_mat = 2 *
+                    (out_grad.broadcast(Eigen::array<int, 2>({{1, cols}}))) *
+                    sub_result;
     // propagate back to input
     auto eigen_place = context.GetEigenDevice<Place>();
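For readers unfamiliar with Eigen tensor broadcasting: the Eigen::array passed to .broadcast() is a per-dimension repeat count. Here out_grad is an N x 1 view of the per-row output gradient, tiled cols times along the second dimension so it can be multiplied elementwise with the N x cols sub_result. A standalone sketch in plain Eigen with illustrative sizes; the scalar factor is written on the right only to keep the sketch minimal.

#include <unsupported/Eigen/CXX11/Tensor>
#include <cstdio>

int main() {
  const int rows = 2, cols = 3;
  float og_buf[rows] = {10.0f, 20.0f};
  // N x 1 view of the per-row output gradient.
  Eigen::TensorMap<Eigen::Tensor<float, 2, Eigen::RowMajor>> out_grad(og_buf,
                                                                      rows, 1);

  Eigen::Tensor<float, 2, Eigen::RowMajor> sub_result(rows, cols);
  sub_result.setConstant(0.5f);

  // Repeat once along the row dimension, cols times along the column dimension.
  Eigen::array<int, 2> bcast = {{1, cols}};
  Eigen::Tensor<float, 2, Eigen::RowMajor> grad_mat =
      out_grad.broadcast(bcast) * sub_result * 2.0f;

  // Row 0 entries: 2 * 10 * 0.5 = 10; row 1 entries: 2 * 20 * 0.5 = 20.
  std::printf("grad_mat(0,0)=%g grad_mat(1,2)=%g\n", grad_mat(0, 0),
              grad_mat(1, 2));
  return 0;
}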
@@ -98,18 +101,18 @@ class SquaredL2DistanceGradKernel : public framework::OpKernel {
     if (y_g) {
       y_g->mutable_data<T>(context.GetPlace());
-      auto y_grad =
-          EigenMatrix<T>::From(*y_g, framework::make_ddim({y_dims[0], cols}));

       PADDLE_ENFORCE_GE(sub_result.dimensions()[0], y_dims[0],
                         "First dimension of gradient must be greater or "
                         "equal than first dimension of target.");

       if (sub_result.dimensions()[0] == y_dims[0]) {
+        auto y_grad =
+            EigenMatrix<T>::From(*y_g, framework::make_ddim({y_dims[0], cols}));
         y_grad.device(eigen_place) = -1 * grad_mat;
       } else {
-        auto col_sum_res = -1 * (grad_mat.sum(Eigen::array<int, 1>({0})));
-        // y_grad is TensorMap, no need reshape
+        auto col_sum_res = -1 * (grad_mat.sum(Eigen::array<int, 1>({{0}})));
+        auto y_grad = EigenVector<T>::Flatten(*y_g);
         y_grad.device(eigen_place) = col_sum_res;
       }
     }
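One last sketch, for the else branch above: when Y has a single row that was broadcast in the forward pass, its gradient is the column-wise sum of grad_mat, negated. That reduction over axis 0 is rank 1 with length cols, which is why the patch maps the Y-gradient buffer with EigenVector<T>::Flatten instead of a rank-2 EigenMatrix view. Plain Eigen with illustrative sizes; the negation is written as a right-hand scalar multiply only to keep the sketch minimal.

#include <unsupported/Eigen/CXX11/Tensor>
#include <cstdio>

int main() {
  const int rows = 3, cols = 4;
  Eigen::Tensor<float, 2, Eigen::RowMajor> grad_mat(rows, cols);
  grad_mat.setConstant(1.0f);

  // Rank-1 view of the Y-gradient buffer, as EigenVector<T>::Flatten would give.
  float y_grad_buf[cols];
  Eigen::TensorMap<Eigen::Tensor<float, 1, Eigen::RowMajor>> y_grad(y_grad_buf,
                                                                    cols);

  // Sum over axis 0: a rank-1 result of length cols, negated.
  Eigen::array<int, 1> along_rows = {{0}};
  y_grad = grad_mat.sum(along_rows) * -1.0f;  // each entry: -(rows * 1.0f) = -3

  for (int j = 0; j < cols; ++j) std::printf("%g ", y_grad_buf[j]);
  std::printf("\n");
  return 0;
}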