BaiXuePrincess / Paddle
Forked from PaddlePaddle / Paddle
Commit 477a6a09
Authored Sep 25, 2017 by guosheng

Refine reduce_op, follow comments and remove ReduceGradEigenFreeKernel

Parent: 1295e5ef

Showing 2 changed files with 38 additions and 80 deletions (+38 -80)
paddle/operators/reduce_op.cc  +11 -5
paddle/operators/reduce_op.h   +27 -75
paddle/operators/reduce_op.cc
@@ -18,7 +18,6 @@ namespace paddle {
 namespace operators {
 
 using framework::Tensor;
-using framework::LoDTensor;
 
 class ReduceOp : public framework::OperatorWithKernel {
  public:
@@ -46,7 +45,11 @@ class ReduceOp : public framework::OperatorWithKernel {
       dims_vector.erase(dims_vector.begin() + dim);
     }
     auto out_dims = framework::make_ddim(dims_vector);
-    ctx.Output<framework::LoDTensor>("Out")->Resize(out_dims);
+    ctx.Output<framework::Tensor>("Out")->Resize(out_dims);
+    if (dim != 0) {
+      // Only pass LoD when not reducing on the first dim
+      ctx.ShareLoD("X", /*->*/ "Out");
+    }
   }
 };
 
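[Editor's note] The hunk above erases the reduced axis from the output shape and now forwards the LoD from "X" to "Out" only when the reduction is not along the first dimension, since reducing dim 0 invalidates sequence-length info. Below is a minimal standalone sketch of that shape logic using plain std::vector rather than Paddle's framework::DDim; ReducedDims is a hypothetical helper for illustration only.

// Sketch of the InferShape logic above: the reduced axis is erased from
// the shape, and LoD would only be forwarded when dim != 0.
#include <cassert>
#include <iostream>
#include <vector>

std::vector<int> ReducedDims(std::vector<int> dims_vector, int dim) {
  assert(dim >= 0 && dim < static_cast<int>(dims_vector.size()));
  dims_vector.erase(dims_vector.begin() + dim);  // drop the reduced axis
  return dims_vector;
}

int main() {
  std::vector<int> x_dims = {4, 5, 6};
  for (int dim = 0; dim < 3; ++dim) {
    std::vector<int> out = ReducedDims(x_dims, dim);
    bool share_lod = (dim != 0);  // only pass LoD when not reducing on dim 0
    std::cout << "dim=" << dim << " -> out rank " << out.size()
              << ", share LoD: " << std::boolalpha << share_lod << "\n";
  }
}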
@@ -81,9 +84,12 @@ class ReduceOpMaker : public framework::OpProtoAndCheckerMaker {
              "X",
              "(Tensor) The input tensor. Tensors with rank at most 6 are supported");
     AddOutput("Out", "(Tensor) The result tensor.");
-    AddAttr<int>("dim",
-                 "(int, default 0) The dimension to reduce. "
-                 "Must be in the range [-rank(input), rank(input))")
+    AddAttr<int>(
+        "dim",
+        "(int, default 1) The dimension to reduce. "
+        "Must be in the range [-rank(input), rank(input)). "
+        "If `dim < 0`, the dim to reduce is `rank + dim`. "
+        "Noting that reducing on the first dim will make the LoD info lost.")
         .SetDefault(0);
     AddAttr<bool>("keep_dim",
                   "(bool, default false) "
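[Editor's note] The expanded attribute doc states that a negative `dim` wraps to `rank + dim`, matching the `if (dim < 0) dim = x_rank + dim;` normalization in the kernels below. A tiny sketch of that rule; NormalizeDim is a hypothetical name, not part of Paddle's API.

// Negative-axis rule from the doc string: dim in [-rank, rank),
// with negative values wrapping to rank + dim.
#include <cassert>
#include <iostream>

int NormalizeDim(int dim, int rank) {
  assert(dim >= -rank && dim < rank);
  return dim < 0 ? rank + dim : dim;  // e.g. dim=-1, rank=3 -> 2
}

int main() {
  std::cout << NormalizeDim(-1, 3) << "\n";  // prints 2 (last axis)
  std::cout << NormalizeDim(0, 3) << "\n";   // prints 0 (first axis; LoD lost)
}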
paddle/operators/reduce_op.h
@@ -80,6 +80,8 @@ struct MaxOrMinGradFunctor {
     auto equals = x == y.broadcast(dim);
     auto ones = dx.constant(1);
     auto zeros = dx.constant(0);
+    // If there are multiple minimum or maximum elements, the subgradient of
+    // each is the set [0, 1], and we pass gradient to all of them here.
     dx.device(place) = dy.broadcast(dim) * equals.select(ones, zeros);
   }
 };
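[Editor's note] The comment added above documents the tie-breaking choice: when the max (or min) is attained at several positions, `equals.select(ones, zeros)` routes the full upstream gradient to every tied element. A plain-C++ sketch of the same rule for a 1-D reduce, without Eigen:

// Every element equal to the reduced max receives the upstream gradient dy;
// all others get 0. With ties, each matching element gets dy.
#include <iostream>
#include <vector>

int main() {
  std::vector<float> x = {1.f, 3.f, 2.f, 3.f};  // max 3 attained twice
  float y = 3.f;   // forward result of the max reduce
  float dy = 1.f;  // upstream gradient of y
  std::vector<float> dx(x.size());
  for (size_t i = 0; i < x.size(); ++i)
    dx[i] = (x[i] == y) ? dy : 0.f;  // equals.select(ones, zeros) * dy
  for (float g : dx) std::cout << g << " ";  // prints: 0 1 0 1
  std::cout << "\n";
}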
@@ -145,102 +147,52 @@ class ReduceGradKernel : public framework::OpKernel {
     int rank = context.Input<Tensor>("X")->dims().size();
     switch (rank) {
       case 1:
-        ReduceCompute<1>(context);
+        ReduceGradCompute<1>(context);
         break;
       case 2:
-        ReduceCompute<2>(context);
+        ReduceGradCompute<2>(context);
         break;
       case 3:
-        ReduceCompute<3>(context);
+        ReduceGradCompute<3>(context);
         break;
       case 4:
-        ReduceCompute<4>(context);
+        ReduceGradCompute<4>(context);
         break;
       case 5:
-        ReduceCompute<5>(context);
+        ReduceGradCompute<5>(context);
         break;
       case 6:
-        ReduceCompute<6>(context);
+        ReduceGradCompute<6>(context);
         break;
     }
   }
 
  private:
   template <size_t D>
-  void ReduceCompute(const framework::ExecutionContext& context) const {
+  void ReduceGradCompute(const framework::ExecutionContext& context) const {
     auto* input0 = context.Input<Tensor>("X");
     auto* input1 = context.Input<Tensor>("Out");
     auto* input2 = context.Input<Tensor>(framework::GradVarName("Out"));
     auto* output = context.Output<Tensor>(framework::GradVarName("X"));
-    if (output != nullptr) {
-      output->mutable_data<T>(context.GetPlace());
-      auto x = EigenTensor<T, D>::From(*input0);
-      auto x_grad = EigenTensor<T, D>::From(*output);
-      auto x_rank = static_cast<int>(x.dimensions().size());
-      int dim = static_cast<int>(context.Attr<int>("dim"));
-      if (dim < 0) dim = x_rank + dim;
-      DDim dims = input0->dims();
-      dims[dim] = 1;
-      auto x_reduce = EigenTensor<T, D>::From(*input1, dims);
-      auto x_reduce_grad = EigenTensor<T, D>::From(*input2, dims);
-      Eigen::array<int, D> braodcast_dim;
-      for (size_t i = 0; i < D; ++i) braodcast_dim[i] = 1;
-      braodcast_dim[dim] = input0->dims()[dim];
-      auto& place = context.GetEigenDevice<Place>();
-      Functor functor;
-      functor(place, x, x_reduce, x_grad, x_reduce_grad, braodcast_dim,
-              braodcast_dim[dim]);
-    }
-  }
-};
-
-// For EigenTensor unsupported reduce
-template <typename T, typename Functor>
-class ReduceGradEigenFreeKernel : public framework::OpKernel {
- public:
-  void Compute(const framework::ExecutionContext& context) const override {
-    auto* x = context.Input<Tensor>("X");
-    auto* out = context.Input<Tensor>("Out");
-    auto* x_grad = context.Output<Tensor>(framework::GradVarName("X"));
-    auto* out_grad = context.Input<Tensor>(framework::GradVarName("Out"));
-    if (x_grad != nullptr) {
-      DDim dims = x->dims();
-      int rank = dims.size();
-      int dim = static_cast<int>(context.Attr<int>("dim"));
-      if (dim < 0) dim = rank + dim;
-      auto* x_data = x->data<T>();
-      auto* x_grad_data = x_grad->mutable_data<T>(context.GetPlace());
-      auto* out_data = out->data<T>();
-      auto* out_grad_data = out_grad->data<T>();
-      int outer_count = 1;
-      int inner_count = 1;
-      int mid_count = dims[dim];
-      for (int i = 0; i < dim; ++i) {
-        outer_count *= dims[i];
-      }
-      for (int i = dim + 1; i < rank; ++i) {
-        inner_count *= dims[i];
-      }
-      int x_offset = 0;    // offset on raw data
-      int out_offset = 0;  // offset on reduced data
-      Functor functor;
-      for (int i = 0; i < outer_count; ++i) {
-        for (int j = 0; j < inner_count; ++j) {
-          out_offset = inner_count * i + j;
-          for (int k = 0; k < mid_count; ++k) {
-            x_offset = (inner_count * mid_count) * i + inner_count * k + j;
-            functor(x_data + x_offset, out_data + out_offset,
-                    x_grad_data + x_offset, out_grad_data + out_offset,
-                    mid_count);
-          }
-        }
-      }
-    }
-  }
-};
+    output->mutable_data<T>(context.GetPlace());
+    auto x = EigenTensor<T, D>::From(*input0);
+    auto x_grad = EigenTensor<T, D>::From(*output);
+    auto x_rank = static_cast<int>(x.dimensions().size());
+    int dim = static_cast<int>(context.Attr<int>("dim"));
+    if (dim < 0) dim = x_rank + dim;
+    DDim dims = input0->dims();
+    dims[dim] = 1;
+    auto x_reduce = EigenTensor<T, D>::From(*input1, dims);
+    auto x_reduce_grad = EigenTensor<T, D>::From(*input2, dims);
+    Eigen::array<int, D> braodcast_dim;
+    for (size_t i = 0; i < D; ++i) braodcast_dim[i] = 1;
+    braodcast_dim[dim] = input0->dims()[dim];
+    auto& place = context.GetEigenDevice<Place>();
+    Functor functor;
+    functor(place, x, x_reduce, x_grad, x_reduce_grad, braodcast_dim,
+            braodcast_dim[dim]);
+  }
+};
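[Editor's note] The removed ReduceGradEigenFreeKernel walked the raw buffer directly by splitting the shape around the reduced axis into outer, mid, and inner counts; the surviving Eigen path expresses the same input-to-reduced-output mapping with a broadcast. A standalone sketch of that index decomposition, assuming row-major layout as in the deleted code:

// Index arithmetic from the removed kernel: for a row-major shape split into
// outer/mid/inner counts around the reduced axis, element (i, k, j) of the
// input lives at x_offset = (inner * mid) * i + inner * k + j, and its
// reduced counterpart at out_offset = inner * i + j.
#include <iostream>
#include <vector>

int main() {
  std::vector<int> dims = {2, 3, 4};  // reduce along dim = 1
  int dim = 1;
  int outer = 1, inner = 1, mid = dims[dim];
  for (int i = 0; i < dim; ++i) outer *= dims[i];
  for (int i = dim + 1; i < static_cast<int>(dims.size()); ++i)
    inner *= dims[i];

  for (int i = 0; i < outer; ++i)
    for (int j = 0; j < inner; ++j) {
      int out_offset = inner * i + j;  // offset in the reduced tensor
      for (int k = 0; k < mid; ++k) {
        int x_offset = (inner * mid) * i + inner * k + j;  // raw offset
        // the removed kernel invoked `functor` on these paired offsets
        if (i == 0 && j == 0)
          std::cout << "out " << out_offset << " <- x " << x_offset << "\n";
      }
    }
}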