Commit 7f49b9ba (unverified)
Authored on Aug 24, 2022 by WangZhen; committed via GitHub on Aug 24, 2022

Adapt tensor axis for cumsum (#45372)

Parent: 62b5452d

Showing 9 changed files with 98 additions and 11 deletions (+98, -11)
Changed files:
- paddle/fluid/operators/cum_op.cc (+10, -2)
- paddle/phi/api/yaml/legacy_api.yaml (+2, -2)
- paddle/phi/api/yaml/legacy_backward.yaml (+2, -2)
- paddle/phi/infermeta/unary.cc (+9, -0)
- paddle/phi/infermeta/unary.h (+7, -0)
- paddle/phi/kernels/cpu/cum_kernel.cc (+2, -2)
- paddle/phi/kernels/cum_kernel.h (+2, -1)
- paddle/phi/kernels/gpu/cum_kernel.cu (+2, -2)
- python/paddle/fluid/tests/unittests/test_cumsum_op.py (+62, -0)
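In user-facing terms, the commit lets paddle.cumsum take its axis as a Tensor rather than only a Python int. A minimal dynamic-graph sketch of the behavior being added (it mirrors the new TestTensorAxis.test_dygraph case further down; the shapes and dtype here are illustrative, not part of the commit):

import numpy as np
import paddle

x = np.random.randn(5, 6)
# The axis may now be a Tensor; before this change only a plain int was accepted.
axis = paddle.to_tensor([1], dtype='int32')
out = paddle.cumsum(paddle.to_tensor(x), axis=axis)
np.testing.assert_allclose(np.cumsum(x, axis=1), out.numpy())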
paddle/fluid/operators/cum_op.cc

@@ -24,6 +24,13 @@ namespace operators {
 class CumOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
+
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext& ctx) const override {
+    auto input_data_type =
+        framework::OperatorWithKernel::IndicateVarDataType(ctx, "X");
+    return framework::OpKernelType(input_data_type, ctx.GetPlace());
+  }
 };
 
 class CumsumOpMaker : public framework::OpProtoAndCheckerMaker {

@@ -34,7 +41,8 @@ class CumsumOpMaker : public framework::OpProtoAndCheckerMaker {
   AddAttr<int>("axis",
                "The dimension to accumulate along. -1 means the last "
                "dimension [default -1].")
-      .SetDefault(-1);
+      .SetDefault(-1)
+      .SupportTensor();
   AddAttr<bool>("flatten",
                 "Whether to compute the cumsum over the flattened array. "
                 "[default false].")

@@ -148,7 +156,7 @@ namespace ops = paddle::operators;
 using CPU = phi::CPUContext;
 DECLARE_INFER_SHAPE_FUNCTOR(cumsum,
                             CumsumInferShapeFunctor,
-                            PD_INFER_META(phi::CumInferMeta));
+                            PD_INFER_META(phi::CumScalarAxisInferMeta));
 DECLARE_INFER_SHAPE_FUNCTOR(logcumsumexp,
                             LogcumsumexpInferShapeFunctor,
                             PD_INFER_META(phi::CumInferMeta));
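The .SupportTensor() call on the axis attribute is what lets the static-graph operator receive the axis as a runtime Tensor instead of a fixed compile-time attribute. A hedged static-graph sketch adapted from the new test at the bottom of this commit (the shapes and the CPU place are illustrative):

import numpy as np
import paddle

paddle.enable_static()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data(shape=[9, 10, 11], name='x', dtype='float32')
    # The axis is itself a 1-element int64 Tensor produced inside the program.
    axis = paddle.full([1], 2, dtype='int64')
    out = paddle.cumsum(x, axis=axis)

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup_prog)
np_x = np.random.randn(9, 10, 11).astype('float32')
static_out, = exe.run(main_prog, feed={'x': np_x}, fetch_list=[out])
np.testing.assert_allclose(static_out, np.cumsum(np_x, axis=2), rtol=1e-5)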
paddle/phi/api/yaml/legacy_api.yaml

@@ -638,10 +638,10 @@
   backward : cumprod_grad
 
 - api : cumsum
-  args : (Tensor x, int axis, bool flatten, bool exclusive, bool reverse)
+  args : (Tensor x, Scalar axis, bool flatten, bool exclusive, bool reverse)
   output : Tensor(out)
   infer_meta :
-    func : CumInferMeta
+    func : CumScalarAxisInferMeta
   kernel :
     func : cumsum
   backward : cumsum_grad
paddle/phi/api/yaml/legacy_backward.yaml

@@ -591,11 +591,11 @@
     func : cumprod_grad
 
 - backward_api : cumsum_grad
-  forward : cumsum(Tensor x, int axis, bool flatten, bool exclusive, bool reverse) -> Tensor(out)
+  forward : cumsum(Tensor x, Scalar axis, bool flatten, bool exclusive, bool reverse) -> Tensor(out)
   infer_meta :
     func : UnchangedInferMeta
     param : [x]
-  args : (Tensor out_grad, int axis, bool flatten, bool exclusive, bool reverse)
+  args : (Tensor out_grad, Scalar axis, bool flatten, bool exclusive, bool reverse)
   output : Tensor(x_grad)
   invoke : cumsum(out_grad, axis, flatten, exclusive, !reverse)
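The backward entry is unchanged in spirit: the gradient of an inclusive forward cumsum is a cumsum of the upstream gradient along the same axis with reverse flipped, which is why invoke calls cumsum(out_grad, axis, flatten, exclusive, !reverse). A small NumPy sketch of that identity (array shapes are illustrative):

import numpy as np

out_grad = np.random.randn(4, 5)          # upstream gradient dL/dy
axis = 1

# y[:, j] = sum_{i <= j} x[:, i], so dL/dx[:, i] = sum_{j >= i} dL/dy[:, j],
# i.e. a reversed cumsum of the upstream gradient along the same axis.
x_grad = np.flip(np.cumsum(np.flip(out_grad, axis), axis), axis)

expected = np.array([out_grad[:, i:].sum(axis=1) for i in range(5)]).T
np.testing.assert_allclose(x_grad, expected, atol=1e-12)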
paddle/phi/infermeta/unary.cc

@@ -405,6 +405,15 @@ void CumInferMeta(const MetaTensor& x,
   out->share_lod(x);
 }
 
+void CumScalarAxisInferMeta(const MetaTensor& x,
+                            const Scalar& axis,
+                            bool flatten,
+                            bool exclusive,
+                            bool reverse,
+                            MetaTensor* out) {
+  CumInferMeta(x, axis.to<int>(), flatten, exclusive, reverse, out);
+}
+
 void CropTensorInferMeta(const MetaTensor& x,
                          const IntArray& shape,
                          const IntArray& offsets,
paddle/phi/infermeta/unary.h

@@ -95,6 +95,13 @@ void CumInferMeta(const MetaTensor& x,
                   bool reverse,
                   MetaTensor* out);
 
+void CumScalarAxisInferMeta(const MetaTensor& x,
+                            const Scalar& axis,
+                            bool flatten,
+                            bool exclusive,
+                            bool reverse,
+                            MetaTensor* out);
+
 void DecodeJpegInferMeta(const MetaTensor& x,
                          const std::string& mode,
                          MetaTensor* out);
paddle/phi/kernels/cpu/cum_kernel.cc

@@ -135,7 +135,7 @@ void ScanKernel(const Context& dev_ctx,
 template <typename T, typename Context>
 void CumsumKernel(const Context& dev_ctx,
                   const DenseTensor& x,
-                  int axis,
+                  const Scalar& axis,
                   bool flatten,
                   bool exclusive,
                   bool reverse,

@@ -143,7 +143,7 @@ void CumsumKernel(const Context& dev_ctx,
   using Reducer = Eigen::internal::SumReducer<T>;
   auto reducer = Reducer();
   ScanKernel<T, Context, Reducer>(
-      dev_ctx, x, axis, flatten, exclusive, reverse, reducer, out);
+      dev_ctx, x, axis.to<int>(), flatten, exclusive, reverse, reducer, out);
 }
 
 template <typename T>
paddle/phi/kernels/cum_kernel.h

@@ -14,6 +14,7 @@
 #pragma once
 
+#include "paddle/phi/common/scalar.h"
 #include "paddle/phi/core/dense_tensor.h"
 
 namespace phi {

@@ -21,7 +22,7 @@ namespace phi {
 template <typename T, typename Context>
 void CumsumKernel(const Context& dev_ctx,
                   const DenseTensor& x,
-                  int axis,
+                  const Scalar& axis,
                   bool flatten,
                   bool exclusive,
                   bool reverse,
paddle/phi/kernels/gpu/cum_kernel.cu

@@ -353,7 +353,7 @@ void ScanKernel(const Context& dev_ctx,
 template <typename T, typename Context>
 void CumsumKernel(const Context& dev_ctx,
                   const DenseTensor& x,
-                  int axis,
+                  const Scalar& axis,
                   bool flatten,
                   bool exclusive,
                   bool reverse,

@@ -361,7 +361,7 @@ void CumsumKernel(const Context& dev_ctx,
   using Op = cub::Sum;
   auto op = Op();
   ScanKernel<T, Context, Op>(
-      dev_ctx, x, axis, flatten, exclusive, reverse, op, out);
+      dev_ctx, x, axis.to<int>(), flatten, exclusive, reverse, op, out);
 }
 
 template <typename T, typename Context>
python/paddle/fluid/tests/unittests/test_cumsum_op.py

@@ -14,13 +14,16 @@
 from __future__ import print_function
 
+import os
 import unittest
+import tempfile
 import numpy as np
 from op_test import OpTest
 import paddle
 import paddle.fluid.core as core
 import paddle.fluid as fluid
 from paddle.fluid import compiler, Program, program_guard
+import paddle.inference as paddle_infer
 
 
 class TestCumsumOp(unittest.TestCase):

@@ -318,5 +321,64 @@ class BadInputTest(unittest.TestCase):
         self.assertRaises(TypeError, test_bad_x)
 
 
+class TestTensorAxis(unittest.TestCase):
+
+    def setUp(self):
+        paddle.seed(2022)
+        self.temp_dir = tempfile.TemporaryDirectory()
+        self.save_path = os.path.join(self.temp_dir.name,
+                                      'tensor_axis_cumsum')
+        self.place = paddle.CUDAPlace(
+            0) if paddle.is_compiled_with_cuda() else paddle.CPUPlace()
+
+    def test_dygraph(self):
+        paddle.disable_static()
+        x = np.random.randn(5, 6)
+        axis = 1
+        np_out = np.cumsum(x, axis)
+        pd_out = paddle.cumsum(paddle.to_tensor(x),
+                               axis=paddle.to_tensor([axis], dtype='int32'))
+        np.testing.assert_allclose(np_out, pd_out.numpy())
+
+    def test_static_and_infer(self):
+        paddle.enable_static()
+        np_x = np.random.randn(9, 10, 11).astype('float32')
+        main_prog = paddle.static.Program()
+        starup_prog = paddle.static.Program()
+        with paddle.static.program_guard(main_prog, starup_prog):
+            # run static
+            x = paddle.static.data(shape=np_x.shape, name='x', dtype=np_x.dtype)
+            print(x)
+            linear = paddle.nn.Linear(np_x.shape[-1], np_x.shape[-1])
+            linear_out = linear(x)
+            relu_out = paddle.nn.functional.relu(linear_out)
+            axis = paddle.full([1], 2, dtype='int64')
+            out = paddle.cumsum(relu_out, axis=axis)
+
+            exe = paddle.static.Executor(self.place)
+            exe.run(starup_prog)
+            static_out = exe.run(feed={'x': np_x}, fetch_list=[out])
+
+            # run infer
+            paddle.static.save_inference_model(self.save_path, [x], [out], exe)
+            config = paddle_infer.Config(self.save_path + '.pdmodel',
+                                         self.save_path + '.pdiparams')
+            if paddle.is_compiled_with_cuda():
+                config.enable_use_gpu(100, 0)
+            else:
+                config.disable_gpu()
+
+            predictor = paddle_infer.create_predictor(config)
+            input_names = predictor.get_input_names()
+            input_handle = predictor.get_input_handle(input_names[0])
+            fake_input = np_x
+            input_handle.reshape(np_x.shape)
+            input_handle.copy_from_cpu(fake_input)
+            predictor.run()
+            output_names = predictor.get_output_names()
+            output_handle = predictor.get_output_handle(output_names[0])
+            infer_out = output_handle.copy_to_cpu()
+            np.testing.assert_allclose(static_out[0], infer_out)
+
+
 if __name__ == '__main__':
     unittest.main()