BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 58c99030 (unverified)
Authored on Apr 12, 2022 by hong; committed by GitHub on Apr 12, 2022
Add conj pixel shuffle yaml (#41499) (#41616)
* ad conj flip yaml
* add flip conj pixel shuffle
Parent: 63f573ad

Showing 10 changed files with 91 additions and 44 deletions (+91 −44)
paddle/fluid/operators/pixel_shuffle_op.cc                    +5   −37
paddle/phi/infermeta/unary.cc                                 +30  −0
paddle/phi/infermeta/unary.h                                  +5   −0
python/paddle/fluid/tests/unittests/test_conj_op.py           +4   −2
python/paddle/fluid/tests/unittests/test_flip.py              +4   −2
python/paddle/fluid/tests/unittests/test_pixel_shuffle.py     +4   −2
python/paddle/tensor/manipulation.py                          +4   −0
python/paddle/tensor/math.py                                  +3   −0
python/paddle/utils/code_gen/api.yaml                         +3   −1
python/paddle/utils/code_gen/backward.yaml                    +29  −0
paddle/fluid/operators/pixel_shuffle_op.cc
@@ -82,42 +82,6 @@ class PixelShuffleGradMaker : public framework::SingleGradOpMaker<T> {
 class PixelShuffleGradOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
-
-  void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(
-        ctx->HasInput(framework::GradVarName("Out")), true,
-        platform::errors::NotFound("Input(Out@Grad) should not be null"));
-    PADDLE_ENFORCE_EQ(
-        ctx->HasOutput(framework::GradVarName("X")), true,
-        platform::errors::NotFound("Output(X@Grad) should not be null"));
-
-    auto do_dims = ctx->GetInputDim(framework::GradVarName("Out"));
-    PADDLE_ENFORCE_EQ(do_dims.size(), 4,
-                      platform::errors::InvalidArgument(
-                          "Input should be a 4-D tensor of format [N, C, H, W] "
-                          "or [N, H, W, C], but got %u.",
-                          do_dims.size()));
-
-    auto upscale_factor = ctx->Attrs().Get<int>("upscale_factor");
-
-    const std::string data_format =
-        ctx->Attrs().Get<std::string>("data_format");
-    const bool channel_last = (data_format == "NHWC");
-
-    auto dx_dims = do_dims;
-    dx_dims[0] = do_dims[0];
-
-    if (!channel_last) {
-      dx_dims[1] = do_dims[1] * (upscale_factor * upscale_factor);
-      dx_dims[2] = do_dims[2] / upscale_factor;
-      dx_dims[3] = do_dims[3] / upscale_factor;
-    } else {
-      dx_dims[1] = do_dims[1] / upscale_factor;
-      dx_dims[2] = do_dims[2] / upscale_factor;
-      dx_dims[3] = do_dims[3] * (upscale_factor * upscale_factor);
-    }
-    ctx->SetOutputDim(framework::GradVarName("X"), dx_dims);
-  }
 };
 
 }  // namespace operators
@@ -132,7 +96,11 @@ REGISTER_OPERATOR(pixel_shuffle, ops::PixelShuffleOp, ops::PixelShuffleOpMaker,
                   ops::PixelShuffleGradMaker<paddle::imperative::OpBase>,
                   PixelShuffleInferShapeFunctor);
 
-REGISTER_OPERATOR(pixel_shuffle_grad, ops::PixelShuffleGradOp);
+DECLARE_INFER_SHAPE_FUNCTOR(pixel_shuffle_grad,
+                            PixelShuffleGradInferShapeFunctor,
+                            PD_INFER_META(phi::PixelShuffleGradInferMeta));
+
+REGISTER_OPERATOR(pixel_shuffle_grad, ops::PixelShuffleGradOp,
+                  PixelShuffleGradInferShapeFunctor);
 
 REGISTER_OP_VERSION(pixel_shuffle)
     .AddCheckpoint(
paddle/phi/infermeta/unary.cc
@@ -1280,6 +1280,36 @@ void PixelShuffleInferMeta(const MetaTensor& x,
   out->set_dims(output_dims);
 }
 
+void PixelShuffleGradInferMeta(const MetaTensor& out_grad,
+                               int upscale_factor,
+                               const std::string& data_format,
+                               MetaTensor* x_grad) {
+  auto do_dims = out_grad.dims();
+  PADDLE_ENFORCE_EQ(do_dims.size(), 4,
+                    phi::errors::InvalidArgument(
+                        "Input should be a 4-D tensor of format [N, C, H, W] "
+                        "or [N, H, W, C], but got %u.",
+                        do_dims.size()));
+
+  const bool channel_last = (data_format == "NHWC");
+
+  auto dx_dims = do_dims;
+  dx_dims[0] = do_dims[0];
+
+  if (!channel_last) {
+    dx_dims[1] = do_dims[1] * (upscale_factor * upscale_factor);
+    dx_dims[2] = do_dims[2] / upscale_factor;
+    dx_dims[3] = do_dims[3] / upscale_factor;
+  } else {
+    dx_dims[1] = do_dims[1] / upscale_factor;
+    dx_dims[2] = do_dims[2] / upscale_factor;
+    dx_dims[3] = do_dims[3] * (upscale_factor * upscale_factor);
+  }
+  x_grad->set_dims(dx_dims);
+  x_grad->set_dtype(out_grad.dtype());
+}
+
 void PNormInferMeta(const MetaTensor& x,
                     float porder,
                     int axis,
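The new InferMeta carries over the shape logic that previously lived in PixelShuffleGradOp::InferShape: the gradient with respect to the input has the original input shape, obtained by inverting pixel_shuffle's forward mapping [N, C*r*r, H, W] -> [N, C, H*r, W*r] (or its NHWC counterpart). A minimal, framework-free Python sketch of that inversion; `infer_x_grad_shape` is a hypothetical helper shown only to illustrate the arithmetic:

```python
# Illustration only: the shape inversion performed by PixelShuffleGradInferMeta.
def infer_x_grad_shape(out_grad_shape, upscale_factor, data_format="NCHW"):
    n, d1, d2, d3 = out_grad_shape
    r = upscale_factor
    if data_format == "NCHW":
        # forward: [N, C*r*r, H, W] -> [N, C, H*r, W*r]
        return [n, d1 * r * r, d2 // r, d3 // r]
    # NHWC: channels sit in the last dimension
    return [n, d1 // r, d2 // r, d3 * r * r]

assert infer_x_grad_shape([2, 1, 12, 12], 3) == [2, 9, 4, 4]
assert infer_x_grad_shape([2, 12, 12, 1], 3, "NHWC") == [2, 4, 4, 9]
```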
paddle/phi/infermeta/unary.h
@@ -195,6 +195,11 @@ void PixelShuffleInferMeta(const MetaTensor& x,
                           const std::string& data_format,
                           MetaTensor* out);
 
+void PixelShuffleGradInferMeta(const MetaTensor& out_grad,
+                               int upscale_factor,
+                               const std::string& data_format,
+                               MetaTensor* x_grad);
+
 void PNormInferMeta(const MetaTensor& x,
                     float porder,
                     int axis,
python/paddle/fluid/tests/unittests/test_conj_op.py
@@ -32,6 +32,7 @@ paddle.enable_static()
 class TestConjOp(OpTest):
     def setUp(self):
         self.op_type = "conj"
+        self.python_api = paddle.tensor.conj
         self.init_dtype_type()
         self.init_input_output()
         self.init_grad_input_output()
@@ -53,14 +54,15 @@ class TestConjOp(OpTest):
         self.grad_in = np.conj(self.grad_out)
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad_normal(self):
         self.check_grad(['X'],
                         'Out',
                         user_defined_grads=[self.grad_in],
-                        user_defined_grad_outputs=[self.grad_out])
+                        user_defined_grad_outputs=[self.grad_out],
+                        check_eager=True)
 
 
 class TestComplexConjOp(unittest.TestCase):
python/paddle/fluid/tests/unittests/test_flip.py
@@ -67,6 +67,7 @@ class TestFlipOp_API(unittest.TestCase):
 class TestFlipOp(OpTest):
     def setUp(self):
         self.op_type = 'flip'
+        self.python_api = paddle.tensor.flip
         self.init_test_case()
         self.inputs = {'X': np.random.random(self.in_shape).astype('float64')}
         self.init_attrs()
@@ -76,10 +77,10 @@ class TestFlipOp(OpTest):
         self.attrs = {"axis": self.axis}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(["X"], "Out")
+        self.check_grad(["X"], "Out", check_eager=True)
 
     def init_test_case(self):
         self.in_shape = (6, 4, 2, 3)
@@ -131,4 +132,5 @@ class TestFlipOpNegAxis(TestFlipOp):
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
python/paddle/fluid/tests/unittests/test_pixel_shuffle.py
@@ -52,6 +52,7 @@ def pixel_shuffle_np(x, up_factor, data_format="NCHW"):
 class TestPixelShuffleOp(OpTest):
     def setUp(self):
         self.op_type = "pixel_shuffle"
+        self.python_api = paddle.nn.functional.pixel_shuffle
         self.init_data_format()
         n, c, h, w = 2, 9, 4, 4
@@ -73,10 +74,10 @@ class TestPixelShuffleOp(OpTest):
         self.format = "NCHW"
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
 
 class TestChannelLast(TestPixelShuffleOp):
@@ -220,4 +221,5 @@ class TestPixelShuffleError(unittest.TestCase):
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
python/paddle/tensor/manipulation.py
@@ -458,6 +458,10 @@ def flip(x, axis, name=None):
"""
if
isinstance
(
axis
,
int
):
axis
=
[
axis
]
if
in_dygraph_mode
():
return
_C_ops
.
final_state_flip
(
x
,
axis
)
if
paddle
.
in_dynamic_mode
():
return
_C_ops
.
flip
(
x
,
"axis"
,
axis
)
...
...
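The added branch routes flip through the code-generated final-state kernel whenever eager dygraph mode is active; the public API is unchanged. A minimal usage sketch, assuming a Paddle build that includes this change:

```python
import paddle

x = paddle.to_tensor([[1, 2], [3, 4]])
# In eager dygraph mode this now dispatches to _C_ops.final_state_flip.
y = paddle.flip(x, axis=-1)
print(y.numpy())  # [[2 1]
                  #  [4 3]]
```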
python/paddle/tensor/math.py
@@ -3349,6 +3349,9 @@ def conj(x, name=None):
             #        [(4-4j), (5-5j), (6-6j)]])
 
     """
+    if in_dygraph_mode():
+        return _C_ops.final_state_conj(x)
+
     if paddle.in_dynamic_mode():
         return _C_ops.conj(x)
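As with flip, conj gains an eager-mode path through the generated final-state kernel. A minimal usage sketch, assuming a Paddle build with complex tensor support:

```python
import numpy as np
import paddle

x = paddle.to_tensor(np.array([[1 + 1j, 2 + 2j, 3 + 3j]], dtype=np.complex64))
# Element-wise complex conjugate; dispatches to final_state_conj in eager mode.
print(paddle.conj(x).numpy())  # [[1.-1.j 2.-2.j 3.-3.j]]
```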
python/paddle/utils/code_gen/api.yaml
@@ -345,6 +345,7 @@
     func : UnchangedInferMeta
   kernel :
     func : conj
+  backward : conj_grad
 
 - api : conv2d
   args : (Tensor input, Tensor filter, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search)
@@ -628,6 +629,7 @@
     func : FlipInferMeta
   kernel :
     func : flip
+  backward : flip_grad
 
 - api : floor
   args : (Tensor x)
@@ -1382,7 +1384,7 @@
     func : PixelShuffleInferMeta
   kernel :
     func : pixel_shuffle
-  # backward : pixel_shuffle_grad
+  backward : pixel_shuffle_grad
 
 # poisson // no need grad
 - api : poisson
python/paddle/utils/code_gen/backward.yaml
@@ -208,6 +208,16 @@
   output : Tensor[](x_grad)
   invoke : concat_grad_impl(x, out_grad, axis)
 
+- backward_api : conj_grad
+  forward : conj (Tensor x) -> Tensor(out)
+  args : (Tensor out_grad)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [out_grad]
+  kernel :
+    func : conj
+
 - backward_api : conv2d_grad
   forward : conv2d (Tensor input, Tensor filter, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search) -> Tensor(out)
   args : (Tensor input, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search)
@@ -436,6 +446,16 @@
     backend : out_grad
     layout : out_grad
 
+- backward_api : flip_grad
+  forward : flip (Tensor x, int[] axis) -> Tensor(out)
+  args : (Tensor out_grad, int[] axis)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [out_grad]
+  kernel :
+    func : flip
+
 - backward_api : floor_grad
   forward : floor(Tensor x) -> Tensor(out)
   args : (Tensor out_grad)
@@ -990,6 +1010,15 @@
   kernel :
     func : pad3d_grad
 
+- backward_api : pixel_shuffle_grad
+  forward : pixel_shuffle (Tensor x, int upscale_factor, str data_format) -> Tensor(out)
+  args : (Tensor out_grad, int upscale_factor, str data_format)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : PixelShuffleGradInferMeta
+  kernel :
+    func : pixel_shuffle_grad
+
 - backward_api : pool2d_grad
   forward : pool2d(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(out)
   args : (Tensor x, Tensor out, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
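With pixel_shuffle_grad registered here (and enabled in api.yaml), the eager-mode gradient runs through the code-generated path. A minimal end-to-end sketch, assuming an installed Paddle build containing this change; the shapes mirror the unit test above (N, C*r*r, H, W = 2, 9, 4, 4 with r = 3):

```python
import paddle
import paddle.nn.functional as F

x = paddle.rand([2, 9, 4, 4])                 # [N, C*r*r, H, W], r = 3
x.stop_gradient = False
out = F.pixel_shuffle(x, upscale_factor=3)    # -> [2, 1, 12, 12]
out.sum().backward()
# x.grad has the input shape, as computed by PixelShuffleGradInferMeta.
print(x.grad.shape)                           # [2, 9, 4, 4]
```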