BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle, in sync with the fork source)
Commit bc88fbb5 (unverified)
Authored by hong on Apr 08, 2022; committed via GitHub on Apr 08, 2022

Add conj pixel shuffle yaml (#41499)

* add conj flip yaml
* add flip conj pixel shuffle
Parent: 9844aafb

Showing 10 changed files with 91 additions and 44 deletions (+91, -44)
paddle/fluid/operators/pixel_shuffle_op.cc                  +5   -37
paddle/phi/infermeta/unary.cc                               +30  -0
paddle/phi/infermeta/unary.h                                +5   -0
python/paddle/fluid/tests/unittests/test_conj_op.py         +4   -2
python/paddle/fluid/tests/unittests/test_flip.py            +4   -2
python/paddle/fluid/tests/unittests/test_pixel_shuffle.py   +4   -2
python/paddle/tensor/manipulation.py                        +4   -0
python/paddle/tensor/math.py                                +3   -0
python/paddle/utils/code_gen/api.yaml                       +3   -1
python/paddle/utils/code_gen/backward.yaml                  +29  -0
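Taken together, these changes declare conj, flip, and pixel_shuffle (plus the newly enabled pixel_shuffle_grad) in the code-gen YAML and route their Python APIs through the final-state eager kernels when dygraph mode is active. A minimal usage sketch, not part of the diff, with illustrative shapes:

import paddle

x = paddle.randn([2, 9, 4, 4])                               # NCHW, C = 1 * 3 * 3
y = paddle.nn.functional.pixel_shuffle(x, upscale_factor=3)  # shape [2, 1, 12, 12]
z = paddle.flip(x, axis=[2, 3])                              # reverse H and W

c = paddle.to_tensor([1 + 2j, 3 - 4j])
print(paddle.conj(c))                                        # [(1-2j), (3+4j)]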
paddle/fluid/operators/pixel_shuffle_op.cc

@@ -82,42 +82,6 @@ class PixelShuffleGradMaker : public framework::SingleGradOpMaker<T> {
 class PixelShuffleGradOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
-
-  void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
-                      platform::errors::NotFound(
-                          "Input(Out@Grad) should not be null"));
-    PADDLE_ENFORCE_EQ(ctx->HasOutput(framework::GradVarName("X")), true,
-                      platform::errors::NotFound(
-                          "Output(X@Grad) should not be null"));
-
-    auto do_dims = ctx->GetInputDim(framework::GradVarName("Out"));
-    PADDLE_ENFORCE_EQ(do_dims.size(), 4,
-                      platform::errors::InvalidArgument(
-                          "Input should be a 4-D tensor of format [N, C, H, W] "
-                          "or [N, H, W, C], but got %u.",
-                          do_dims.size()));
-
-    auto upscale_factor = ctx->Attrs().Get<int>("upscale_factor");
-    const std::string data_format =
-        ctx->Attrs().Get<std::string>("data_format");
-    const bool channel_last = (data_format == "NHWC");
-
-    auto dx_dims = do_dims;
-    dx_dims[0] = do_dims[0];
-    if (!channel_last) {
-      dx_dims[1] = do_dims[1] * (upscale_factor * upscale_factor);
-      dx_dims[2] = do_dims[2] / upscale_factor;
-      dx_dims[3] = do_dims[3] / upscale_factor;
-    } else {
-      dx_dims[1] = do_dims[1] / upscale_factor;
-      dx_dims[2] = do_dims[2] / upscale_factor;
-      dx_dims[3] = do_dims[3] * (upscale_factor * upscale_factor);
-    }
-    ctx->SetOutputDim(framework::GradVarName("X"), dx_dims);
-  }
 };

 }  // namespace operators

@@ -132,7 +96,11 @@ REGISTER_OPERATOR(pixel_shuffle, ops::PixelShuffleOp, ops::PixelShuffleOpMaker,
                   ops::PixelShuffleGradMaker<paddle::imperative::OpBase>,
                   PixelShuffleInferShapeFunctor);

-REGISTER_OPERATOR(pixel_shuffle_grad, ops::PixelShuffleGradOp);
+DECLARE_INFER_SHAPE_FUNCTOR(pixel_shuffle_grad,
+                            PixelShuffleGradInferShapeFunctor,
+                            PD_INFER_META(phi::PixelShuffleGradInferMeta));
+
+REGISTER_OPERATOR(pixel_shuffle_grad, ops::PixelShuffleGradOp,
+                  PixelShuffleGradInferShapeFunctor);

 REGISTER_OP_VERSION(pixel_shuffle)
     .AddCheckpoint(
paddle/phi/infermeta/unary.cc

@@ -1315,6 +1315,36 @@ void PixelShuffleInferMeta(const MetaTensor& x,
   out->set_dims(output_dims);
 }

+void PixelShuffleGradInferMeta(const MetaTensor& out_grad,
+                               int upscale_factor,
+                               const std::string& data_format,
+                               MetaTensor* x_grad) {
+  auto do_dims = out_grad.dims();
+  PADDLE_ENFORCE_EQ(do_dims.size(), 4,
+                    phi::errors::InvalidArgument(
+                        "Input should be a 4-D tensor of format [N, C, H, W] "
+                        "or [N, H, W, C], but got %u.",
+                        do_dims.size()));
+
+  const bool channel_last = (data_format == "NHWC");
+
+  auto dx_dims = do_dims;
+  dx_dims[0] = do_dims[0];
+  if (!channel_last) {
+    dx_dims[1] = do_dims[1] * (upscale_factor * upscale_factor);
+    dx_dims[2] = do_dims[2] / upscale_factor;
+    dx_dims[3] = do_dims[3] / upscale_factor;
+  } else {
+    dx_dims[1] = do_dims[1] / upscale_factor;
+    dx_dims[2] = do_dims[2] / upscale_factor;
+    dx_dims[3] = do_dims[3] * (upscale_factor * upscale_factor);
+  }
+  x_grad->set_dims(dx_dims);
+  x_grad->set_dtype(out_grad.dtype());
+}
+
 void PNormInferMeta(const MetaTensor& x,
                     float porder,
                     int axis,
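The new PixelShuffleGradInferMeta reproduces the shape logic removed from PixelShuffleGradOp::InferShape above: the gradient of the input folds the upscaled spatial dimensions back by upscale_factor. A plain-Python sketch of the same arithmetic (illustrative helper, not part of Paddle):

def pixel_shuffle_grad_dims(do_dims, upscale_factor, data_format="NCHW"):
    # do_dims is the 4-D shape of Out@Grad; returns the shape of X@Grad.
    n, d1, d2, d3 = do_dims
    if data_format != "NHWC":   # channel-first [N, C, H, W]
        return [n, d1 * upscale_factor * upscale_factor,
                d2 // upscale_factor, d3 // upscale_factor]
    # channel-last [N, H, W, C]
    return [n, d1 // upscale_factor, d2 // upscale_factor,
            d3 * upscale_factor * upscale_factor]

# Example: out_grad of shape [2, 1, 12, 12] with upscale_factor=3 (NCHW)
assert pixel_shuffle_grad_dims([2, 1, 12, 12], 3) == [2, 9, 4, 4]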
paddle/phi/infermeta/unary.h

@@ -200,6 +200,11 @@ void PixelShuffleInferMeta(const MetaTensor& x,
                            const std::string& data_format,
                            MetaTensor* out);

+void PixelShuffleGradInferMeta(const MetaTensor& out_grad,
+                               int upscale_factor,
+                               const std::string& data_format,
+                               MetaTensor* x_grad);
+
 void PNormInferMeta(const MetaTensor& x,
                     float porder,
                     int axis,
python/paddle/fluid/tests/unittests/test_conj_op.py

@@ -32,6 +32,7 @@ paddle.enable_static()
 class TestConjOp(OpTest):
     def setUp(self):
         self.op_type = "conj"
+        self.python_api = paddle.tensor.conj
         self.init_dtype_type()
         self.init_input_output()
         self.init_grad_input_output()
@@ -53,14 +54,15 @@ class TestConjOp(OpTest):
         self.grad_in = np.conj(self.grad_out)

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad_normal(self):
         self.check_grad(
             ['X'],
             'Out',
             user_defined_grads=[self.grad_in],
-            user_defined_grad_outputs=[self.grad_out])
+            user_defined_grad_outputs=[self.grad_out],
+            check_eager=True)


 class TestComplexConjOp(unittest.TestCase):
python/paddle/fluid/tests/unittests/test_flip.py

@@ -67,6 +67,7 @@ class TestFlipOp_API(unittest.TestCase):
 class TestFlipOp(OpTest):
     def setUp(self):
         self.op_type = 'flip'
+        self.python_api = paddle.tensor.flip
         self.init_test_case()
         self.inputs = {'X': np.random.random(self.in_shape).astype('float64')}
         self.init_attrs()
@@ -76,10 +77,10 @@ class TestFlipOp(OpTest):
         self.attrs = {"axis": self.axis}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(["X"], "Out")
+        self.check_grad(["X"], "Out", check_eager=True)

     def init_test_case(self):
         self.in_shape = (6, 4, 2, 3)
@@ -131,4 +132,5 @@ class TestFlipOpNegAxis(TestFlipOp):

 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
python/paddle/fluid/tests/unittests/test_pixel_shuffle.py

@@ -52,6 +52,7 @@ def pixel_shuffle_np(x, up_factor, data_format="NCHW"):
 class TestPixelShuffleOp(OpTest):
     def setUp(self):
         self.op_type = "pixel_shuffle"
+        self.python_api = paddle.nn.functional.pixel_shuffle
         self.init_data_format()
         n, c, h, w = 2, 9, 4, 4
@@ -73,10 +74,10 @@ class TestPixelShuffleOp(OpTest):
         self.format = "NCHW"

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 class TestChannelLast(TestPixelShuffleOp):
@@ -220,4 +221,5 @@ class TestPixelShuffleError(unittest.TestCase):

 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
python/paddle/tensor/manipulation.py

@@ -458,6 +458,10 @@ def flip(x, axis, name=None):
     """
     if isinstance(axis, int):
         axis = [axis]
+
+    if in_dygraph_mode():
+        return _C_ops.final_state_flip(x, axis)
+
     if paddle.in_dynamic_mode():
         return _C_ops.flip(x, "axis", axis)
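With this addition, paddle.flip dispatches to the generated final_state_flip kernel when in_dygraph_mode() is true, and falls through to the legacy _C_ops.flip path otherwise. A quick eager-mode check (illustrative values):

import paddle

x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
print(paddle.flip(x, axis=0))       # [[3., 4.], [1., 2.]]
print(paddle.flip(x, axis=[0, 1]))  # [[4., 3.], [2., 1.]]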
python/paddle/tensor/math.py

@@ -3349,6 +3349,9 @@ def conj(x, name=None):
            #  [(4-4j), (5-5j), (6-6j)]])

     """
+    if in_dygraph_mode():
+        return _C_ops.final_state_conj(x)
+
     if paddle.in_dynamic_mode():
         return _C_ops.conj(x)
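paddle.conj gets the same treatment: final_state_conj in dygraph mode, the legacy _C_ops.conj call otherwise. The expected output matches the docstring excerpt above; a short check (illustrative):

import paddle

data = paddle.to_tensor([[1 + 1j, 2 + 2j, 3 + 3j],
                         [4 + 4j, 5 + 5j, 6 + 6j]])
print(paddle.conj(data))
# [[(1-1j), (2-2j), (3-3j)],
#  [(4-4j), (5-5j), (6-6j)]]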
python/paddle/utils/code_gen/api.yaml

@@ -345,6 +345,7 @@
     func : UnchangedInferMeta
   kernel :
     func : conj
+  backward : conj_grad

 - api : conv2d
   args : (Tensor input, Tensor filter, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search)
@@ -659,6 +660,7 @@
     func : FlipInferMeta
   kernel :
     func : flip
+  backward : flip_grad

 - api : floor
   args : (Tensor x)
@@ -1430,7 +1432,7 @@
     func : PixelShuffleInferMeta
   kernel :
     func : pixel_shuffle
-  # backward : pixel_shuffle_grad
+  backward : pixel_shuffle_grad

 # poisson  // no need grad
 - api : poisson
python/paddle/utils/code_gen/backward.yaml

@@ -208,6 +208,16 @@
   output : Tensor[](x_grad)
   invoke : concat_grad_impl(x, out_grad, axis)

+- backward_api : conj_grad
+  forward : conj (Tensor x) -> Tensor(out)
+  args : (Tensor out_grad)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [out_grad]
+  kernel :
+    func : conj
+
 - backward_api : conv2d_grad
   forward : conv2d (Tensor input, Tensor filter, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search) -> Tensor(out)
   args : (Tensor input, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search)
@@ -456,6 +466,16 @@
     backend : out_grad
     layout : out_grad

+- backward_api : flip_grad
+  forward : flip (Tensor x, int[] axis) -> Tensor(out)
+  args : (Tensor out_grad, int[] axis)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [out_grad]
+  kernel :
+    func : flip
+
 - backward_api : floor_grad
   forward : floor(Tensor x) -> Tensor(out)
   args : (Tensor out_grad)
@@ -1010,6 +1030,15 @@
   kernel :
     func : pad3d_grad

+- backward_api : pixel_shuffle_grad
+  forward : pixel_shuffle (Tensor x, int upscale_factor, str data_format) -> Tensor(out)
+  args : (Tensor out_grad, int upscale_factor, str data_format)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : PixelShuffleGradInferMeta
+  kernel :
+    func : pixel_shuffle_grad
+
 - backward_api : pool2d_grad
   forward : pool2d(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(out)
   args : (Tensor x, Tensor out, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
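With pixel_shuffle_grad registered here (and the backward key enabled in api.yaml above), gradients for pixel_shuffle flow through the eager code-gen path, and the gradient shape comes from PixelShuffleGradInferMeta. An end-to-end sketch, assuming eager (dygraph) mode:

import paddle

x = paddle.randn([2, 9, 4, 4])
x.stop_gradient = False

out = paddle.nn.functional.pixel_shuffle(x, upscale_factor=3)  # [2, 1, 12, 12]
out.sum().backward()

print(x.grad.shape)  # [2, 9, 4, 4], as computed by PixelShuffleGradInferMeta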