Paddle commit fa7aa6b8 (unverified)
Authored on Oct 29, 2021 by Feiyu Chan; committed via GitHub on Oct 29, 2021.

1. fix ifftshift(missing negative sign before shifts); (#36835)
2. add complex data type support for paddle.shape at graph assembly.

Parent: c716cf35
Showing 4 changed files with 69 additions and 14 deletions (+69 -14).
python/paddle/fft.py (+1 -1)
python/paddle/fluid/layers/nn.py (+4 -3)
python/paddle/fluid/tests/unittests/fft/test_fft.py (+14 -10)
python/paddle/fluid/tests/unittests/fft/test_fft_with_static_graph.py (+50 -0)
python/paddle/fft.py
@@ -1345,7 +1345,7 @@ def ifftshift(x, axes=None, name=None):
         # shift all axes
         rank = len(x.shape)
         axes = list(range(0, rank))
-        shifts = shape // 2
+        shifts = -shape // 2
     elif isinstance(axes, int):
         shifts = -shape[axes] // 2
     else:
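Why the sign matters: ifftshift has to invert fftshift, which rolls each axis forward by n // 2. For even n, rolling forward by n // 2 a second time happens to restore the original order, so the missing minus sign went unnoticed; for odd n it does not, and the inverse must roll by -(n // 2). A minimal NumPy-only sketch of that property (independent of Paddle, names are illustrative):

import numpy as np

x = np.arange(5)                                 # odd length, where the old code broke
y = np.fft.fftshift(x)                           # forward shift by n // 2

wrong = np.roll(y, 5 // 2)                       # old behaviour: shift by +(n // 2)
fixed = np.roll(y, -(5 // 2))                    # new behaviour: shift by -(n // 2)

print(np.array_equal(wrong, x))                  # False: +(n // 2) is not the inverse for odd n
print(np.array_equal(fixed, x))                  # True
print(np.array_equal(np.fft.ifftshift(y), x))    # True: matches NumPy's own ifftshift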
python/paddle/fluid/layers/nn.py
@@ -11396,9 +11396,10 @@ def shape(input):
             res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output])
             print(res) # [array([ 3, 100, 100], dtype=int32)]
     """
-    check_variable_and_dtype(
-        input, 'input',
-        ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], 'shape')
+    check_variable_and_dtype(input, 'input', [
+        'bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'complex64',
+        'complex128'
+    ], 'shape')
     helper = LayerHelper('shape', **locals())
     out = helper.create_variable_for_type_inference(dtype='int32')
     helper.append_op(
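The second part of the commit widens this dtype check so that a shape op can be added to a static graph for complex inputs. A minimal sketch of what that enables, assuming a Paddle build that contains this change (the variable names and the CPUPlace choice are illustrative):

import numpy as np
import paddle

paddle.enable_static()
main_prog, startup_prog = paddle.static.Program(), paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    # Before this change, check_variable_and_dtype rejected complex64/complex128
    # here, so the graph could not even be assembled.
    x = paddle.static.data('x', [3, 4], dtype='complex64')
    s = paddle.shape(x)

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup_prog)
data = (np.random.randn(3, 4) + 1j * np.random.randn(3, 4)).astype('complex64')
[shape_val] = exe.run(main_prog, feed={'x': data}, fetch_list=[s])
print(shape_val)   # expected: [3 4]
paddle.disable_static()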
python/paddle/fluid/tests/unittests/fft/test_fft.py
@@ -1009,11 +1009,13 @@ class TestRfftFreq(unittest.TestCase):
 @place(DEVICES)
-@parameterize(
-    (TEST_CASE_NAME, 'x', 'axes', 'dtype'),
-    [('test_1d', np.random.randn(10), (0, ), 'float64'),
-     ('test_2d', np.random.randn(10, 10), (0, 1), 'float64'),
-     ('test_2d_with_all_axes', np.random.randn(10, 10), None, 'float64')])
+@parameterize((TEST_CASE_NAME, 'x', 'axes', 'dtype'), [
+    ('test_1d', np.random.randn(10), (0, ), 'float64'),
+    ('test_2d', np.random.randn(10, 10), (0, 1), 'float64'),
+    ('test_2d_with_all_axes', np.random.randn(10, 10), None, 'float64'),
+    ('test_2d_odd_with_all_axes',
+     np.random.randn(5, 5) + 1j * np.random.randn(5, 5), None, 'complex128'),
+])
 class TestFftShift(unittest.TestCase):
     def test_fftshift(self):
         """Test fftshift with norm condition

@@ -1028,11 +1030,13 @@ class TestFftShift(unittest.TestCase):
 @place(DEVICES)
-@parameterize((TEST_CASE_NAME, 'x', 'axes'), [
-    ('test_1d', np.random.randn(10), (0, ), 'float64'),
-    ('test_2d', np.random.randn(10, 10), (0, 1), 'float64'),
-    ('test_2d_with_all_axes', np.random.randn(10, 10), None, 'float64'),
-])
+@parameterize(
+    (TEST_CASE_NAME, 'x', 'axes'),
+    [('test_1d', np.random.randn(10), (0, ), 'float64'),
+     ('test_2d', np.random.randn(10, 10), (0, 1), 'float64'),
+     ('test_2d_with_all_axes', np.random.randn(10, 10), None, 'float64'),
+     ('test_2d_odd_with_all_axes',
+      np.random.randn(5, 5) + 1j * np.random.randn(5, 5), None, 'complex128')])
 class TestIfftShift(unittest.TestCase):
     def test_ifftshift(self):
         """Test ifftshift with norm condition
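The new test_2d_odd_with_all_axes cases are the ones that would have failed before the fix, since only odd-sized axes expose the missing minus sign. A quick dygraph spot check along the same lines, assuming a Paddle build with this fix and using np.fft.ifftshift as the reference here:

import numpy as np
import paddle

# Odd-sized complex input, mirroring the new test case.
x = (np.random.randn(5, 5) + 1j * np.random.randn(5, 5)).astype('complex128')

y = paddle.fft.ifftshift(paddle.to_tensor(x)).numpy()

# With the fix, Paddle agrees with NumPy on odd sizes; before it, each axis
# ended up rotated by +2 instead of -2.
np.testing.assert_allclose(y, np.fft.ifftshift(x))
print("ifftshift matches np.fft.ifftshift")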
python/paddle/fluid/tests/unittests/fft/test_fft_with_static_graph.py
@@ -888,6 +888,56 @@ class TestIhfftnException(unittest.TestCase):
         pass
 
 
+@place(DEVICES)
+@parameterize((TEST_CASE_NAME, 'x', 'axes', 'dtype'), [
+    ('test_1d', np.random.randn(10), (0, ), 'float64'),
+    ('test_2d', np.random.randn(10, 10), (0, 1), 'float64'),
+    ('test_2d_with_all_axes', np.random.randn(10, 10), None, 'float64'),
+    ('test_2d_odd_with_all_axes',
+     np.random.randn(5, 5) + 1j * np.random.randn(5, 5), None, 'complex128'),
+])
+class TestFftShift(unittest.TestCase):
+    def test_fftshift(self):
+        """Test fftshift with norm condition
+        """
+        paddle.enable_static()
+        mp, sp = paddle.static.Program(), paddle.static.Program()
+        with paddle.static.program_guard(mp, sp):
+            input = paddle.static.data('input', x.shape, dtype=x.dtype)
+            output = paddle.fft.fftshift(input, axes)
+
+        exe = paddle.static.Executor(place)
+        exe.run(sp)
+        [output] = exe.run(mp, feed={'input': x}, fetch_list=[output])
+        yield output
+        paddle.disable_static()
+
+
+@place(DEVICES)
+@parameterize(
+    (TEST_CASE_NAME, 'x', 'axes'),
+    [('test_1d', np.random.randn(10), (0, ), 'float64'),
+     ('test_2d', np.random.randn(10, 10), (0, 1), 'float64'),
+     ('test_2d_with_all_axes', np.random.randn(10, 10), None, 'float64'),
+     ('test_2d_odd_with_all_axes',
+      np.random.randn(5, 5) + 1j * np.random.randn(5, 5), None, 'complex128')])
+class TestIfftShift(unittest.TestCase):
+    def test_ifftshift(self):
+        """Test ifftshift with norm condition
+        """
+        paddle.enable_static()
+        mp, sp = paddle.static.Program(), paddle.static.Program()
+        with paddle.static.program_guard(mp, sp):
+            input = paddle.static.data('input', x.shape, dtype=x.dtype)
+            output = paddle.fft.ifftshift(input, axes)
+
+        exe = paddle.static.Executor(place)
+        exe.run(sp)
+        [output] = exe.run(mp, feed={'input': x}, fetch_list=[output])
+        yield output
+        paddle.disable_static()
+
+
 if __name__ == '__main__':
     unittest.main()