Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
MegEngine 天元
MegEngine
提交
32b2c45c
MegEngine
项目概览
MegEngine 天元
/
MegEngine
1 年多 前同步成功
通知
403
Star
4705
Fork
582
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
DevOps
流水线
流水线任务
计划
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
MegEngine
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
DevOps
DevOps
流水线
流水线任务
计划
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
流水线任务
提交
Issue看板
提交
32b2c45c
编写于
12月 15, 2022
作者:
M
Megvii Engine Team
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
fix(ci): fix the problem that torch not found
GitOrigin-RevId: f8671d931b91d651193b56142f31f8d66505f21e
上级
266263dc
变更
2
隐藏空白更改
内联
并排
Showing
2 changed file
with
0 addition
and
94 deletion
+0
-94
imperative/python/test/unit/core/test_indexing_op.py
imperative/python/test/unit/core/test_indexing_op.py
+0
-20
imperative/python/test/unit/functional/test_tensor.py
imperative/python/test/unit/functional/test_tensor.py
+0
-74
未找到文件。
imperative/python/test/unit/core/test_indexing_op.py
浏览文件 @
32b2c45c
...
@@ -5,7 +5,6 @@ from tempfile import NamedTemporaryFile
...
@@ -5,7 +5,6 @@ from tempfile import NamedTemporaryFile
import
numpy
as
np
import
numpy
as
np
import
pytest
import
pytest
import
torch
from
utils
import
make_tensor
from
utils
import
make_tensor
import
megengine
import
megengine
...
@@ -624,25 +623,6 @@ def test_advance_indexing_with_bool(test_varnode):
...
@@ -624,25 +623,6 @@ def test_advance_indexing_with_bool(test_varnode):
)
)
def test_advance_indexing_autodiff():
    """Check that gradients through boolean-mask assignment match PyTorch.

    Builds the same computation in MegEngine and in PyTorch:
    ``a = x + 1; a[x > 3] = 0.3; b = a + 1`` and backpropagates the sum of
    ``b``, then asserts the gradients w.r.t. ``x`` are exactly equal.

    NOTE(review): relies on module-level ``Tensor``, ``GradManager``,
    ``torch`` and ``np`` imports from the enclosing test file — confirm
    they are in scope where this is restored.
    """
    # MegEngine side: record the graph with GradManager.
    x = Tensor([2, 2, 3, 4, 5, 6, 7, 8, 2], dtype="float32")
    gm = GradManager()
    gm.attach(x)
    with gm:
        a = x + 1
        # In-place masked assignment: gradient must flow only through the
        # unmasked elements (mask positions get constant 0.3).
        a[x > 3] = 0.3
        b = a + 1
        gm.backward(b.sum())

    # PyTorch reference: identical computation on the same data.
    torch_x = torch.tensor(
        [2, 2, 3, 4, 5, 6, 7, 8, 2], dtype=torch.float32, requires_grad=True
    )
    a = torch_x + 1
    a[torch_x > 3] = 0.3
    b = a + 1
    (b.sum()).backward()

    # Gradients are 0/1 indicators here, so exact equality is safe.
    np.testing.assert_equal(x.grad.numpy(), torch_x.grad.numpy())
@
pytest
.
mark
.
parametrize
(
"symbolic"
,
[
True
,
False
,
None
])
@
pytest
.
mark
.
parametrize
(
"symbolic"
,
[
True
,
False
,
None
])
def
test_subtensor_on_empty_tensor
(
symbolic
):
def
test_subtensor_on_empty_tensor
(
symbolic
):
np_x
=
np
.
array
([],
dtype
=
np
.
float32
).
reshape
(
10
,
0
,
10
)
np_x
=
np
.
array
([],
dtype
=
np
.
float32
).
reshape
(
10
,
0
,
10
)
...
...
imperative/python/test/unit/functional/test_tensor.py
浏览文件 @
32b2c45c
...
@@ -227,80 +227,6 @@ def test_split_basic(is_varnode):
...
@@ -227,80 +227,6 @@ def test_split_basic(is_varnode):
set_symbolic_shape
(
saved_symbolic_shape
)
set_symbolic_shape
(
saved_symbolic_shape
)
def test_concat_and_stack():
    """Fuzz-test ``F.concat`` and ``F.stack`` against NumPy/PyTorch.

    Generates random input shapes (including zero-length dimensions and the
    single-input case), runs forward and backward through MegEngine and
    PyTorch with the same random upstream gradient, and asserts both the
    outputs and the per-input gradients agree.

    NOTE(review): relies on module-level ``Tensor``, ``F`` and ``np``
    imports from the enclosing test file — confirm they are in scope.
    """
    import copy
    from megengine.autodiff import GradManager
    import torch

    def generate_test_data(max_nr_inp, max_dim, max_dim_len, test_concat=True):
        # Produce ``nr_inp`` random ndarrays with shapes that are valid for
        # concat (same shape except along ``cat_axis``) or stack (identical
        # shapes). Returns the arrays and the chosen axis.
        nr_inp = np.random.randint(1, max_nr_inp) if max_nr_inp > 1 else 1
        dims = np.random.randint(1, max_dim)
        # concat axis must be an existing dim; stack may also insert a new
        # leading/trailing dim, hence the wider range.
        cat_axis = (
            np.random.randint(-dims, dims)
            if test_concat
            else np.random.randint(-dims - 1, dims + 1)
        )
        ishape = [np.random.randint(0, max_dim_len) for _ in range(dims)]
        ishapes = [copy.deepcopy(ishape) for _ in range(nr_inp)]
        if test_concat:
            # Inputs may differ only along the concat axis.
            for i in range(nr_inp):
                ishapes[i][cat_axis] = np.random.randint(0, max_dim_len)
        inp_nps = []
        for ishape in ishapes:
            inp_nps.append(np.random.randn(*ishape))
        return inp_nps, cat_axis

    def test_impl(max_nr_inp, max_dim, max_dim_len, test_concat):
        # One randomized round: forward + backward in both frameworks,
        # then compare outputs and input gradients.
        inp_nps, cat_axis = generate_test_data(
            max_nr_inp, max_dim, max_dim_len, test_concat
        )
        inp_mges = [Tensor(inp_np) for inp_np in inp_nps]
        inp_torchs = [torch.tensor(inp_np, requires_grad=True) for inp_np in inp_nps]
        if test_concat:
            np_func, mge_func, torch_func = np.concatenate, F.concat, torch.cat
        else:
            np_func, mge_func, torch_func = np.stack, F.stack, torch.stack

        # NumPy forward only fixes the output shape for the random gradient.
        res_np = np_func(inp_nps, axis=cat_axis)
        grad_np = np.random.randn(*res_np.shape).astype(np.float32)

        gm = GradManager().attach(inp_mges)
        with gm:
            res_mge = mge_func(inp_mges, axis=cat_axis)
            gm.backward(res_mge, Tensor(grad_np))

        res_torch = torch_func(inp_torchs, dim=cat_axis)
        res_torch.backward(torch.tensor(grad_np))

        np.testing.assert_allclose(res_mge.numpy(), res_torch.detach().cpu().numpy())
        for inp_mge, inp_torch in zip(inp_mges, inp_torchs):
            np.testing.assert_allclose(
                inp_mge.grad.numpy(), inp_torch.grad.detach().cpu().numpy()
            )

    def test_concat(max_nr_inp, max_dim, max_dim_len):
        test_impl(max_nr_inp, max_dim, max_dim_len, test_concat=True)

    def test_stack(max_nr_inp, max_dim, max_dim_len):
        test_impl(max_nr_inp, max_dim, max_dim_len, test_concat=False)

    # test only one input
    test_concat(1, 7, 16)
    test_stack(1, 7, 16)
    # test zero shape
    test_concat(10, 7, 1)
    test_stack(10, 7, 1)
    for _ in range(3):
        test_concat(10, 7, 16)
    for _ in range(3):
        test_stack(10, 7, 16)
@
pytest
.
mark
.
parametrize
(
"symbolic"
,
[
None
,
False
,
True
])
@
pytest
.
mark
.
parametrize
(
"symbolic"
,
[
None
,
False
,
True
])
def
test_split
(
symbolic
):
def
test_split
(
symbolic
):
x
=
Tensor
(
np
.
random
.
random
((
10
,
20
)),
dtype
=
np
.
float32
)
x
=
Tensor
(
np
.
random
.
random
((
10
,
20
)),
dtype
=
np
.
float32
)
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录