Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
MegEngine 天元
MegEngine
提交
87f00232
MegEngine
项目概览
MegEngine 天元
/
MegEngine
接近 2 年 前同步成功
通知
414
Star
4708
Fork
583
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
DevOps
流水线
流水线任务
计划
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
MegEngine
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
DevOps
DevOps
流水线
流水线任务
计划
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
流水线任务
提交
Issue看板
提交
87f00232
编写于
3月 07, 2022
作者:
M
Megvii Engine Team
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
fix(mge/gm): fix missing dtype checking while attaching tensors
GitOrigin-RevId: f0aaea99b93472b893eeb3ba35c6293c5f15b122
上级
3726f5cc
变更
3
隐藏空白更改
内联
并排
Showing
3 changed file
with
26 addition
and
2 deletion
+26
-2
imperative/python/megengine/autodiff/grad_manager.py
imperative/python/megengine/autodiff/grad_manager.py
+5
-1
imperative/python/megengine/core/tensor/dtype.py
imperative/python/megengine/core/tensor/dtype.py
+5
-1
imperative/python/test/unit/autodiff/test_grad_manger.py
imperative/python/test/unit/autodiff/test_grad_manger.py
+16
-0
未找到文件。
imperative/python/megengine/autodiff/grad_manager.py
浏览文件 @
87f00232
...
@@ -6,11 +6,11 @@
...
@@ -6,11 +6,11 @@
# software distributed under the License is distributed on an
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import
weakref
import
weakref
from
collections
import
OrderedDict
from
typing
import
Callable
,
Iterable
,
List
,
Union
from
typing
import
Callable
,
Iterable
,
List
,
Union
from
..core._imperative_rt.core2
import
pop_scope
,
push_scope
,
set_option
from
..core._imperative_rt.core2
import
pop_scope
,
push_scope
,
set_option
from
..core.autodiff.grad
import
Grad
from
..core.autodiff.grad
import
Grad
from
..core.tensor.dtype
import
is_differentible_dtype
from
..logger
import
get_logger
from
..logger
import
get_logger
from
..tensor
import
Tensor
from
..tensor
import
Tensor
from
..utils.future
import
Future
from
..utils.future
import
Future
...
@@ -208,6 +208,10 @@ class GradManager:
...
@@ -208,6 +208,10 @@ class GradManager:
for
x
in
tensors
:
for
x
in
tensors
:
assert
isinstance
(
x
,
Tensor
),
"Object to be attached should be Tensor"
assert
isinstance
(
x
,
Tensor
),
"Object to be attached should be Tensor"
assert
is_differentible_dtype
(
x
.
dtype
),
(
"Only tensors of floating point dtype can be attached to get gradients, "
"get tensor dtype: {} and shape: {}"
.
format
(
x
.
dtype
,
x
.
shape
)
)
spec
=
self
.
_attach_specs
.
get
(
id
(
x
))
spec
=
self
.
_attach_specs
.
get
(
id
(
x
))
new_attach
=
spec
is
None
new_attach
=
spec
is
None
if
spec
is
None
:
if
spec
is
None
:
...
...
imperative/python/megengine/core/tensor/dtype.py
浏览文件 @
87f00232
...
@@ -38,6 +38,10 @@ def is_bfloat16(dtype):
...
@@ -38,6 +38,10 @@ def is_bfloat16(dtype):
return
dtype
is
bfloat16
return
dtype
is
bfloat16
def is_differentible_dtype(dtype):
    """Return ``True`` if tensors of *dtype* may be attached for autodiff.

    Only floating-point dtypes (float32, float16, bfloat16) carry gradients;
    integer and quantized dtypes are rejected by ``GradManager.attach``.

    NOTE(review): "differentible" is a misspelling of "differentiable", but the
    name is public API here and is kept unchanged.
    """
    # Check the common cases first; fall back to the bfloat16 predicate last,
    # mirroring the short-circuit order of the original expression.
    if dtype == np.float32:
        return True
    if dtype == np.float16:
        return True
    return is_bfloat16(dtype)
# quantization dtype related
# quantization dtype related
# use namedtuple to make class immutable, comparable and easy to print
# use namedtuple to make class immutable, comparable and easy to print
...
@@ -114,7 +118,7 @@ def create_quantized_dtype(
...
@@ -114,7 +118,7 @@ def create_quantized_dtype(
dtype_meta
:
QuantDtypeMeta
,
scale
:
float
,
zp
:
Union
[
int
,
None
]
dtype_meta
:
QuantDtypeMeta
,
scale
:
float
,
zp
:
Union
[
int
,
None
]
):
):
r
"""Get quantized dtype with metadata attribute according to _metadata_dict.
r
"""Get quantized dtype with metadata attribute according to _metadata_dict.
Note that unsigned dtype must have ``zero_point`` and signed dtype must
Note that unsigned dtype must have ``zero_point`` and signed dtype must
not have ``zero_point``, to be consistent with tensor generated by calling
not have ``zero_point``, to be consistent with tensor generated by calling
compiled function from `CompGraph.compile(inputs, outspec)`.
compiled function from `CompGraph.compile(inputs, outspec)`.
...
...
imperative/python/test/unit/autodiff/test_grad_manger.py
浏览文件 @
87f00232
...
@@ -13,6 +13,7 @@ import numpy as np
...
@@ -13,6 +13,7 @@ import numpy as np
import
pytest
import
pytest
import
megengine
as
mge
import
megengine
as
mge
import
megengine.core.tensor.dtype
as
dtype
import
megengine.distributed
as
dist
import
megengine.distributed
as
dist
import
megengine.functional
as
F
import
megengine.functional
as
F
import
megengine.module
as
M
import
megengine.module
as
M
...
@@ -469,3 +470,18 @@ def test_2nd_grad_with_custom_gradient():
...
@@ -469,3 +470,18 @@ def test_2nd_grad_with_custom_gradient():
np
.
testing
.
assert_almost_equal
(
np
.
testing
.
assert_almost_equal
(
x
.
grad
.
numpy
(),
-
np
.
sin
(
x_np
)
-
np
.
cos
(
x_np
),
decimal
=
5
x
.
grad
.
numpy
(),
-
np
.
sin
(
x_np
)
-
np
.
cos
(
x_np
),
decimal
=
5
)
)
@pytest.mark.parametrize("invalid_dtype", [np.uint8, np.int8, np.int32])
def test_attach_invalid_tensor_dtype(invalid_dtype):
    """Attaching a tensor of a non-floating-point dtype must raise."""
    manager = GradManager()
    non_float = mge.tensor([1], dtype=invalid_dtype)
    # GradManager.attach asserts the dtype is differentiable.
    with pytest.raises(AssertionError):
        manager.attach([non_float])
@pytest.mark.parametrize("differentible_dtype", [np.float32, np.float16])
def test_attach_differentible_tensor_dtype(differentible_dtype):
    """Floating-point tensors attach without error."""
    manager = GradManager()
    float_tensor = mge.tensor([1], dtype=differentible_dtype)
    # Must not raise for differentiable dtypes.
    manager.attach([float_tensor])
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录