Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
MegEngine 天元
MegEngine
提交
09241a1f
MegEngine
项目概览
MegEngine 天元
/
MegEngine
1 年多 前同步成功
通知
403
Star
4705
Fork
582
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
DevOps
流水线
流水线任务
计划
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
MegEngine
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
DevOps
DevOps
流水线
流水线任务
计划
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
流水线任务
提交
Issue看板
提交
09241a1f
编写于
10月 10, 2020
作者:
M
Megvii Engine Team
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
feat(mge): remove param_pack_* from functional
GitOrigin-RevId: a5fe25be8cc443ba169af7a74a8cdb4a03f558ea
上级
0d4568d6
变更
5
隐藏空白更改
内联
并排
Showing
5 changed files
with
124 additions
and
132 deletions
+124
-132
imperative/python/megengine/distributed/helper.py
imperative/python/megengine/distributed/helper.py
+102
-1
imperative/python/megengine/functional/param_pack.py
imperative/python/megengine/functional/param_pack.py
+0
-34
imperative/python/megengine/functional/tensor.py
imperative/python/megengine/functional/tensor.py
+0
-80
imperative/python/test/unit/distributed/test_distributed.py
imperative/python/test/unit/distributed/test_distributed.py
+22
-1
imperative/python/test/unit/functional/test_tensor.py
imperative/python/test/unit/functional/test_tensor.py
+0
-16
未找到文件。
imperative/python/megengine/distributed/helper.py
浏览文件 @
09241a1f
...
...
@@ -17,13 +17,114 @@ import numpy as np
from
megengine.autodiff.grad_manager
import
GradManager
,
get_backwarding_grad_manager
from
megengine.device
import
get_default_device
,
get_device_count
from
..functional.param_pack
import
get_offsets
,
pack_allreduce_split
from
..core.ops.builtin
import
ParamPackConcat
,
ParamPackSplit
from
..core.tensor.core
import
apply
from
..functional.utils
import
copy
from
..tensor
import
Tensor
from
..utils.future
import
Future
from
.functional
import
all_reduce_sum
,
broadcast
from
.group
import
WORLD
,
Group
,
group_barrier
,
is_distributed
def param_pack_split(inp: Tensor, offsets: list, shapes: list):
    r"""
    Splits a packed tensor back into a list of tensors according to the
    given offsets and shapes; intended for ``parampack`` use only.

    :param inp: the packed input tensor.
    :param offsets: flat offset list of length ``2 * n`` for ``n`` output
        tensors, laid out as ``[begin0, end0, begin1, end1]``.
    :param shapes: target shape of each output tensor.
    :return: the split tensors.

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        from megengine.distributed.helper import param_pack_split

        a = tensor(np.ones((10,), np.int32))
        b, c = param_pack_split(a, [0, 1, 1, 10], [(1,), (3, 3)])
        print(b.numpy())
        print(c.numpy())

    Outputs:

    .. testoutput::

        [1]
        [[1 1 1]
         [1 1 1]
         [1 1 1]]
    """
    split_op = ParamPackSplit()
    split_op.shapes = shapes
    split_op.offsets = offsets
    return apply(split_op, inp)
def param_pack_concat(inps: list, offsets: Tensor, offsets_val: list):
    r"""
    Concatenates a list of tensors into a single flat tensor; intended for
    ``parampack`` use only.

    :param inps: the input tensors to concatenate.
    :param offsets: the offsets as a device tensor.
    :param offsets_val: flat offset list of length ``2 * n``, laid out as
        ``[begin0, end0, begin1, end1]``.
    :return: the concatenated tensor.

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        from megengine.distributed.helper import param_pack_concat

        a = tensor(np.ones((1,), np.int32))
        b = tensor(np.ones((3, 3), np.int32))
        offsets_val = [0, 1, 1, 10]
        offsets = tensor(offsets_val, np.int32)
        c = param_pack_concat([a, b], offsets, offsets_val)
        print(c.numpy())

    Outputs:

    .. testoutput::

        [1 1 1 1 1 1 1 1 1 1]
    """
    concat_op = ParamPackConcat()
    concat_op.offsets = offsets_val
    outputs = apply(concat_op, *inps, offsets)
    return outputs[0]
def get_offsets(shapes):
    """Return pairwise [begin, end) offsets for tensors of *shapes* packed
    contiguously into one flat buffer: ``[begin0, end0, begin1, end1, ...]``."""
    result = []
    end = 0
    for shape in shapes:
        begin = end
        end = begin + int(np.prod(shape))
        result.extend((begin, end))
    return result
def pack_allreduce_split(pack_list, shapes, group, reduce_method):
    """Pack gradients into one flat tensor, all-reduce it across *group*,
    then split the result back into per-parameter gradients.

    :param pack_list: gradient tensors to pack together.
    :param shapes: shape of each gradient in ``pack_list``.
    :param group: the communication group to reduce over.
    :param reduce_method: ``"mean"`` divides by the group size after the sum.
    :return: the reduced gradients, split back to the original shapes.
    """
    bounds = get_offsets(shapes)
    bounds_tensor = Tensor(bounds)
    packed = param_pack_concat(pack_list, bounds_tensor, bounds)
    packed = all_reduce_sum(packed, group, group.comp_node)
    if reduce_method == "mean":
        packed /= group.size
    return param_pack_split(packed, bounds, shapes)
class
TensorFuture
(
Future
):
def device(self):
    """Device of the wrapped tensor; unavailable until the future resolves.

    :raises RuntimeError: always — the tensor is not ready yet.
    """
    # Bugfix: the original ``raise "Sorry, ..."`` is invalid in Python 3
    # (only BaseException subclasses may be raised) and would surface as a
    # confusing TypeError. Raise a proper exception with the same message.
    raise RuntimeError("Sorry, this tensor is not ready")
...
...
imperative/python/megengine/functional/param_pack.py
已删除
100644 → 0
浏览文件 @
0d4568d6
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import
numpy
as
np
from
..tensor
import
Tensor
from
.distributed
import
all_reduce_sum
from
.tensor
import
param_pack_concat
,
param_pack_split
def get_offsets(shapes):
    """Build the flat ``[begin0, end0, begin1, end1, ...]`` offset list for
    tensors of the given *shapes* laid out back to back."""
    sizes = [int(np.prod(shape)) for shape in shapes]
    bounds = []
    start = 0
    for size in sizes:
        bounds += [start, start + size]
        start += size
    return bounds
def pack_allreduce_split(pack_list, shapes, group, reduce_method):
    """Concatenate the gradients in *pack_list* into one tensor, all-reduce
    it over *group*, and split the result back to the original *shapes*.

    :param pack_list: gradient tensors to pack.
    :param shapes: shape of each packed gradient.
    :param group: the communication group.
    :param reduce_method: ``"mean"`` averages instead of summing.
    :return: the reduced, re-split gradients.
    """
    offsets_list = get_offsets(shapes)
    device_offsets = Tensor(offsets_list)
    reduced = all_reduce_sum(
        param_pack_concat(pack_list, device_offsets, offsets_list),
        group,
        group.comp_node,
    )
    if reduce_method == "mean":
        reduced /= group.size
    return param_pack_split(reduced, offsets_list, shapes)
imperative/python/megengine/functional/tensor.py
浏览文件 @
09241a1f
...
...
@@ -46,8 +46,6 @@ __all__ = [
"linspace"
,
"ones"
,
"ones_like"
,
"param_pack_concat"
,
"param_pack_split"
,
"reshape"
,
"split"
,
"squeeze"
,
...
...
@@ -975,81 +973,3 @@ def arange(
if
np
.
dtype
(
dtype
)
==
np
.
int32
:
return
result
.
astype
(
dtype
)
return
result
def param_pack_split(inp: Tensor, offsets: List, shapes: List) -> Tensor:
    r"""
    Splits a packed tensor into a list of tensors described by *offsets*
    and *shapes*; intended for ``parampack`` use only.

    :param inp: the packed input tensor.
    :param offsets: flat offset list of length ``2 * n`` for ``n`` output
        tensors, laid out as ``[begin0, end0, begin1, end1]``.
    :param shapes: target shape of each output tensor.
    :return: the split tensors.

    Examples:

    .. testcode::

        import numpy as np
        import megengine.functional as F
        from megengine import tensor

        a = tensor(np.ones((10,), np.int32))
        b, c = F.param_pack_split(a, [0, 1, 1, 10], [(1,), (3, 3)])
        print(b.numpy())
        print(c.numpy())

    Outputs:

    .. testoutput::

        [1]
        [[1 1 1]
         [1 1 1]
         [1 1 1]]
    """
    splitter = builtin.ParamPackSplit()
    splitter.shapes = shapes
    splitter.offsets = offsets
    return apply(splitter, inp)
def param_pack_concat(inps: List, offsets: Tensor, offsets_val: List) -> Tensor:
    r"""
    Concatenates a list of tensors into one flat tensor; intended for
    ``parampack`` use only.

    :param inps: the input tensors.
    :param offsets: the offsets as a device tensor.
    :param offsets_val: flat offset list of length ``2 * n``, laid out as
        ``[begin0, end0, begin1, end1]``.
    :return: the concatenated tensor.

    Examples:

    .. testcode::

        import numpy as np
        import megengine.functional as F
        from megengine import tensor

        a = tensor(np.ones((1,), np.int32))
        b = tensor(np.ones((3, 3), np.int32))
        offsets_val = [0, 1, 1, 10]
        offsets = tensor(offsets_val, np.int32)
        c = F.param_pack_concat([a, b], offsets, offsets_val)
        print(c.numpy())

    Outputs:

    .. testoutput::

        [1 1 1 1 1 1 1 1 1 1]
    """
    concatenator = builtin.ParamPackConcat()
    concatenator.offsets = offsets_val
    results = apply(concatenator, *inps, offsets)
    return results[0]
imperative/python/test/unit/distributed/test_distributed.py
浏览文件 @
09241a1f
...
...
@@ -10,12 +10,17 @@ import multiprocessing as mp
import
platform
import
queue
import
numpy
as
np
import
pytest
import
megengine
as
mge
import
megengine.distributed
as
dist
from
megengine.core.ops.builtin
import
CollectiveComm
,
ParamPackConcat
,
ParamPackSplit
from
megengine.distributed.helper
import
get_device_count_by_fork
from
megengine.distributed.helper
import
(
get_device_count_by_fork
,
param_pack_concat
,
param_pack_split
,
)
def
_assert_q_empty
(
q
):
...
...
@@ -195,3 +200,19 @@ def test_oprmm_hashable():
rhs
=
(
CollectiveComm
(),
ParamPackConcat
(),
ParamPackSplit
())
assert
lhs
==
rhs
assert
hash
(
lhs
)
==
hash
(
rhs
)
def test_param_pack_split():
    """param_pack_split should slice a flat tensor into the declared shapes."""
    packed = mge.Tensor(np.ones((10,), np.int32))
    first, second = param_pack_split(packed, [0, 1, 1, 10], [(1,), (3, 3)])
    flat = packed.numpy()
    assert np.allclose(first.numpy(), flat[1])
    assert np.allclose(second.numpy(), flat[1:].reshape(3, 3))
def test_param_pack_concat():
    """param_pack_concat should flatten and join the inputs end to end."""
    small = mge.Tensor(np.ones((1,), np.int32))
    big = mge.Tensor(np.ones((3, 3), np.int32))
    offsets_val = [0, 1, 1, 10]
    offsets = mge.Tensor(offsets_val, np.int32)
    packed = param_pack_concat([small, big], offsets, offsets_val)
    expected = np.concatenate([small.numpy(), big.numpy().flatten()])
    assert np.allclose(expected, packed.numpy())
imperative/python/test/unit/functional/test_tensor.py
浏览文件 @
09241a1f
...
...
@@ -359,19 +359,3 @@ def test_copy_d2h():
def test_copy_d2d():
    """Exercise device-to-device copies across GPUs and across streams."""
    for src, dst in (("gpu0", "gpu1"), ("gpu0:0", "gpu0:1")):
        copy_test(src, dst)
def test_param_pack_split():
    """F.param_pack_split should slice a flat tensor into the declared shapes."""
    packed = tensor(np.ones((10,), np.int32))
    first, second = F.param_pack_split(packed, [0, 1, 1, 10], [(1,), (3, 3)])
    flat = packed.numpy()
    assert np.allclose(first.numpy(), flat[1])
    assert np.allclose(second.numpy(), flat[1:].reshape(3, 3))
def test_param_pack_concat():
    """F.param_pack_concat should flatten and join the inputs end to end."""
    small = tensor(np.ones((1,), np.int32))
    big = tensor(np.ones((3, 3), np.int32))
    offsets_val = [0, 1, 1, 10]
    offsets = tensor(offsets_val, np.int32)
    packed = F.param_pack_concat([small, big], offsets, offsets_val)
    expected = np.concatenate([small.numpy(), big.numpy().flatten()])
    assert np.allclose(expected, packed.numpy())
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录