Commit 0e303710
Authored Sep 17, 2020 by Megvii Engine Team

feat(mge/module): add group_local_conv

GitOrigin-RevId: 0503377bc5f2ed8edc8c21b4f0382c7a38462a86
Parent: d225cbcd
Showing 3 changed files with 74 additions and 59 deletions.
- imperative/python/megengine/functional/nn.py (+7 −5)
- imperative/python/megengine/module/conv.py (+1 −1)
- imperative/python/test/unit/module/test_conv.py (+66 −53)
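For orientation before the diffs: a minimal usage sketch of the grouped locally connected layer this commit enables. This is not part of the diff; the constructor arguments simply mirror the grouped test case added below, and the MegEngine 1.x imperative API is assumed.

import numpy as np
import megengine as mge
from megengine.module import LocalConv2d

# groups=2: channels are split into 2 groups, each group using its own
# untied (per-output-position) kernels, as in the grouped test case below.
layer = LocalConv2d(
    in_channels=32,
    out_channels=32,
    input_height=8,
    input_width=8,
    kernel_size=3,
    stride=1,
    padding=1,
    groups=2,
)
x = mge.tensor(np.random.normal(size=(10, 32, 8, 8)).astype(np.float32))
y = layer(x)
print(y.shape)  # (10, 32, 8, 8): kernel_size=3 with padding=1 preserves H and W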
未找到文件。
imperative/python/megengine/functional/nn.py

@@ -221,10 +221,8 @@ def local_conv2d(
     padding: Union[int, Tuple[int, int]] = 0,
     dilation: Union[int, Tuple[int, int]] = 1,
     conv_mode="CROSS_CORRELATION",
-) -> Tensor:
-    """Applies spatial 2D convolution over an image with unshared kernels.
+):
+    """Applies spatial 2D convolution over an groupped channeled image with untied kernels.
     Refer to :class:`~.LocalConv2d` for more information.
     """
     assert conv_mode == "CROSS_CORRELATION" or conv_mode.name == "CROSS_CORRELATION"
@@ -232,6 +230,8 @@ def local_conv2d(
     pad_h, pad_w = expand_hw(padding)
     dilate_h, dilate_w = expand_hw(dilation)
+
+    Sparse = P.Convolution.Sparse
     op = builtin.GroupLocal(
         stride_h=stride_h,
         stride_w=stride_w,
@@ -239,7 +239,9 @@ def local_conv2d(
         pad_w=pad_w,
         dilate_h=dilate_h,
         dilate_w=dilate_w,
+        # strategy=get_conv_execution_strategy(),
+        mode=conv_mode,
+        compute_mode="DEFAULT",
+        sparse=Sparse.DENSE,
     )
     inp, weight = utils.convert_inputs(inp, weight)
     (output,) = apply(op, inp, weight)
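A hedged sketch of the functional path touched above: local_conv2d takes the untied weight explicitly. The seven-dimensional weight layout is inferred from the test file in this commit rather than from separate documentation, and the export from megengine.functional is likewise an assumption.

import numpy as np
import megengine as mge
from megengine.functional import local_conv2d

N, IC, OC, H, W = 10, 4, 8, 8, 8
K, S, P, G = 3, 1, 1, 1
OH = (H + 2 * P - K) // S + 1  # output height, same formula as in the test
OW = (W + 2 * P - K) // S + 1  # output width

inp = mge.tensor(np.random.normal(size=(N, IC, H, W)).astype(np.float32))
# One kernel per output position: (groups, OH, OW, IC // groups, K, K, OC // groups)
weight = mge.tensor(
    np.random.normal(size=(G, OH, OW, IC // G, K, K, OC // G)).astype(np.float32)
)
out = local_conv2d(inp, weight, stride=S, padding=P, dilation=1)
print(out.shape)  # expected: (N, OC, OH, OW)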
imperative/python/megengine/module/conv.py

@@ -340,7 +340,7 @@ class ConvTranspose2d(_ConvNd):
 class LocalConv2d(Conv2d):
-    r"""Applies a spatial convolution with unshared kernels over an input 4D tensor.
+    r"""Applies a spatial convolution with untied kernels over an groupped channeled input 4D tensor.
     It is also known as the locally connected layer.

     :param in_channels: number of input channels.
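To make the "untied kernels" and grouped wording concrete: output channel oc belongs to group oc // (out_channels // groups) and only sees the matching slice of input channels, while the kernel itself is indexed by the output position. A small numpy reference for a single output element follows; it mirrors the naive check in the rewritten test below, and the helper name and argument order are illustrative only. The test itself can be run with pytest on imperative/python/test/unit/module/test_conv.py.

import numpy as np

def local_conv2d_ref_elem(inp, weight, n, oc, oh, ow, stride=1):
    # inp:    (N, IC, H, W), already zero-padded
    # weight: (G, OH, OW, IC // G, K, K, OC // G)
    G, _, _, icg, K, _, ocg = weight.shape
    g = oc // ocg                      # group this output channel belongs to
    ih, iw = oh * stride, ow * stride  # top-left corner of the receptive field
    patch = inp[n, g * icg : (g + 1) * icg, ih : ih + K, iw : iw + K]
    # kernels are untied: the weight is indexed by the output position (oh, ow)
    return np.sum(patch * weight[g, oh, ow, :, :, :, oc % ocg])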
imperative/python/test/unit/module/test_conv.py

@@ -48,62 +48,75 @@ def test_conv_transpose2d():
     conv_transpose2d.bias = Parameter(bias, dtype=np.float32)
     y = conv_transpose2d(tensor(inp))
-    np.testing.assert_allclose(out, y.numpy(), atol=2e-6)
+    np.testing.assert_almost_equal(out, y.numpy(), 2e-6)


 def test_local_conv2d():
-    batch_size = 10
-    in_channels = 4
-    out_channels = 8
-    input_height = 8
-    input_width = 8
-    kernel_size = 3
-    stride = 1
-    padding = 1
-    dilation = 1
-    groups = 1
-    local_conv2d = LocalConv2d(
-        in_channels=in_channels,
-        out_channels=out_channels,
-        input_height=input_height,
-        input_width=input_width,
-        kernel_size=kernel_size,
-        stride=stride,
-        padding=padding,
-        dilation=dilation,
-        groups=groups,
-    )
-    inputs = np.random.normal(
-        size=(batch_size, in_channels, input_height, input_width)
-    ).astype(np.float32)
-    output_height = (input_height + padding * 2 - kernel_size) // stride + 1
-    output_width = (input_width + padding * 2 - kernel_size) // stride + 1
-    weights = np.random.normal(
-        size=(
-            groups,
-            output_height,
-            output_width,
-            in_channels // groups,
-            kernel_size,
-            kernel_size,
-            out_channels // groups,
-        )
-    ).astype(np.float32)
-    local_conv2d.weight = Parameter(weights)
-    outputs = local_conv2d(tensor(inputs))
-    # naive calculation use numpy
-    # only test output_height == input_height, output_width == input_width, group == 1
-    inputs = np.pad(inputs, ((0, 0), (0, 0), (1, 1), (1, 1)))
-    expected = np.zeros(
-        (batch_size, out_channels, output_height, output_width), dtype=np.float32,
-    )
-    for n, oc, oh, ow in itertools.product(
-        *map(range, [batch_size, out_channels, output_height, output_width])
-    ):
-        ih, iw = oh * stride, ow * stride
-        expected[n, oc, ih, iw] = np.sum(
-            inputs[n, :, ih : ih + kernel_size, iw : iw + kernel_size]
-            * weights[0, oh, ow, :, :, :, oc]
-        )
-    np.testing.assert_almost_equal(outputs.numpy(), expected, 1e-5)
+    def test_func(
+        batch_size,
+        in_channels,
+        out_channels,
+        input_height,
+        input_width,
+        kernel_size,
+        stride,
+        padding,
+        dilation,
+        groups,
+    ):
+        local_conv2d = LocalConv2d(
+            in_channels=in_channels,
+            out_channels=out_channels,
+            input_height=input_height,
+            input_width=input_width,
+            kernel_size=kernel_size,
+            stride=stride,
+            padding=padding,
+            dilation=dilation,
+            groups=groups,
+        )
+        inputs = np.random.normal(
+            size=(batch_size, in_channels, input_height, input_width)
+        ).astype(np.float32)
+        output_height = (input_height + padding * 2 - kernel_size) // stride + 1
+        output_width = (input_width + padding * 2 - kernel_size) // stride + 1
+        weights = np.random.normal(
+            size=(
+                groups,
+                output_height,
+                output_width,
+                in_channels // groups,
+                kernel_size,
+                kernel_size,
+                out_channels // groups,
+            )
+        ).astype(np.float32)
+        local_conv2d.weight = Parameter(weights)
+        outputs = local_conv2d(tensor(inputs))
+        # naive calculation use numpy
+        # only test output_height == input_height, output_width == input_width
+        inputs = np.pad(inputs, ((0, 0), (0, 0), (1, 1), (1, 1)))
+        expected = np.zeros(
+            (batch_size, out_channels, output_height, output_width), dtype=np.float32,
+        )
+        ic_group_size = in_channels // groups
+        oc_group_size = out_channels // groups
+        for n, oc, oh, ow in itertools.product(
+            *map(range, [batch_size, out_channels, output_height, output_width])
+        ):
+            ih, iw = oh * stride, ow * stride
+            g_id = oc // oc_group_size
+            expected[n, oc, ih, iw] = np.sum(
+                inputs[
+                    n,
+                    g_id * ic_group_size : (g_id + 1) * ic_group_size,
+                    ih : ih + kernel_size,
+                    iw : iw + kernel_size,
+                ]
+                * weights[g_id, oh, ow, :, :, :, oc % oc_group_size]
+            )
+        np.testing.assert_allclose(outputs.numpy(), expected, atol=1e-5)
+
+    test_func(10, 4, 4, 5, 5, 3, 1, 1, 1, 1)
+    test_func(10, 32, 32, 8, 8, 3, 1, 1, 1, 2)
+    test_func(10, 32, 32, 8, 8, 3, 1, 1, 1, 4)