MegEngine 天元 / MegEngine

Commit 4aaae995
Authored on Jan 20, 2021 by Megvii Engine Team
feat(mge/functional): add python wrapper to resize opr
GitOrigin-RevId: b7cc6dd829531d750c6d61c9dc316d7999d82cfc
Parent: d04b4bc0
Showing 5 changed files with 113 additions and 1 deletion (+113, -1)
imperative/python/megengine/functional/nn.py (+37, -1)
imperative/python/test/unit/core/test_autodiff.py (+11, -0)
imperative/python/test/unit/functional/test_functional.py (+25, -0)
imperative/src/impl/ops/resize.cpp (+38, -0)
src/core/include/megbrain/ir/ops.td (+2, -0)
imperative/python/megengine/functional/nn.py
@@ -7,7 +7,7 @@
 # software distributed under the License is distributed on an
 # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # pylint: disable=too-many-lines
-from typing import Optional, Sequence, Tuple, Union
+from typing import Iterable, Optional, Sequence, Tuple, Union
 from ..core._imperative_rt import CompNode
 from ..core._imperative_rt.core2 import apply
@@ -58,6 +58,7 @@ __all__ = [
     "one_hot",
     "prelu",
     "remap",
+    "resize",
     "softmax",
     "softplus",
     "svd",
@@ -878,6 +879,41 @@ def one_hot(inp: Tensor, num_classes: int) -> Tensor:
     return result


+def resize(
+    inp: Tensor, target_shape: Iterable[int], interp_mode: str = "LINEAR"
+) -> Tensor:
+    r"""
+    Applies resize transformation to batched 2D images.
+
+    :param inp: `(N, C, H, W)` input tensor. Currently only support "NCHW" format.
+    :param target_shape: `(H, W)` target images shape.
+    :param interp_mode: interpolation methods. Defaule mode is "LINEAR", Currently only support "LINEAR".
+
+    Examples:
+
+    .. testcode::
+
+        import numpy as np
+        from megengine import tensor
+        import megengine.functional as F
+
+        x = tensor(np.random.randn(10, 3, 32, 32))
+        out = F.resize(x, (16, 16))
+        print(out.numpy().shape)
+
+    Outputs:
+
+    .. testoutput::
+
+        (10, 3, 16, 16)
+
+    """
+    op = builtin.Resize(imode=interp_mode, format="NCHW")
+    shape = astensor1d(target_shape, inp, dtype="int32", device=inp.device)
+    (result,) = apply(op, inp, shape)
+    return result
+
+
 def warp_perspective(
     inp: Tensor,
     M: Tensor,
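For quick reference outside the docstring, a minimal usage sketch of the wrapper added above. It assumes a MegEngine build containing this commit and only restates what the hunk itself shows: NCHW input, "LINEAR" as the default and currently only interpolation mode.

import numpy as np
import megengine.functional as F
from megengine import tensor

# Batch of 10 three-channel images in NCHW layout, the only supported format.
x = tensor(np.random.randn(10, 3, 32, 32).astype("float32"))

# "LINEAR" is the default interp_mode and currently the only one accepted.
out = F.resize(x, (64, 48), interp_mode="LINEAR")

# N and C are untouched; H and W follow target_shape.
print(out.shape)  # expected: (10, 3, 64, 48)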
imperative/python/test/unit/core/test_autodiff.py
@@ -373,6 +373,17 @@ def test_Broadcast():
     np.testing.assert_equal(np.ones((3, 3, 1), dtype=np.float32) * 10, x.grad.numpy())


+def test_resize():
+    x_np = np.random.rand(3, 3, 32, 32).astype("float32")
+    x = mge.Tensor(x_np)
+
+    grad = Grad().wrt(x, callback=save_to(x))
+    y = F.resize(x, (16, 16))
+    grad(y, F.ones_like(y))
+    np.testing.assert_equal(np.ones(x_np.shape, dtype=np.float32) / 4, x.grad.numpy())
+
+
 def test_Reduce_sum():
     x_np = np.random.rand(3, 3).astype("float32")
     x = mge.Tensor(x_np)
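A rough sanity check for the expected gradient in this test (an informal argument, not MegEngine code): the backward pass of a LINEAR resize redistributes each output pixel's gradient over the input with weights that sum to 1, so an all-ones upstream gradient over a 16x16 output, spread across a 32x32 input, leaves 256/1024 = 1/4 per input element, exactly the constant the test asserts.

# Plain-Python sketch of the conservation argument above; it does not call MegEngine.
in_h = in_w = 32
out_h = out_w = 16
expected_grad = (out_h * out_w) / (in_h * in_w)
print(expected_grad)  # 0.25, matching np.ones(x_np.shape, dtype=np.float32) / 4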
imperative/python/test/unit/functional/test_functional.py
@@ -328,6 +328,31 @@ def test_one_hot():
     onehot_high_dimension()


+def test_resize():
+    # check shape
+    test_cases = [
+        [(1, 1, 10, 10), (5, 5)],
+        [(1, 3, 10, 10), (20, 20)],
+        [(10, 1, 10, 10), (1, 1)],
+        [(10, 10, 1, 1), (10, 10)],
+    ]
+    for inp_shape, target_shape in test_cases:
+        x = tensor(np.random.randn(*inp_shape), dtype=np.float32)
+        out = F.resize(x, target_shape, interp_mode="LINEAR")
+        assert out.shape[0] == x.shape[0] and out.shape[1] == x.shape[1]
+        assert out.shape[2] == target_shape[0] and out.shape[3] == target_shape[1]
+
+    # check value
+    x = tensor(np.ones((3, 3, 10, 10)), dtype=np.float32)
+    out = F.resize(x, (15, 5), interp_mode="LINEAR")
+    np.testing.assert_equal(out.numpy(), np.ones((3, 3, 15, 5)).astype(np.float32))
+
+    np_x = np.arange(32)
+    x = tensor(np_x).astype(np.float32).reshape(1, 1, 32, 1)
+    out = F.resize(x, (1, 1), interp_mode="LINEAR")
+    np.testing.assert_equal(out.item(), np_x.mean())
+
+
 def test_warp_perspective():
     inp_shape = (1, 1, 4, 4)
     x = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
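The two value checks above lean on one property worth stating explicitly: LINEAR interpolation weights sum to 1, so an all-ones image stays all-ones at any target size, and collapsing a (1, 1, 32, 1) ramp to (1, 1) averages the whole axis. A NumPy-only sketch of the reference value used in the last assertion:

import numpy as np

# Reference value for the final assertion; no MegEngine involved.
np_x = np.arange(32)
print(np_x.mean())  # 15.5, the value out.item() is compared against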
imperative/src/impl/ops/resize.cpp
0 → 100644
/**
 * \file imperative/src/impl/ops/resize.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */

#include "megbrain/imperative/ops/autogen.h"
#include "megbrain/opr/imgproc.h"

#include "../op_trait.h"

namespace mgb {
namespace imperative {
namespace {

auto apply_on_var_node(
        const OpDef& def,
        const VarNodeArray& inputs) {
    auto&& op = static_cast<const Resize&>(def);
    mgb_assert(inputs.size() == 2);
    return opr::Resize::make(inputs[0], inputs[1], op.param());
}

OP_TRAIT_REG(Resize, Resize)
    .apply_on_var_node(apply_on_var_node)
    .fallback();

} // anonymous namespace
} // namespace imperative
} // namespace mgb

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
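This trait registers the imperative builtin Resize op and forwards it to the existing graph operator opr::Resize, asserting exactly two inputs (the image and the target-shape tensor). The sketch below shows how that contract is exercised from Python; the call pattern is taken from the nn.py hunk above, while the import paths for builtin and astensor1d are assumptions about the package layout at this commit.

import numpy as np
from megengine import tensor
from megengine.core.ops import builtin                  # assumed import path
from megengine.core._imperative_rt.core2 import apply   # matches the nn.py import
from megengine.core.tensor.utils import astensor1d      # assumed import path

inp = tensor(np.random.randn(2, 3, 8, 8).astype("float32"))
op = builtin.Resize(imode="LINEAR", format="NCHW")      # ResizeParam fields, as in nn.py
shape = astensor1d((4, 4), inp, dtype="int32", device=inp.device)
(out,) = apply(op, inp, shape)                          # two inputs, per the mgb_assert
print(out.shape)  # expected: (2, 3, 4, 4)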
src/core/include/megbrain/ir/ops.td
@@ -76,6 +76,8 @@ def WarpPerspective: MgbHashableOp<"WarpPerspective", [WarpPerspectiveParam]>;
 def Remap: MgbHashableOp<"Remap", [RemapParam]>;
+def Resize: MgbHashableOp<"Resize", [ResizeParam]>;
 def IndexingOneHot: MgbHashableOp<"IndexingOneHot", [AxisParam]>;
 def IndexingSetOneHot: MgbHashableOp<"IndexingSetOneHot", [AxisParam]>;