Commit dcfb6a53
Authored on Mar 02, 2021 by Megvii Engine Team

refactor(mge/functional): move functional api

GitOrigin-RevId: 9cd3e09996f77a00e114f0870eae55ad80b4ba8b
Parent: bfb9def9
Showing 16 changed files with 887 additions and 657 deletions (+887 −657).
imperative/python/megengine/distributed/helper.py          +1   -1
imperative/python/megengine/functional/__init__.py         +1   -2
imperative/python/megengine/functional/elemwise.py         +1   -55
imperative/python/megengine/functional/img_proc.py         +0   -50
imperative/python/megengine/functional/loss.py             +2   -3
imperative/python/megengine/functional/math.py             +0   -2
imperative/python/megengine/functional/metric.py           +1   -40
imperative/python/megengine/functional/nn.py               +240 -469
imperative/python/megengine/functional/tensor.py           +33  -3
imperative/python/megengine/functional/vision.py           +576 -0
imperative/python/megengine/module/identity.py             +1   -1
imperative/python/test/unit/core/test_autodiff.py          +1   -1
imperative/python/test/unit/functional/test_functional.py  +18  -18
imperative/python/test/unit/jit/test_tracing.py            +3   -3
imperative/python/test/unit/utils/test_network_node.py     +7   -7
imperative/src/impl/ops/vision.cpp                         +2   -2
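The bulk of this commit is a namespace move: the image/vision operators leave functional.nn, functional.img_proc and functional.utils, and are regrouped under functional.vision, functional.metric and functional.tensor. A minimal before/after sketch of the call sites touched by the diffs below (assuming a MegEngine build that includes this commit; the shapes are illustrative only):

    import numpy as np
    import megengine.functional as F
    from megengine import tensor

    x = tensor(np.ones((1, 1, 4, 4), dtype=np.float32))

    # before this commit:  F.nn.interpolate(x, (8, 8), mode="BILINEAR")
    # before this commit:  F.img_proc.cvt_color(img, mode="RGB2GRAY")
    # before this commit:  F.warp_perspective(x, M, (2, 2))
    out = F.vision.interpolate(x, (8, 8), mode="BILINEAR")  # new spelling
    print(out.shape)  # expected (1, 1, 8, 8)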
imperative/python/megengine/distributed/helper.py

@@ -19,7 +19,7 @@ from megengine.device import get_default_device, get_device_count
 from ..core._imperative_rt.core2 import apply
 from ..core.ops.builtin import ParamPackConcat, ParamPackSplit
-from ..functional.utils import copy
+from ..functional.tensor import copy
 from ..tensor import Tensor
 from ..utils.future import Future
 from .functional import all_reduce_sum, broadcast
imperative/python/megengine/functional/__init__.py

@@ -7,12 +7,11 @@
 # software distributed under the License is distributed on an
 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # pylint: disable=redefined-builtin
+from . import metric, vision
 from .elemwise import *
-from .img_proc import *
 from .math import *
 from .nn import *
 from .tensor import *
-from .utils import *

 from . import distributed  # isort:skip
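With this change, functional/__init__.py exposes the new submodules as attributes, while the dropped star-imports mean the img_proc and utils names are no longer re-exported from the top level. A quick sanity check, assuming this commit is installed:

    import megengine.functional as F

    # The new submodules are importable attributes of megengine.functional.
    assert hasattr(F, "vision") and hasattr(F, "metric")
    # img_proc is gone; its only function, cvt_color, now lives under F.vision.
    print(callable(F.vision.cvt_color))  # True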
imperative/python/megengine/functional/elemwise.py

@@ -7,8 +7,6 @@
 # software distributed under the License is distributed on an
 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # pylint: disable=unused-argument,invalid-name,redefined-builtin,arguments-out-of-order
-import functools
-
 import numpy as np

 from ..core._imperative_rt.core2 import apply
@@ -17,7 +15,7 @@ from ..core.ops import builtin
 from ..core.ops.builtin import Elemwise
 from ..core.tensor import utils
 from ..core.tensor.array_method import _elwise_apply
-from ..core.tensor.utils import astype, isscalar, setscalar
+from ..core.tensor.utils import astype
 from ..device import get_default_device
 from ..jit.tracing import is_tracing
 from ..tensor import Tensor
@@ -44,8 +42,6 @@ __all__ = [
     "floor_div",
     "greater",
     "greater_equal",
-    "hswish",
-    "hsigmoid",
     "left_shift",
     "less",
     "less_equal",
@@ -62,11 +58,8 @@ __all__ = [
     "neg",
     "not_equal",
     "pow",
-    "relu",
-    "relu6",
     "right_shift",
     "round",
-    "sigmoid",
     "sin",
     "sinh",
     "sqrt",
@@ -523,53 +516,6 @@ def greater_equal(x, y):
 # other functions


-def hswish(x):
-    """
-    Element-wise `x * relu6(x + 3) / 6`.
-
-    :param x: input tensor.
-    :return: computed tensor.
-
-    Example:
-
-    .. testcode::
-
-        import numpy as np
-        from megengine import tensor
-        import megengine.functional as F
-
-        x = tensor(np.arange(5).astype(np.float32))
-        out = F.hswish(x)
-        print(out.numpy().round(decimals=4))
-
-    .. testoutput::
-
-        [0.     0.6667 1.6667 3.     4.    ]
-    """
-    return _elwise(x, mode=Elemwise.Mode.H_SWISH)
-
-
-def hsigmoid(x):
-    """Element-wise `relu6(x + 3) / 6`."""
-    return relu6(x + 3) / 6
-
-
-def relu(x):
-    """Element-wise `max(x, 0)`."""
-    return _elwise(x, mode=Elemwise.Mode.RELU)
-
-
-def relu6(x):
-    """Element-wise `min(max(x, 0), 6)`."""
-    return minimum(maximum(x, 0), 6)
-
-
-def sigmoid(x):
-    """Element-wise `1 / ( 1 + exp( -x ) )`."""
-    return _elwise(x, mode=Elemwise.Mode.SIGMOID)
-
-
 def clip(x: Tensor, lower=None, upper=None) -> Tensor:
     r"""
     Clamps all elements in input tensor into the range `[` :attr:`lower`, :attr:`upper` `]` and returns
...
imperative/python/megengine/functional/img_proc.py
已删除
100644 → 0
浏览文件 @
bfb9def9
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from
..core._imperative_rt.core2
import
apply
from
..core.ops
import
builtin
from
..tensor
import
Tensor
__all__
=
[
"cvt_color"
,
]
def
cvt_color
(
inp
:
Tensor
,
mode
:
str
=
""
):
r
"""
Convert images from one format to another
:param inp: input images.
:param mode: format mode.
:return: convert result.
Examples:
.. testcode::
import numpy as np
import megengine as mge
import megengine.functional as F
x = mge.tensor(np.array([[[[-0.58675045, 1.7526233, 0.10702174]]]]).astype(np.float32))
y = F.img_proc.cvt_color(x, mode="RGB2GRAY")
print(y.numpy())
Outputs:
.. testoutput::
[[[[0.86555195]]]]
"""
assert
mode
in
builtin
.
CvtColor
.
Mode
.
__dict__
,
"unspport mode for cvt_color"
mode
=
getattr
(
builtin
.
CvtColor
.
Mode
,
mode
)
assert
isinstance
(
mode
,
builtin
.
CvtColor
.
Mode
)
op
=
builtin
.
CvtColor
(
mode
=
mode
)
(
out
,)
=
apply
(
op
,
inp
)
return
out
imperative/python/megengine/functional/loss.py

@@ -8,10 +8,9 @@
 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 import numpy as np

-from ..core.tensor.utils import make_shape_tuple
 from ..tensor import Tensor
-from .elemwise import abs, equal, exp, log, maximum, pow, relu
-from .nn import indexing_one_hot, logsigmoid, logsumexp
+from .elemwise import abs, log
+from .nn import indexing_one_hot, logsigmoid, logsumexp, relu
 from .tensor import where

 __all__ = [
imperative/python/megengine/functional/math.py

@@ -7,9 +7,7 @@
 # software distributed under the License is distributed on an
 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 import collections
-import functools
 import math
-import numbers
 from typing import Optional, Sequence, Tuple, Union

 from ..core._imperative_rt.core2 import apply
imperative/python/megengine/functional/utils.py → imperative/python/megengine/functional/metric.py

@@ -6,23 +6,14 @@
 # Unless required by applicable law or agreed to in writing,
 # software distributed under the License is distributed on an
 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-import collections
 from typing import Iterable, Union

 import numpy as np

-from ..core._imperative_rt.core2 import apply
-from ..core._wrap import device as as_device
-from ..core.ops.builtin import Copy, Identity
 from ..tensor import Tensor
 from .math import topk as _topk
 from .tensor import broadcast_to, transpose

 __all__ = [
     "topk_accuracy",
-    "copy",
 ]


 def topk_accuracy(
     logits: Tensor, target: Tensor, topk: Union[int, Iterable[int]] = 1
@@ -46,7 +37,7 @@ def topk_accuracy(
         logits = tensor(np.arange(80, dtype=np.int32).reshape(8,10))
         target = tensor(np.arange(8, dtype=np.int32))
-        top1, top5 = F.topk_accuracy(logits, target, (1, 5))
+        top1, top5 = F.metric.topk_accuracy(logits, target, (1, 5))
         print(top1.numpy(), top5.numpy())

     Outputs:
@@ -67,33 +58,3 @@ def topk_accuracy(
     if len(topk) == 1:  # type: ignore[arg-type]
         accs = accs[0]
     return accs
-
-
-def copy(inp, device=None):
-    r"""
-    Copies tensor to another device.
-
-    :param inp: input tensor.
-    :param device: destination device.
-
-    Examples:
-
-    .. testcode::
-
-        import numpy as np
-        from megengine import tensor
-        import megengine.functional as F
-
-        x = tensor([1, 2, 3], np.int32)
-        y = F.copy(x, "xpu1")
-        print(y.numpy())
-
-    Outputs:
-
-    .. testoutput::
-
-        [1 2 3]
-    """
-    if device is None:
-        return apply(Identity(), inp)[0]
-    return apply(Copy(comp_node=as_device(device).to_c()), inp)[0]
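With utils.py renamed to metric.py and copy moved out, topk_accuracy is now addressed through the metric submodule that __init__.py imports explicitly above. A minimal sketch matching the updated docstring (assuming this commit is installed):

    import numpy as np
    import megengine.functional as F
    from megengine import tensor

    logits = tensor(np.arange(80, dtype=np.int32).reshape(8, 10))
    target = tensor(np.arange(8, dtype=np.int32))
    # returns the top-1 and top-5 accuracies as two tensors
    top1, top5 = F.metric.topk_accuracy(logits, target, (1, 5))
    print(top1.numpy(), top5.numpy())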
imperative/python/megengine/functional/nn.py

This diff is collapsed on the page (+240 −469) and is not expanded here.
imperative/python/megengine/functional/tensor.py

@@ -6,10 +6,8 @@
 # Unless required by applicable law or agreed to in writing,
 # software distributed under the License is distributed on an
 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-import functools
 import math
-from itertools import accumulate
-from typing import Iterable, Optional, Sequence, Union
+from typing import Iterable, List, Optional, Sequence, Tuple, Union

 import numpy as np

@@ -17,6 +15,7 @@ from ..core._imperative_rt import CompNode
 from ..core._imperative_rt.core2 import apply
 from ..core._wrap import device as as_device
 from ..core.ops import builtin
+from ..core.ops.builtin import Copy, Identity
 from ..core.ops.special import Const
 from ..core.tensor.array_method import _broadcast, _remove_axis
 from ..core.tensor.utils import (
@@ -51,6 +50,7 @@ __all__ = [
     "stack",
     "scatter",
     "tile",
+    "copy",
     "transpose",
     "where",
     "zeros",
@@ -1130,3 +1130,33 @@ def tile(inp: Tensor, reps: Iterable[int]):
     inp = broadcast_to(inp.reshape(base_shape), bcast_shape).reshape(target_shape)
     return inp
+
+
+def copy(inp, device=None):
+    r"""
+    Copies tensor to another device.
+
+    :param inp: input tensor.
+    :param device: destination device.
+
+    Examples:
+
+    .. testcode::
+
+        import numpy as np
+        from megengine import tensor
+        import megengine.functional as F
+
+        x = tensor([1, 2, 3], np.int32)
+        y = F.copy(x, "xpu1")
+        print(y.numpy())
+
+    Outputs:
+
+    .. testoutput::
+
+        [1 2 3]
+    """
+    if device is None:
+        return apply(Identity(), inp)[0]
+    return apply(Copy(comp_node=as_device(device).to_c()), inp)[0]
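copy now lives in functional/tensor.py and is added to that module's __all__, so both the internal imports patched elsewhere in this commit (distributed/helper.py, module/identity.py) and the public F.copy spelling keep working. A minimal sketch, assuming a single default device (the "xpu1" target from the docstring would need a second device):

    import numpy as np
    import megengine.functional as F
    from megengine.functional.tensor import copy  # new home of the helper
    from megengine import tensor

    x = tensor([1, 2, 3], np.int32)
    y = copy(x)    # device=None: a plain copy on the current device
    z = F.copy(x)  # still exported at the top level via `from .tensor import *`
    print(y.numpy(), z.numpy())  # [1 2 3] [1 2 3]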
imperative/python/megengine/functional/vision.py (new file, mode 0 → 100644)

# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from typing import Iterable, Optional, Tuple, Union

from ..core._imperative_rt.core2 import apply
from ..core.ops import builtin
from ..core.tensor import megbrain_graph, utils
from ..core.tensor.utils import astensor1d
from ..jit.tracing import is_tracing
from ..tensor import Tensor
from .elemwise import floor
from .math import argsort
from .tensor import broadcast_to, concat, expand_dims, reshape


def cvt_color(inp: Tensor, mode: str = ""):
    r"""
    Convert images from one format to another

    :param inp: input images.
    :param mode: format mode.
    :return: convert result.

    Examples:

    .. testcode::

        import numpy as np
        import megengine as mge
        import megengine.functional as F

        x = mge.tensor(np.array([[[[-0.58675045, 1.7526233, 0.10702174]]]]).astype(np.float32))
        y = F.vision.cvt_color(x, mode="RGB2GRAY")
        print(y.numpy())

    Outputs:

    .. testoutput::

        [[[[0.86555195]]]]
    """
    assert mode in builtin.CvtColor.Mode.__dict__, "unspport mode for cvt_color"
    mode = getattr(builtin.CvtColor.Mode, mode)
    assert isinstance(mode, builtin.CvtColor.Mode)
    op = builtin.CvtColor(mode=mode)
    (out,) = apply(op, inp)
    return out


def roi_pooling(
    inp: Tensor,
    rois: Tensor,
    output_shape: Union[int, tuple, list],
    mode: str = "max",
    scale: float = 1.0,
) -> Tensor:
    """
    Applies roi pooling on input feature.

    :param inp: tensor that represents the input feature, `(N, C, H, W)` images.
    :param rois: `(K, 5)` boxes. First column is the index into N. The other 4 columns are xyxy.
    :param output_shape: `(height, width)` of output rois feature.
    :param mode: "max" or "average", use max/average align just like max/average pooling. Default: "max"
    :param scale: scale the input boxes by this number. Default: 1.0
    :return: `(K, C, output_shape[0], output_shape[1])` feature of rois.

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        np.random.seed(42)
        inp = tensor(np.random.randn(1, 1, 128, 128))
        rois = tensor(np.random.random((4, 5)))
        y = F.vision.roi_pooling(inp, rois, (2, 2))
        print(y.numpy()[0].round(decimals=4))

    Outputs:

    .. testoutput::

        [[[-0.1383 -0.1383]
          [-0.5035 -0.5035]]]

    """
    assert mode in ["max", "average"], "only max/average mode is supported"
    if isinstance(output_shape, int):
        output_shape = (output_shape, output_shape)

    op = builtin.ROIPooling(mode=mode, scale=scale)
    inp, rois = utils.convert_inputs(inp, rois)
    result, _ = apply(
        op, inp, rois, Tensor(output_shape, dtype="int32", device=inp.device)
    )
    return result


def roi_align(
    inp: Tensor,
    rois: Tensor,
    output_shape: Union[int, tuple, list],
    mode: str = "average",
    spatial_scale: float = 1.0,
    sample_points: Union[int, tuple, list] = 2,
    aligned: bool = True,
) -> Tensor:
    """
    Applies roi align on input feature.

    :param inp: tensor that represents the input feature, shape is `(N, C, H, W)`.
    :param rois: `(N, 5)` boxes. First column is the box index. The other 4 columns are ``xyxy``.
    :param output_shape: `(height, width)` shape of output rois feature.
    :param mode: "max" or "average", use max/average align just like max/average pooling. Default: "average"
    :param spatial_scale: scale the input boxes by this number. Default: 1.0
    :param sample_points: number of inputs samples to take for each output sample.
        0 to take samples densely. Default: 2
    :param aligned: wheather to align the input feature, with `aligned=True`,
        we first appropriately scale the ROI and then shift it by -0.5. Default: True
    :return: output tensor.

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        np.random.seed(42)
        inp = tensor(np.random.randn(1, 1, 128, 128))
        rois = tensor(np.random.random((4, 5)))
        y = F.vision.roi_align(inp, rois, (2, 2))
        print(y.numpy()[0].round(decimals=4))

    Outputs:

    .. testoutput::

        [[[0.175  0.175 ]
          [0.1359 0.1359]]]

    """
    assert mode in ["max", "average"], "only max/average mode is supported"
    if isinstance(output_shape, int):
        output_shape = (output_shape, output_shape)
    pooled_height, pooled_width = output_shape
    if isinstance(sample_points, int):
        sample_points = (sample_points, sample_points)
    sample_height, sample_width = sample_points
    offset = 0.5 if aligned else 0.0

    op = builtin.ROIAlign(
        mode=mode,
        format="NCHW",
        spatial_scale=spatial_scale,
        offset=offset,
        pooled_height=pooled_height,
        pooled_width=pooled_width,
        sample_height=sample_height,
        sample_width=sample_width,
    )
    inp, rois = utils.convert_inputs(inp, rois)
    result, *_ = apply(op, inp, rois)
    return result


def nms(
    boxes: Tensor, scores: Tensor, iou_thresh: float, max_output: Optional[int] = None
) -> Tensor:
    r"""
    Performs non-maximum suppression (NMS) on the boxes according to their intersection-over-union(IoU).

    :param boxes: tensor of shape `(N, 4)`; the boxes to perform nms on; each box is expected to be in `(x1, y1, x2, y2)` format.
    :param iou_thresh: IoU threshold for overlapping.
    :param scores: tensor of shape `(N,)`, the score of boxes.
    :param max_output: the maximum number of boxes to keep; it is optional if this operator is not traced
        otherwise it required to be specified; if it is not specified, all boxes are kept.
    :return: indices of the elements that have been kept by NMS.

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        x = np.zeros((100,4))
        np.random.seed(42)
        x[:,:2] = np.random.rand(100,2)*20
        x[:,2:] = np.random.rand(100,2)*20 + 100
        scores = tensor(np.random.rand(100))
        inp = tensor(x)
        result = F.vision.nms(inp, scores, iou_thresh=0.7)
        print(result.numpy())

    Outputs:

    .. testoutput::

        [75 69]

    """
    assert (
        boxes.ndim == 2 and boxes.shape[1] == 4
    ), "the expected shape of boxes is (N, 4)"
    assert scores.ndim == 1, "the expected shape of scores is (N,)"
    assert (
        boxes.shape[0] == scores.shape[0]
    ), "number of boxes and scores are not matched"

    boxes = boxes.detach()
    scores = scores.detach()
    sorted_idx = argsort(scores, descending=True)
    boxes = boxes[sorted_idx]

    if is_tracing():
        assert (
            max_output is not None and max_output > 0
        ), "max_output should be specified under tracing"

    if max_output is None:
        max_output = boxes.shape[0]

    op = builtin.NMSKeep(iou_thresh, max_output)
    inp = utils.convert_inputs(boxes.reshape(1, -1, 4))
    indices, count = apply(op, *inp)
    indices = indices[0][: count[0]]
    keep_inds = sorted_idx[indices]
    return keep_inds


def remap(
    inp: Tensor,
    map_xy: Tensor,
    border_mode: str = "REPLICATE",
    scalar: float = 0.0,
    interp_mode: str = "LINEAR",
) -> Tensor:
    r"""
    Applies remap transformation to batched 2D images.

    The input images are transformed to the output images by the tensor map_xy.
    The output's H and W are same as map_xy's H and W.

    :param inp: input image
    :param map_xy: (batch, oh, ow, 2) transformation matrix
    :param border_mode: pixel extrapolation method.
        Default: "REPLICATE". Currently also support "CONSTANT", "REFLECT",
        "REFLECT_101", "WRAP".
    :param scalar: value used in case of a constant border. Default: 0
    :param interp_mode: interpolation methods.
        Default: "LINEAR". Currently only support "LINEAR" mode.
    :return: output tensor.

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        inp_shape = (1, 1, 4, 4)
        inp = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
        map_xy_shape = (1, 2, 2, 2)
        map_xy = tensor(np.array([[[1., 0.],[0., 1.]],
                            [[0., 1.],[0., 1.]]],
                            dtype=np.float32).reshape(map_xy_shape))
        out = F.vision.remap(inp, map_xy)
        print(out.numpy())

    Outputs:

    .. testoutput::

        [[[[1. 4.]
           [4. 4.]]]]

    """
    op = builtin.Remap(
        imode=interp_mode, border_type=border_mode, format="NCHW", scalar=scalar
    )
    assert isinstance(inp, (Tensor, megbrain_graph.VarNode)), "inp must be Tensor type"
    (result,) = apply(op, inp, map_xy)
    return result


def warp_affine(
    inp: Tensor,
    weight: Tensor,
    out_shape,
    border_mode="REPLICATE",
    border_val=0,
    format="NHWC",
    imode="LINEAR",
):
    """
    Batched affine transform on 2D images.

    :param inp: input image.
    :param weight: weight tensor.
    :param out_shape: output tensor shape.
    :param border_mode: pixel extrapolation method.
        Default: "WRAP". Currently "CONSTANT", "REFLECT",
        "REFLECT_101", "ISOLATED", "WRAP", "REPLICATE", "TRANSPARENT" are supported.
    :param border_val: value used in case of a constant border. Default: 0
    :param format: "NHWC" as default based on historical concerns,
        "NCHW" is also supported. Default: "NCHW".
    :param imode: interpolation methods. Could be "LINEAR", "NEAREST", "CUBIC", "AREA".
        Default: "LINEAR".
    :return: output tensor.

    .. note::

        Here all available options for params are listed,
        however it does not mean that you can use all the combinations.
        On different platforms, different combinations are supported.
    """
    op = builtin.WarpAffine(
        border_mode=border_mode, border_val=border_val, format=format, imode=imode
    )
    out_shape = utils.astensor1d(out_shape, inp, dtype="int32", device=inp.device)
    (result,) = apply(op, inp, weight, out_shape)
    return result


def warp_perspective(
    inp: Tensor,
    M: Tensor,
    dsize: Union[Tuple[int, int], int, Tensor],
    border_mode: str = "REPLICATE",
    border_val: float = 0.0,
    interp_mode: str = "LINEAR",
) -> Tensor:
    r"""
    Applies perspective transformation to batched 2D images.

    The input images are transformed to the output images by the transformation matrix:

    .. math::
            \text{output}(n, c, h, w) = \text{input} \left( n, c,
                \frac{M_{00}h + M_{01}w + M_{02}}{M_{20}h + M_{21}w + M_{22}},
                \frac{M_{10}h + M_{11}w + M_{12}}{M_{20}h + M_{21}w + M_{22}}
                \right)

    :param inp: input image.
    :param M: `(batch, 3, 3)` transformation matrix.
    :param dsize: `(h, w)` size of the output image.
    :param border_mode: pixel extrapolation method.
        Default: "REPLICATE". Currently also support "CONSTANT", "REFLECT",
        "REFLECT_101", "WRAP".
    :param border_val: value used in case of a constant border. Default: 0
    :param interp_mode: interpolation methods.
        Default: "LINEAR". Currently only support "LINEAR" mode.
    :return: output tensor.

    Note:

    The transformation matrix is the inverse of that used by `cv2.warpPerspective`.

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        inp_shape = (1, 1, 4, 4)
        x = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
        M_shape = (1, 3, 3)
        # M defines a translation: dst(1, 1, h, w) = rst(1, 1, h+1, w+1)
        M = tensor(np.array([[1., 0., 1.],
                             [0., 1., 1.],
                             [0., 0., 1.]], dtype=np.float32).reshape(M_shape))
        out = F.vision.warp_perspective(x, M, (2, 2))
        print(out.numpy())

    Outputs:

    .. testoutput::

        [[[[ 5.  6.]
           [ 9. 10.]]]]

    """
    op = builtin.WarpPerspective(
        imode=interp_mode, bmode=border_mode, format="NCHW", border_val=border_val
    )
    inp, M = utils.convert_inputs(inp, M)
    dsize = astensor1d(dsize, inp, dtype="int32", device=inp.device)
    (result,) = apply(op, inp, M, dsize)
    return result


def interpolate(
    inp: Tensor,
    size: Optional[Union[int, Tuple[int, int]]] = None,
    scale_factor: Optional[Union[float, Tuple[float, float]]] = None,
    mode: str = "BILINEAR",
    align_corners: Optional[bool] = None,
) -> Tensor:
    r"""
    Down/up samples the input tensor to either the given size or with the given scale_factor. ``size`` can not coexist with ``scale_factor``.

    :param inp: input tensor.
    :param size: size of the output tensor. Default: None
    :param scale_factor: scaling factor of the output tensor. Default: None
    :param mode: interpolation methods, acceptable values are:
        "BILINEAR", "LINEAR". Default: "BILINEAR"
    :param align_corners: This only has an effect when `mode`
        is "BILINEAR" or "LINEAR". Geometrically, we consider the pixels of the input
        and output as squares rather than points. If set to ``True``, the input
        and output tensors are aligned by the center points of their corner
        pixels, preserving the values at the corner pixels. If set to ``False``,
        the input and output tensors are aligned by the corner points of their
        corner pixels, and the interpolation uses edge value padding for
        out-of-boundary values, making this operation *independent* of input size
    :return: output tensor.

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        x = tensor(np.arange(1, 5, dtype=np.float32).reshape(1, 1, 2, 2))
        out = F.vision.interpolate(x, [4, 4], align_corners=False)
        print(out.numpy())
        out2 = F.vision.interpolate(x, scale_factor=2.)
        np.testing.assert_allclose(out.numpy(), out2.numpy())

    Outputs:

    .. testoutput::

        [[[[1.   1.25 1.75 2.  ]
           [1.5  1.75 2.25 2.5 ]
           [2.5  2.75 3.25 3.5 ]
           [3.   3.25 3.75 4.  ]]]]

    """
    mode = mode.upper()
    if mode not in ["BILINEAR", "LINEAR"]:
        raise ValueError("interpolate only support linear or bilinear mode")
    if mode not in ["BILINEAR", "LINEAR"]:
        if align_corners is not None:
            raise ValueError(
                "align_corners option can only be set in the bilinear/linear interpolating mode"
            )
    else:
        if align_corners is None:
            align_corners = False

    if (
        size is not None
        and scale_factor is None
        and not align_corners
        and mode == "BILINEAR"
        and inp.ndim in [4, 5]
    ):
        # fastpath for interpolate
        op = builtin.Resize(imode="LINEAR", format="NCHW")
        shape = astensor1d(size, inp, dtype="int32", device=inp.device)
        (result,) = apply(op, inp, shape)
        return result

    if mode == "LINEAR":
        inp = expand_dims(inp, 3)

    if inp.ndim != 4:
        raise ValueError("shape of input tensor must correspond to the operartion mode")

    if size is None:
        if scale_factor is None:
            raise ValueError("scale_factor must not be None when size is None")

        if isinstance(scale_factor, (float, int)):
            scale_factor = float(scale_factor)
            if mode == "LINEAR":
                scale_factor = (scale_factor, float(1))
            else:
                scale_factor = (scale_factor, scale_factor)
        else:
            if mode == "LINEAR":
                raise ValueError(
                    "under LINEAR mode, scale_factor can only be single value"
                )

        assert len(scale_factor) == 2, "shape of scale_factor must be equal to (2, )"
        assert isinstance(scale_factor[0], float) and isinstance(
            scale_factor[1], float
        ), "scale_factor must be float type"
        dsize = tuple(
            floor(
                Tensor(
                    inp.shape[i + 2] * scale_factor[i],
                    dtype="float32",
                    device=inp.device,
                )
            )
            for i in range(2)
        )
        dsize = concat([dsize[0], dsize[1]], axis=0)
    else:
        if scale_factor is not None:
            raise ValueError("scale_factor must be None when size is provided")

        if isinstance(size, int):
            size = (size, 1)
        else:
            if mode == "LINEAR":
                raise ValueError("under LINEAR mode, size can only be single value")
        dsize = size

    oh, ow = dsize[0], dsize[1]
    ih, iw = inp.shape[2], inp.shape[3]

    if align_corners:
        hscale = (ih - 1.0) / (oh - 1.0)
        wscale = 1.0 * iw / ow
        if mode != "LINEAR":
            wscale = (iw - 1.0) / (ow - 1.0)
        row0 = concat(
            [wscale, Tensor([0, 0], dtype="float32", device=inp.device)], axis=0
        ).reshape(1, 3)
        row1 = concat(
            [
                Tensor(0, dtype="float32", device=inp.device),
                hscale,
                Tensor(0, dtype="float32", device=inp.device),
            ],
            axis=0,
        ).reshape(1, 3)
        weight = concat(
            [row0, row1, Tensor([[0, 0, 1]], dtype="float32", device=inp.device)],
            axis=0,
        ).reshape(1, 3, 3)
        weight = broadcast_to(weight, (inp.shape[0], 3, 3))
    else:
        hscale = 1.0 * ih / oh
        wscale = 1.0 * iw / ow
        row0 = concat(
            [wscale, Tensor(0, dtype="float32", device=inp.device), 0.5 * wscale - 0.5],
            axis=0,
        ).reshape(1, 3)
        row1 = concat(
            [Tensor(0, dtype="float32", device=inp.device), hscale, 0.5 * hscale - 0.5],
            axis=0,
        ).reshape(1, 3)
        weight = concat(
            [row0, row1, Tensor([[0, 0, 1]], dtype="float32", device=inp.device)],
            axis=0,
        ).reshape(1, 3, 3)
        weight = broadcast_to(weight, (inp.shape[0], 3, 3))

    weight = weight.astype("float32")
    ret = warp_perspective(inp, weight, dsize, interp_mode="LINEAR")
    if mode == "LINEAR":
        ret = reshape(ret, ret.shape[0:3])
    return ret
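The new vision.py gathers the detection and image-warping operators in one module. A short usage sketch based on the docstrings above (random inputs, so only shapes and the kept-index output matter; assumes this commit is installed):

    import numpy as np
    import megengine.functional as F
    from megengine import tensor

    np.random.seed(42)
    feat = tensor(np.random.randn(1, 1, 128, 128))
    rois = tensor(np.random.random((4, 5)))
    pooled = F.vision.roi_align(feat, rois, (2, 2))   # shape (4, 1, 2, 2)

    boxes = np.zeros((100, 4))
    boxes[:, :2] = np.random.rand(100, 2) * 20        # x1, y1
    boxes[:, 2:] = np.random.rand(100, 2) * 20 + 100  # x2, y2
    scores = tensor(np.random.rand(100))
    keep = F.vision.nms(tensor(boxes), scores, iou_thresh=0.7)
    print(pooled.shape, keep.numpy())                 # indices of boxes kept by NMS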
imperative/python/megengine/module/identity.py

@@ -6,7 +6,7 @@
 # Unless required by applicable law or agreed to in writing,
 # software distributed under the License is distributed on an
 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-from ..functional import copy
+from ..functional.tensor import copy
 from .module import Module
imperative/python/test/unit/core/test_autodiff.py

@@ -372,7 +372,7 @@ def test_interpolate_fastpath():
     x = mge.Tensor(x_np)

     grad = Grad().wrt(x, callback=save_to(x))
-    y = F.nn.interpolate(x, size=(16, 16), mode="BILINEAR")
+    y = F.vision.interpolate(x, size=(16, 16), mode="BILINEAR")
     grad(y, F.ones_like(y))
     np.testing.assert_equal(np.ones(x_np.shape, dtype=np.float32) / 4, x.grad.numpy())
imperative/python/test/unit/functional/test_functional.py

@@ -136,8 +136,8 @@ def test_interpolate():
     def linear_interpolate():
         inp = tensor(np.arange(1, 3, dtype=np.float32).reshape(1, 1, 2))

-        out = F.nn.interpolate(inp, scale_factor=2.0, mode="LINEAR")
-        out2 = F.nn.interpolate(inp, 4, mode="LINEAR")
+        out = F.vision.interpolate(inp, scale_factor=2.0, mode="LINEAR")
+        out2 = F.vision.interpolate(inp, 4, mode="LINEAR")

         np.testing.assert_allclose(
             out.numpy(), np.array([[[1.0, 1.25, 1.75, 2.0]]], dtype=np.float32)
@@ -149,16 +149,16 @@ def test_interpolate():
     def many_batch_interpolate():
         inp = tensor(np.arange(1, 9, dtype=np.float32).reshape(2, 1, 2, 2))

-        out = F.nn.interpolate(inp, [4, 4])
-        out2 = F.nn.interpolate(inp, scale_factor=2.0)
+        out = F.vision.interpolate(inp, [4, 4])
+        out2 = F.vision.interpolate(inp, scale_factor=2.0)

         np.testing.assert_allclose(out.numpy(), out2.numpy())

     def assign_corner_interpolate():
         inp = tensor(np.arange(1, 5, dtype=np.float32).reshape(1, 1, 2, 2))

-        out = F.nn.interpolate(inp, [4, 4], align_corners=True)
-        out2 = F.nn.interpolate(inp, scale_factor=2.0, align_corners=True)
+        out = F.vision.interpolate(inp, [4, 4], align_corners=True)
+        out2 = F.vision.interpolate(inp, scale_factor=2.0, align_corners=True)

         np.testing.assert_allclose(out.numpy(), out2.numpy())
@@ -166,13 +166,13 @@ def test_interpolate():
         inp = tensor(np.arange(1, 5, dtype=np.float32).reshape(1, 1, 2, 2))

         with pytest.raises(ValueError):
-            F.nn.interpolate(inp, scale_factor=2.0, mode="LINEAR")
+            F.vision.interpolate(inp, scale_factor=2.0, mode="LINEAR")

     def inappropriate_scale_linear_interpolate():
         inp = tensor(np.arange(1, 3, dtype=np.float32).reshape(1, 1, 2))

         with pytest.raises(ValueError):
-            F.nn.interpolate(inp, scale_factor=[2.0, 3.0], mode="LINEAR")
+            F.vision.interpolate(inp, scale_factor=[2.0, 3.0], mode="LINEAR")

     linear_interpolate()
     many_batch_interpolate()
@@ -205,7 +205,7 @@ def test_roi_align():
     grad = Grad().wrt(inp_feat, callback=_save_to(inp_feat))

     output_shape = (7, 7)
-    out_feat = F.nn.roi_align(
+    out_feat = F.vision.roi_align(
         inp_feat,
         rois,
         output_shape=output_shape,
@@ -228,7 +228,7 @@ def test_roi_pooling():
     inp_feat, rois = _gen_roi_inp()
     grad = Grad().wrt(inp_feat, callback=_save_to(inp_feat))
     output_shape = (7, 7)
-    out_feat = F.nn.roi_pooling(
+    out_feat = F.vision.roi_pooling(
         inp_feat, rois, output_shape=output_shape, mode="max", scale=1.0 / 4,
     )
     assert make_shape_tuple(out_feat.shape) == (
@@ -335,18 +335,18 @@ def test_interpolate_fastpath():
     ]
     for inp_shape, target_shape in test_cases:
         x = tensor(np.random.randn(*inp_shape), dtype=np.float32)
-        out = F.nn.interpolate(x, target_shape, mode="BILINEAR")
+        out = F.vision.interpolate(x, target_shape, mode="BILINEAR")
         assert out.shape[0] == x.shape[0] and out.shape[1] == x.shape[1]
         assert out.shape[2] == target_shape[0] and out.shape[3] == target_shape[1]

     # check value
     x = tensor(np.ones((3, 3, 10, 10)), dtype=np.float32)
-    out = F.nn.interpolate(x, (15, 5), mode="BILINEAR")
+    out = F.vision.interpolate(x, (15, 5), mode="BILINEAR")
     np.testing.assert_equal(out.numpy(), np.ones((3, 3, 15, 5)).astype(np.float32))

     np_x = np.arange(32)
     x = tensor(np_x).astype(np.float32).reshape(1, 1, 32, 1)
-    out = F.nn.interpolate(x, (1, 1), mode="BILINEAR")
+    out = F.vision.interpolate(x, (1, 1), mode="BILINEAR")
     np.testing.assert_equal(out.item(), np_x.mean())
@@ -360,7 +360,7 @@ def test_warp_perspective():
            [[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
        ).reshape(M_shape)
    )
-    outp = F.warp_perspective(x, M, (2, 2))
+    outp = F.vision.warp_perspective(x, M, (2, 2))
     np.testing.assert_equal(
         outp.numpy(), np.array([[[[5.0, 6.0], [9.0, 10.0]]]], dtype=np.float32)
     )
@@ -370,7 +370,7 @@ def test_warp_affine():
     inp_shape = (1, 3, 3, 3)
     x = tensor(np.arange(27, dtype=np.float32).reshape(inp_shape))
     weightv = [[[1.26666667, 0.6, -83.33333333], [-0.33333333, 1, 66.66666667]]]
-    outp = F.warp_affine(x, tensor(weightv), (2, 2), border_mode="WRAP")
+    outp = F.vision.warp_affine(x, tensor(weightv), (2, 2), border_mode="WRAP")
     res = np.array(
         [
             [
                 [
@@ -393,7 +393,7 @@ def test_remap():
            [[[1.0, 0.0], [0.0, 1.0]], [[0.0, 1.0], [0.0, 1.0]]], dtype=np.float32
        ).reshape(map_xy_shape)
    )
-    outp = F.remap(inp, map_xy)
+    outp = F.vision.remap(inp, map_xy)
     np.testing.assert_equal(
         outp.numpy(), np.array([[[[1.0, 4.0], [4.0, 4.0]]]], dtype=np.float32)
     )
@@ -476,7 +476,7 @@ def test_nms():
     )
     inp = tensor(x)
     scores = tensor([0.5, 0.8, 0.9, 0.6], dtype=np.float32)
-    result = F.nn.nms(inp, scores=scores, iou_thresh=0.5)
+    result = F.vision.nms(inp, scores=scores, iou_thresh=0.5)
     np.testing.assert_equal(result.numpy(), np.array([2, 1, 3], dtype=np.int32))
@@ -737,7 +737,7 @@ def test_cvt_color():
     inp = np.random.randn(3, 3, 3, 3).astype(np.float32)
     out = np.expand_dims(rgb2gray(inp), 3).astype(np.float32)
     x = tensor(inp)
-    y = F.img_proc.cvt_color(x, mode="RGB2GRAY")
+    y = F.vision.cvt_color(x, mode="RGB2GRAY")
     np.testing.assert_allclose(y.numpy(), out, atol=1e-5)
imperative/python/test/unit/jit/test_tracing.py

@@ -360,7 +360,7 @@ def test_trace_warp_perspective():
     @trace(symbolic=True)
     def f(x, M):
-        out = F.warp_perspective(x, M, (2, 2))
+        out = F.vision.warp_perspective(x, M, (2, 2))
         np.testing.assert_equal(out.shape.numpy(), np.array([1, 1, 2, 2]))
         return out
@@ -429,10 +429,10 @@ def test_trace_nms():
     @trace(symbolic=False)
     def f(boxes, scores):
         # with tracing, max_output must be specified
-        results = F.nn.nms(boxes, scores=scores, iou_thresh=0.5, max_output=20)
+        results = F.vision.nms(boxes, scores=scores, iou_thresh=0.5, max_output=20)
         # without tracing, max output can be inferred inside nms
         with exclude_from_trace():
-            _ = F.nn.nms(boxes, scores=scores, iou_thresh=0.5)
+            _ = F.vision.nms(boxes, scores=scores, iou_thresh=0.5)
         return results

     f(*make_inputs(10))
imperative/python/test/unit/utils/test_network_node.py

@@ -226,7 +226,7 @@ def test_roipooling():
     @trace(symbolic=True, capture_as_const=True)
     def fwd(inp, rois):
-        return F.nn.roi_pooling(inp, rois, (2, 2), scale=2.0)
+        return F.vision.roi_pooling(inp, rois, (2, 2), scale=2.0)

     output = fwd(inp, rois)
     check_pygraph_dump(fwd, [inp, rois], [output])
@@ -315,7 +315,7 @@ def test_roialign():
     @trace(symbolic=True, capture_as_const=True)
     def fwd(inp, rois):
-        return F.nn.roi_align(inp, rois, (2, 2))
+        return F.vision.roi_align(inp, rois, (2, 2))

     output = fwd(inp, rois)
     check_pygraph_dump(fwd, [inp, rois], [output])
@@ -334,7 +334,7 @@ def test_warpperspective():
     @trace(symbolic=True, capture_as_const=True)
     def fwd(x, M):
-        return F.warp_perspective(x, M, (2, 2))
+        return F.vision.warp_perspective(x, M, (2, 2))

     result = fwd(x, M)
     check_pygraph_dump(fwd, [x, M], [result])
@@ -347,7 +347,7 @@ def test_warpaffine():
     @trace(symbolic=True, capture_as_const=True)
     def fwd(x, weightv):
-        return F.warp_affine(x, weightv, (2, 2), border_mode="WRAP")
+        return F.vision.warp_affine(x, weightv, (2, 2), border_mode="WRAP")

     outp = fwd(x, weightv)
     check_pygraph_dump(fwd, [x, weightv], [outp])
@@ -365,7 +365,7 @@ def test_remap():
     @trace(symbolic=True, capture_as_const=True)
     def fwd(inp, map_xy):
-        return F.remap(inp, map_xy)
+        return F.vision.remap(inp, map_xy)

     out = fwd(inp, map_xy)
     check_pygraph_dump(fwd, [inp, map_xy], [out])
@@ -376,7 +376,7 @@ def test_resize():
     @trace(symbolic=True, capture_as_const=True)
     def fwd(x):
-        return F.nn.interpolate(x, size=(16, 16), mode="BILINEAR")
+        return F.vision.interpolate(x, size=(16, 16), mode="BILINEAR")

     out = fwd(x)
     check_pygraph_dump(fwd, [x], [out])
@@ -706,7 +706,7 @@ def test_cvtcolor():
     @trace(symbolic=True, capture_as_const=True)
     def fwd(inp):
-        return F.img_proc.cvt_color(inp, mode="RGB2GRAY")
+        return F.vision.cvt_color(inp, mode="RGB2GRAY")

     result = fwd(x)
     check_pygraph_dump(fwd, [x], [result])
imperative/src/impl/ops/img_proc.cpp → imperative/src/impl/ops/vision.cpp

 /**
- * \file imperative/src/impl/ops/img_proc.cpp
+ * \file imperative/src/impl/ops/vision.cpp
  * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  *
  * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
@@ -31,4 +31,4 @@ OP_TRAIT_REG(CvtColor, CvtColor)
         .fallback();
 }
 }
-}
\ No newline at end of file
+}