Commit 715009b5
Authored on Sep 14, 2020 by Megvii Engine Team

refactor(mge/api): remove external, dropout fix

GitOrigin-RevId: 5e6ff1a372522be2e7af85a55edf038f0520ddab
Parent: 9005cf74
Showing 5 changed files with 48 additions and 203 deletions (+48, -203):

  imperative/python/megengine/functional/nn.py                  +1   -1
  imperative/python/megengine/module/dropout.py                 +1   -1
  imperative/python/megengine/module/external.py                +0   -56
  imperative/python/test/unit/functional/test_functional.py     +46  -55
  imperative/python/test/unit/functional/test_math.py           +0   -90
imperative/python/megengine/functional/nn.py

@@ -1226,7 +1226,7 @@ def dropout(inp: Tensor, drop_prob: float, training: bool = True) -> Tensor:
     """
     assert 0 <= drop_prob < 1
-    rv = uniform(inp.shape)
+    rv = uniform(size=inp.shape)
     mask = rv > drop_prob
     inp *= mask.astype(inp.dtype)
     if training:
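
The one-line fix passes the output shape to uniform by keyword rather than positionally, so it can no longer bind to a different leading parameter. For reference, a minimal NumPy sketch of the computation in this hunk, assuming the elided if training: branch performs the usual inverted-dropout rescale:

    import numpy as np

    def dropout_sketch(inp: np.ndarray, drop_prob: float, training: bool = True) -> np.ndarray:
        # NumPy rendition of the patched code path, for illustration only.
        assert 0 <= drop_prob < 1
        rv = np.random.uniform(size=inp.shape)  # one uniform sample per element
        mask = rv > drop_prob                   # True where the element is kept
        out = inp * mask.astype(inp.dtype)      # zero out dropped positions
        if training:
            out *= 1.0 / (1.0 - drop_prob)      # assumed rescale so E[out] == E[inp]
        return out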
imperative/python/megengine/module/dropout.py

@@ -25,6 +25,6 @@ class Dropout(Module):
     def forward(self, inputs):
         if self.training:
-            return dropout(inputs, self.drop_prob, rescale=True)
+            return dropout(inputs, self.drop_prob, training=True)
         else:
             return inputs
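
With the API cleanup, the module now forwards training=True instead of the removed rescale=True keyword; the eval-mode passthrough is unchanged. A short usage sketch, assuming the Dropout constructor takes drop_prob as in this file and the usual Module.train()/.eval() mode switches:

    import numpy as np
    from megengine import tensor
    from megengine.module import Dropout

    m = Dropout(0.2)                               # drop_prob assumed positional
    x = tensor(np.ones((4, 4), dtype=np.float32))

    m.train()        # self.training == True: functional dropout is applied
    y_train = m(x)   # ~20% of entries zeroed, survivors rescaled

    m.eval()         # self.training == False: inputs pass through unchanged
    y_eval = m(x)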
imperative/python/megengine/module/external.py
deleted (100644 → 0)

# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np

from ..functional import cambricon_subgraph, extern_opr_subgraph
from .module import Module


class CambriconSubgraph(Module):
    r"""Load a serialized Cambricon subgraph.

    See :func:`~.cambricon_subgraph` for more details.
    """

    def __init__(
        self, data, symbol, tensor_dim_mutable,
    ):
        super(CambriconSubgraph, self).__init__()
        self._data = data
        self.symbol = symbol
        self.tensor_dim_mutable = tensor_dim_mutable

    @property
    def data(self):
        return self._data.tobytes()

    @data.setter
    def data(self, val):
        self._data = np.frombuffer(val, dtype=np.uint8)

    def forward(self, inputs):
        outputs = cambricon_subgraph(
            inputs, self._data, self.symbol, self.tensor_dim_mutable,
        )
        return outputs


class ExternOprSubgraph(Module):
    r"""Load a serialized extern opr subgraph."""

    def __init__(self, data, name, output_shapes):
        super(ExternOprSubgraph, self).__init__()
        self.data = data
        self.name = name
        self.output_shapes = output_shapes

    def forward(self, inputs):
        outputs = extern_opr_subgraph(inputs, self.output_shapes, self.name, self.data,)
        return outputs
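
For context, a hypothetical usage sketch of the removed wrapper, valid only before this commit (i.e. at parent 9005cf74); the model blob and symbol name are placeholders:

    import numpy as np
    from megengine.module.external import CambriconSubgraph  # removed by this commit

    model_bytes = np.zeros(16, dtype=np.uint8)  # placeholder serialized model
    m = CambriconSubgraph(
        data=model_bytes,
        symbol="subnet0",           # placeholder symbol name
        tensor_dim_mutable=False,
    )
    raw = m.data    # property returns the blob as bytes
    m.data = raw    # setter re-wraps the bytes via np.frombuffer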
imperative/python/test/unit/functional/test_functional.py

@@ -113,6 +113,52 @@ def test_where():
     opr_test(cases, F.where, ref_fn=np.where)
 
 
+def test_dropout():
+    data = tensor(np.ones(10, dtype=np.float32))
+    out = F.dropout(data, 1.0 / 3.0, training=False)
+    assert out.numpy().sum() >= 0.0
+
+
+def test_matmul():
+    shape1 = 3
+    shape2 = 3
+    shape3 = (3, 5)
+    shape4 = (5, 6)
+
+    data1 = np.random.random(shape1).astype("float32")
+    data2 = np.random.random(shape2).astype("float32")
+    data3 = np.random.random(shape3).astype("float32")
+    data4 = np.random.random(shape4).astype("float32")
+
+    cases = [
+        {"input": [data1, data2]},
+        {"input": [data2, data3]},
+        {"input": [data3, data4]},
+    ]
+    opr_test(cases, F.matmul, ref_fn=np.matmul)
+
+    batch_size = 10
+    shape1 = (batch_size, 2, 3)
+    shape2 = (batch_size, 3, 4)
+    shape3 = (batch_size, 10, 4, 5)
+
+    data1 = np.random.random(shape1).astype("float32")
+    data2 = np.random.random(shape2).astype("float32")
+    data3 = np.random.random(shape3).astype("float32")
+
+    cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
+    for i in range(0, batch_size):
+
+        def compare_fn(x, y):
+            x.numpy()[i, ...] == y
+
+        opr_test(
+            cases,
+            F.matmul,
+            compare_fn=compare_fn,
+            ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
+        )
+
+
 def test_interpolate():
     def linear_interpolate():
         inp = tensor(np.arange(1, 3, dtype=np.float32).reshape(1, 1, 2))
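
The new test_dropout only pins down the training=False passthrough (the sum of an all-ones input stays non-negative). A hedged sketch, not part of the commit, of how the training path could be checked statistically, assuming the dropout callable accepts and returns array-likes:

    import numpy as np

    def check_dropout_rate(dropout_fn, n=100_000, drop_prob=1.0 / 3.0):
        # With a large sample, the fraction of zeroed entries should be
        # close to drop_prob.
        data = np.ones(n, dtype=np.float32)
        out = np.asarray(dropout_fn(data, drop_prob))
        zero_frac = float((out == 0).mean())
        assert abs(zero_frac - drop_prob) < 0.01  # loose statistical tolerance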
@@ -281,48 +327,6 @@ def test_add_update_params():
     assertTensorClose(res.numpy(), b + 1)
 
 
-# def test_cross_entropy_with_softmax():
-#     data1_shape = (1, 2)
-#     label1_shape = (1,)
-#     data2_shape = (1, 3)
-#     label2_shape = (1,)
-#     data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
-#     label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
-#     expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()
-#     data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
-#     label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
-#     expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()
-#     cases = [
-#         {"input": [data1, label1], "output": expect1,},
-#         {"input": [data2, label2], "output": expect2,},
-#     ]
-#     opr_test(cases, F.cross_entropy_with_softmax)
-
-
-# def test_cross_entropy():
-#     data1_shape = (1, 2)
-#     label1_shape = (1,)
-#     data2_shape = (1, 3)
-#     label2_shape = (1,)
-#     data1 = np.array([0.5, 0.5], dtype=np.float32).reshape(data1_shape)
-#     label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
-#     expect1 = np.array([-np.log(0.5)], dtype=np.float32)
-#     data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
-#     label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
-#     expect2 = np.array([-np.log(0.4)], dtype=np.float32)
-#     cases = [
-#         {"input": [data1, label1], "output": expect1,},
-#         {"input": [data2, label2], "output": expect2,},
-#     ]
-#     opr_test(cases, F.cross_entropy)
-
-
 def test_binary_cross_entropy():
     data1_shape = (2, 2)
     label1_shape = (2, 2)
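
The removed comments encode the textbook expectation: with class probabilities p and true label y, cross entropy is -log(p[y]). A quick check of the value used above, outside of the commit:

    import numpy as np

    # For probabilities [0.3, 0.4, 0.3] and label 1, cross entropy is -log(0.4).
    p = np.array([0.3, 0.4, 0.3], dtype=np.float32)
    assert np.isclose(-np.log(p[1]), 0.9162907, atol=1e-6)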
@@ -413,19 +417,6 @@ def test_batched_nms():
     np.testing.assert_equal(results.numpy(), np.array([1, 4, 5], dtype=np.int32))
 
 
-# def test_smooth_l1_loss():
-#     np.random.seed(123)
-#     cases = []
-#     for shape in [(2, 2), (2, 3)]:
-#         data = np.random.uniform(size=shape).astype(np.float32)
-#         label = np.random.uniform(size=shape).astype(np.float32)
-#         diff = np.abs(data - label)
-#         expect = np.where(diff < 1, 0.5 * diff ** 2, diff - 0.5).mean()
-#         cases.append({"input": [data, label], "output": tensor(expect)})
-#     opr_test(cases, F.smooth_l1_loss)
-
-
 def test_conv_bias():
     inp_scale = 1.5
     w_scale = 2.5
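
The reference formula in the removed comment is the standard smooth-L1: quadratic below |d| = 1 and linear above, with the two pieces meeting at 0.5. A one-line check, for illustration:

    import numpy as np

    d = np.array([0.5, 1.0, 2.0])
    expect = np.where(d < 1, 0.5 * d ** 2, d - 0.5)
    assert np.allclose(expect, [0.125, 0.5, 1.5])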
imperative/python/test/unit/functional/test_math.py

@@ -203,93 +203,3 @@ def test_normalize():
     cases[0]["input"][0, 0, 0, :] = 0
     cases[1]["input"][0, 0, 0, :] = 0
     opr_test(cases, partial(F.normalize, axis=3), ref_fn=partial(np_normalize, axis=3))
-
-
-def test_matmul():
-    shape1 = 3
-    shape2 = 3
-    shape3 = (3, 5)
-    shape4 = (5, 6)
-
-    data1 = np.random.random(shape1).astype("float32")
-    data2 = np.random.random(shape2).astype("float32")
-    data3 = np.random.random(shape3).astype("float32")
-    data4 = np.random.random(shape4).astype("float32")
-
-    cases = [
-        {"input": [data1, data2]},
-        {"input": [data2, data3]},
-        {"input": [data3, data4]},
-    ]
-    opr_test(cases, F.matmul, ref_fn=np.matmul)
-
-    batch_size = 10
-    shape1 = (batch_size, 2, 3)
-    shape2 = (batch_size, 3, 4)
-    shape3 = (batch_size, 10, 4, 5)
-
-    data1 = np.random.random(shape1).astype("float32")
-    data2 = np.random.random(shape2).astype("float32")
-    data3 = np.random.random(shape3).astype("float32")
-
-    cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
-    for i in range(0, batch_size):
-
-        def compare_fn(x, y):
-            x.numpy()[i, ...] == y
-
-        opr_test(
-            cases,
-            F.matmul,
-            compare_fn=compare_fn,
-            ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
-        )
-
-
-# def test_logsumexp():
-#     x = np.arange(10).astype(np.float32)
-#     expected = np.log(np.sum(np.exp(x)))
-#     cases = [{"input": x, "output": expected}]
-#     compare_fn = partial(assertTensorClose, allow_special_values=True)
-#     # large value check
-#     n = 100
-#     x = np.full(n, 10000, dtype=np.float32)
-#     expected = 10000 + np.log(n)
-#     cases.append({"input": x, "output": expected.astype(np.float32)})
-#     opr_test(cases, F.logsumexp, axis=0, compare_fn=compare_fn)
-
-#     # special value check
-#     x = np.array([np.inf], dtype=np.float32)
-#     expected = x
-#     cases = [{"input": x, "output": expected}]
-#     x = np.array([-np.inf, 0.0], dtype=np.float32)
-#     expected = np.zeros(1).astype(np.float32)
-#     cases.append({"input": x, "output": expected})
-#     opr_test(cases, F.logsumexp, axis=0, compare_fn=compare_fn)
-
-#     x = np.array([np.nan], dtype=np.float32)
-#     expected = x
-#     cases = [{"input": x, "output": expected}]
-#     x = np.array([-np.inf, 1], dtype=np.float32)
-#     expected = np.array([1.0], dtype=np.float32)
-#     cases.append({"input": x, "output": expected})
-#     opr_test(cases, F.logsumexp, axis=0, compare_fn=compare_fn)
-
-#     # keepdims check
-#     x = np.array([[1e10, 1e-10], [-1e10, -np.inf]], dtype=np.float32)
-#     expected = np.array([[1e10], [-1e10]], dtype=np.float32)
-#     cases = [{"input": x, "output": expected}]
-#     x = np.array([[1e10, -1e-10, 1e-10], [1e10, 1e-10, np.inf]], dtype=np.float32)
-#     expected = np.array([[1e10], [np.inf]], dtype=np.float32)
-#     cases.append({"input": x, "output": expected})
-#     opr_test(cases, F.logsumexp, axis=1, keepdims=True, compare_fn=compare_fn)
-
-#     # multiple axes check
-#     x = np.array([[1e10, 1e-10], [-1e10, -np.inf]], dtype=np.float32)
-#     expected = np.array([1e10], dtype=np.float32)
-#     cases = [{"input": x, "output": expected}]
-#     x = np.array([[1e10, -1e-10, 1e-10], [1e10, 1e-10, np.inf]], dtype=np.float32)
-#     expected = np.array([np.inf], dtype=np.float32)
-#     cases.append({"input": x, "output": expected})
-#     opr_test(cases, F.logsumexp, axis=(0, 1), keepdims=False, compare_fn=compare_fn)
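
The removed logsumexp checks (e.g. n copies of 10000 should give exactly 10000 + log(n)) only pass with the standard max-shift trick; a naive np.log(np.sum(np.exp(x))) overflows float32 at that magnitude. A NumPy sketch of that trick, not MegEngine's implementation:

    import numpy as np

    def logsumexp_sketch(x, axis=None, keepdims=False):
        # Shift by the max so the largest exponent is exp(0) == 1.
        m = np.max(x, axis=axis, keepdims=True)
        m = np.where(np.isfinite(m), m, 0.0)  # guard: an all -inf input would give nan
        s = np.sum(np.exp(x - m), axis=axis, keepdims=True)
        out = m + np.log(s)
        return out if keepdims else np.squeeze(out, axis=axis)

    x = np.full(100, 10000, dtype=np.float32)
    assert np.isclose(logsumexp_sketch(x, axis=0), 10000 + np.log(100))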