MegEngine 天元 / MegEngine

Commit 7d8a7e3e
Authored Oct 10, 2020 by Megvii Engine Team

feat(mge): do not export F.loss.*

GitOrigin-RevId: a3ce4d3d76de46d28ef3b13d71ed00b93dd533df
Parent: 480375fb
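
The user-facing effect of this commit, which the test updates below mirror one-for-one, is that the loss functions are no longer re-exported at the top level of megengine.functional and are instead called through the F.nn namespace. A minimal sketch of a migrated call site, assuming the MegEngine API at this revision (the example tensors are illustrative, not taken from the diff):

    import numpy as np
    import megengine.functional as F
    from megengine import tensor

    pred = tensor(np.zeros((1, 2), dtype=np.float32))   # logits for one sample
    label = tensor(np.ones((1,), dtype=np.int32))

    # before this commit: loss = F.cross_entropy(pred, label)
    loss = F.nn.cross_entropy(pred, label)               # after this commit
    print(loss.numpy())                                   # log(2) ~ 0.6931 for uniform logits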
Showing 9 changed files with 21 additions and 21 deletions (+21 −21)
imperative/python/megengine/functional/__init__.py         +0 −1
imperative/python/megengine/functional/loss.py              +5 −5
imperative/python/megengine/functional/nn.py                +1 −2
imperative/python/test/integration/test_converge.py         +1 −1
imperative/python/test/integration/test_correctness.py      +1 −1
imperative/python/test/integration/test_dp_correctness.py   +1 −1
imperative/python/test/integration/test_trace_dump.py       +2 −2
imperative/python/test/unit/functional/test_functional.py   +6 −4
imperative/python/test/unit/functional/test_loss.py         +4 −4
imperative/python/megengine/functional/__init__.py

@@ -8,7 +8,6 @@
 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # pylint: disable=redefined-builtin
 from .elemwise import *
-from .loss import *
 from .math import *
 from .nn import *
 from .quantized import conv_bias_activation
imperative/python/megengine/functional/loss.py

@@ -55,7 +55,7 @@ def l1_loss(pred: Tensor, label: Tensor) -> Tensor:
         ipt = mge.tensor(np.array([3, 3, 3, 3]).astype(np.float32))
         tgt = mge.tensor(np.array([2, 8, 6, 1]).astype(np.float32))
-        loss = F.l1_loss(ipt, tgt)
+        loss = F.nn.l1_loss(ipt, tgt)
         print(loss.numpy())

     Outputs:

@@ -106,7 +106,7 @@ def square_loss(pred: Tensor, label: Tensor) -> Tensor:
         ipt = mge.tensor(np.array([3, 3, 3, 3]).astype(np.float32))
         tgt = mge.tensor(np.array([2, 8, 6, 1]).astype(np.float32))
-        loss = F.square_loss(ipt, tgt)
+        loss = F.nn.square_loss(ipt, tgt)
         print(loss.numpy())

     Outputs:

@@ -159,7 +159,7 @@ def cross_entropy(
         label_shape = (1, )
         pred = tensor(np.array([0, 0], dtype=np.float32).reshape(data_shape))
         label = tensor(np.ones(label_shape, dtype=np.int32))
-        loss = F.cross_entropy(pred, label)
+        loss = F.nn.cross_entropy(pred, label)
         print(loss.numpy())

     Outputs:

@@ -226,7 +226,7 @@ def binary_cross_entropy(
         pred = tensor(np.array([0, 0], dtype=np.float32).reshape(1, 2))
         label = tensor(np.ones((1, 2), dtype=np.float32))
-        loss = F.binary_cross_entropy(pred, label)
+        loss = F.nn.binary_cross_entropy(pred, label)
         print(loss.numpy())

     Outputs:

@@ -264,7 +264,7 @@ def hinge_loss(pred: Tensor, label: Tensor, norm: str = "L1") -> Tensor:
         pred = tensor([[0.5, -0.5, 0.1], [-0.6, 0.7, 0.8]], dtype="float32")
         label = tensor([[1, -1, -1], [-1, 1, 1]], dtype="float32")
-        loss = F.hinge_loss(pred, label)
+        loss = F.nn.hinge_loss(pred, label)
         print(loss.numpy())

     Outputs:
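
Each hunk above only changes the docstring example to the F.nn spelling; the underlying computation is untouched. For reference, a runnable version of the first (l1_loss) example with the value it should print, assuming l1_loss reduces by the mean as the example data implies:

    import numpy as np
    import megengine as mge
    import megengine.functional as F

    ipt = mge.tensor(np.array([3, 3, 3, 3]).astype(np.float32))
    tgt = mge.tensor(np.array([2, 8, 6, 1]).astype(np.float32))

    loss = F.nn.l1_loss(ipt, tgt)
    # |3-2| + |3-8| + |3-6| + |3-1| = 11, averaged over 4 elements -> 2.75
    print(loss.numpy())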
imperative/python/megengine/functional/nn.py

@@ -1522,5 +1522,4 @@ def nms(boxes: Tensor, scores: Tensor, iou_thresh: float) -> Tensor:
-from .loss import *  # isort:skip
 from .quantized import conv_bias_activation  # isort:skip
+from .loss import *
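
Note that nn.py keeps a star-import of the loss module at the bottom of the file (now without the isort:skip marker on that line), which is what keeps names such as cross_entropy reachable as F.nn.cross_entropy even though functional/__init__.py no longer pulls them in. A quick sanity check of that intent, not part of the diff; whether the old top-level F.cross_entropy name still resolves in a given build is not settled by this hunk alone:

    import megengine.functional as F

    # the loss functions are expected to resolve through the nn namespace
    print(callable(F.nn.cross_entropy))   # True
    print(callable(F.nn.hinge_loss))      # True

    # the commit title says F.loss.* is no longer exported at the top level
    print(hasattr(F, "cross_entropy"))    # depends on how `from .nn import *` re-exports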
imperative/python/test/integration/test_converge.py

@@ -80,7 +80,7 @@ def test_training_converge():
     def train(data, label):
         with gm:
             pred = net(data)
-            loss = F.cross_entropy(pred, label)
+            loss = F.nn.cross_entropy(pred, label)
             gm.backward(loss)
         return loss
imperative/python/test/integration/test_correctness.py

@@ -92,7 +92,7 @@ class MnistNet(Module):
 def train(data, label, net, opt, gm):
     with gm:
         pred = net(data)
-        loss = F.cross_entropy(pred, label)
+        loss = F.nn.cross_entropy(pred, label)
         gm.backward(loss)
     return loss
imperative/python/test/integration/test_dp_correctness.py

@@ -98,7 +98,7 @@ def train(data, label, net, opt, gm):
     opt.clear_grad()
     with gm:
         pred = net(data)
-        loss = F.cross_entropy(pred, label)
+        loss = F.nn.cross_entropy(pred, label)
         gm.backward(loss)
     opt.step()
     return loss
imperative/python/test/integration/test_trace_dump.py

@@ -72,7 +72,7 @@ def test_xornet_trace_dump():
         with gm:
             net.train()
             pred = net(data)
-            loss = F.cross_entropy(pred, label)
+            loss = F.nn.cross_entropy(pred, label)
             gm.backward(loss)
         return pred, loss

@@ -80,7 +80,7 @@ def test_xornet_trace_dump():
     def val_fun(data, label):
         net.eval()
         pred = net(data)
-        loss = F.cross_entropy(pred, label)
+        loss = F.nn.cross_entropy(pred, label)
         return pred, loss

     @trace(symbolic=True, capture_as_const=True)
imperative/python/test/unit/functional/test_functional.py

@@ -317,14 +317,16 @@ def test_binary_cross_entropy():
         {"input": [data1, label1], "output": expect1,},
         {"input": [data2, label2], "output": expect2,},
     ]
-    opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)
+    opr_test(cases, F.nn.binary_cross_entropy, compare_fn=compare_fn)

     cases = [
         {"input": [sigmoid(data1), label1], "output": expect1,},
         {"input": [sigmoid(data2), label2], "output": expect2,},
     ]
     opr_test(
-        cases, partial(F.binary_cross_entropy, with_logits=False), compare_fn=compare_fn
+        cases,
+        partial(F.nn.binary_cross_entropy, with_logits=False),
+        compare_fn=compare_fn,
     )

@@ -338,7 +340,7 @@ def test_hinge_loss():
     expect = np.clip(0, np.inf, 1 - data * label).sum(axis=1).mean()
     cases.append({"input": [data, label], "output": expect})
-    opr_test(cases, F.hinge_loss)
+    opr_test(cases, F.nn.hinge_loss)

     # cases with L2 norm
     cases = []

@@ -349,7 +351,7 @@ def test_hinge_loss():
         cases.append({"input": [data, label], "output": expect})

     def hinge_loss_with_l2_norm(pred, label):
-        return F.hinge_loss(pred, label, "L2")
+        return F.nn.hinge_loss(pred, label, "L2")

     opr_test(cases, hinge_loss_with_l2_norm)
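
Besides swapping in F.nn.binary_cross_entropy, the first hunk reflows the second opr_test call onto one argument per line with a trailing comma. The functools.partial wrapper is what lets the harness keep calling the case function with just (pred, label) while with_logits=False stays pre-bound; a small standalone illustration of that pattern (the probabilities below are made up for the example, not taken from the test):

    from functools import partial

    import numpy as np
    import megengine.functional as F
    from megengine import tensor

    # partial() pre-binds with_logits=False, so the resulting callable has the
    # plain (pred, label) signature that each test case supplies.
    bce_on_probs = partial(F.nn.binary_cross_entropy, with_logits=False)

    pred = tensor(np.full((1, 2), 0.5, dtype=np.float32))   # already probabilities
    label = tensor(np.ones((1, 2), dtype=np.float32))
    print(bce_on_probs(pred, label).numpy())                 # -log(0.5) ~ 0.6931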
imperative/python/test/unit/functional/test_loss.py

@@ -15,14 +15,14 @@ from megengine import tensor
 def test_cross_entropy_with_logits():
     data = tensor([1, 100]).astype(np.float32).reshape((1, 2))
     label = tensor([1]).astype(np.int32)
-    loss = F.cross_entropy(data, label)
+    loss = F.nn.cross_entropy(data, label)
     np.testing.assert_allclose(loss.numpy(), 0.0)
     label = tensor([0]).astype(np.int32)
-    loss = F.cross_entropy(data, label)
+    loss = F.nn.cross_entropy(data, label)
     np.testing.assert_allclose(loss.numpy(), 100 - 1)

     label = np.array([1])
-    loss = F.cross_entropy(data, label)
+    loss = F.nn.cross_entropy(data, label)
     np.testing.assert_allclose(loss.numpy(), 0.0)

@@ -41,5 +41,5 @@ def test_cross_entropy():
         x[i, y[i]] += np.random.rand() * 2
     x = softmax(x)
     l_ref = ref(x, y)
-    l = F.cross_entropy(tensor(x, "float32"), tensor(y, "int32"), with_logits=False)
+    l = F.nn.cross_entropy(tensor(x, "float32"), tensor(y, "int32"), with_logits=False)
     np.testing.assert_allclose(l.numpy(), l_ref)
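
The expected values asserted in the first hunk follow from the softmax cross-entropy formula: for logits x and target class t, the loss is logsumexp(x) - x[t]. With x = [1, 100] the second logit dominates, so logsumexp(x) is essentially 100; the loss is therefore ~0 when the label is 1 and ~100 - 1 = 99 when the label is 0, which is exactly what the assertions check. A NumPy-only sketch of that arithmetic:

    import numpy as np

    x = np.array([1.0, 100.0])
    lse = np.log(np.exp(x - x.max()).sum()) + x.max()   # numerically stable logsumexp

    # cross-entropy with logits: logsumexp(x) - x[label]
    print(lse - x[1])   # ~0.0, matches assert_allclose(loss.numpy(), 0.0)
    print(lse - x[0])   # ~99.0, matches assert_allclose(loss.numpy(), 100 - 1)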