From 7d8a7e3efeb2e24ca9506949a3f79e500bfa00af Mon Sep 17 00:00:00 2001
From: Megvii Engine Team
Date: Sat, 10 Oct 2020 19:40:58 +0800
Subject: [PATCH] feat(mge): do not export F.loss.*

GitOrigin-RevId: a3ce4d3d76de46d28ef3b13d71ed00b93dd533df
---
 imperative/python/megengine/functional/__init__.py     |  1 -
 imperative/python/megengine/functional/loss.py         | 10 +++++-----
 imperative/python/megengine/functional/nn.py           |  3 +--
 imperative/python/test/integration/test_converge.py    |  2 +-
 imperative/python/test/integration/test_correctness.py |  2 +-
 .../python/test/integration/test_dp_correctness.py     |  2 +-
 imperative/python/test/integration/test_trace_dump.py  |  4 ++--
 .../python/test/unit/functional/test_functional.py     | 10 ++++++----
 imperative/python/test/unit/functional/test_loss.py    |  8 ++++----
 9 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/imperative/python/megengine/functional/__init__.py b/imperative/python/megengine/functional/__init__.py
index 4d70c8a68..4dc2675aa 100644
--- a/imperative/python/megengine/functional/__init__.py
+++ b/imperative/python/megengine/functional/__init__.py
@@ -8,7 +8,6 @@
 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # pylint: disable=redefined-builtin
 from .elemwise import *
-from .loss import *
 from .math import *
 from .nn import *
 from .quantized import conv_bias_activation
diff --git a/imperative/python/megengine/functional/loss.py b/imperative/python/megengine/functional/loss.py
index e5e1b6a5d..8ed5958a4 100644
--- a/imperative/python/megengine/functional/loss.py
+++ b/imperative/python/megengine/functional/loss.py
@@ -55,7 +55,7 @@ def l1_loss(pred: Tensor, label: Tensor) -> Tensor:
 
         ipt = mge.tensor(np.array([3, 3, 3, 3]).astype(np.float32))
         tgt = mge.tensor(np.array([2, 8, 6, 1]).astype(np.float32))
-        loss = F.l1_loss(ipt, tgt)
+        loss = F.nn.l1_loss(ipt, tgt)
         print(loss.numpy())
 
     Outputs:
@@ -106,7 +106,7 @@ def square_loss(pred: Tensor, label: Tensor) -> Tensor:
 
         ipt = mge.tensor(np.array([3, 3, 3, 3]).astype(np.float32))
         tgt = mge.tensor(np.array([2, 8, 6, 1]).astype(np.float32))
-        loss = F.square_loss(ipt, tgt)
+        loss = F.nn.square_loss(ipt, tgt)
         print(loss.numpy())
 
     Outputs:
@@ -159,7 +159,7 @@ def cross_entropy(
         label_shape = (1, )
         pred = tensor(np.array([0, 0], dtype=np.float32).reshape(data_shape))
         label = tensor(np.ones(label_shape, dtype=np.int32))
-        loss = F.cross_entropy(pred, label)
+        loss = F.nn.cross_entropy(pred, label)
         print(loss.numpy())
 
     Outputs:
@@ -226,7 +226,7 @@ def binary_cross_entropy(
 
         pred = tensor(np.array([0, 0], dtype=np.float32).reshape(1, 2))
         label = tensor(np.ones((1, 2), dtype=np.float32))
-        loss = F.binary_cross_entropy(pred, label)
+        loss = F.nn.binary_cross_entropy(pred, label)
         print(loss.numpy())
 
     Outputs:
@@ -264,7 +264,7 @@ def hinge_loss(pred: Tensor, label: Tensor, norm: str = "L1") -> Tensor:
 
         pred = tensor([[0.5, -0.5, 0.1], [-0.6, 0.7, 0.8]], dtype="float32")
         label = tensor([[1, -1, -1], [-1, 1, 1]], dtype="float32")
-        loss = F.hinge_loss(pred, label)
+        loss = F.nn.hinge_loss(pred, label)
         print(loss.numpy())
 
     Outputs:
diff --git a/imperative/python/megengine/functional/nn.py b/imperative/python/megengine/functional/nn.py
index 1e62901d9..d146ce678 100644
--- a/imperative/python/megengine/functional/nn.py
+++ b/imperative/python/megengine/functional/nn.py
@@ -1522,5 +1522,4 @@ def nms(boxes: Tensor, scores: Tensor, iou_thresh: float) -> Tensor:
 
 
 
-from .loss import *  # isort:skip
-from .quantized import conv_bias_activation  # isort:skip
+from .loss import *
diff --git a/imperative/python/test/integration/test_converge.py b/imperative/python/test/integration/test_converge.py
index 0815ca634..d16570c7f 100644
--- a/imperative/python/test/integration/test_converge.py
+++ b/imperative/python/test/integration/test_converge.py
@@ -80,7 +80,7 @@ def test_training_converge():
     def train(data, label):
         with gm:
             pred = net(data)
-            loss = F.cross_entropy(pred, label)
+            loss = F.nn.cross_entropy(pred, label)
             gm.backward(loss)
         return loss
 
diff --git a/imperative/python/test/integration/test_correctness.py b/imperative/python/test/integration/test_correctness.py
index 9dd31cc00..d33bde89e 100644
--- a/imperative/python/test/integration/test_correctness.py
+++ b/imperative/python/test/integration/test_correctness.py
@@ -92,7 +92,7 @@ class MnistNet(Module):
 def train(data, label, net, opt, gm):
     with gm:
         pred = net(data)
-        loss = F.cross_entropy(pred, label)
+        loss = F.nn.cross_entropy(pred, label)
         gm.backward(loss)
     return loss
 
diff --git a/imperative/python/test/integration/test_dp_correctness.py b/imperative/python/test/integration/test_dp_correctness.py
index c47742724..3491cf5f3 100644
--- a/imperative/python/test/integration/test_dp_correctness.py
+++ b/imperative/python/test/integration/test_dp_correctness.py
@@ -98,7 +98,7 @@ def train(data, label, net, opt, gm):
     opt.clear_grad()
     with gm:
         pred = net(data)
-        loss = F.cross_entropy(pred, label)
+        loss = F.nn.cross_entropy(pred, label)
         gm.backward(loss)
     opt.step()
     return loss
diff --git a/imperative/python/test/integration/test_trace_dump.py b/imperative/python/test/integration/test_trace_dump.py
index 742803b45..37cb4fd46 100644
--- a/imperative/python/test/integration/test_trace_dump.py
+++ b/imperative/python/test/integration/test_trace_dump.py
@@ -72,7 +72,7 @@ def test_xornet_trace_dump():
         with gm:
             net.train()
             pred = net(data)
-            loss = F.cross_entropy(pred, label)
+            loss = F.nn.cross_entropy(pred, label)
             gm.backward(loss)
         return pred, loss
 
@@ -80,7 +80,7 @@ def test_xornet_trace_dump():
     def val_fun(data, label):
         net.eval()
         pred = net(data)
-        loss = F.cross_entropy(pred, label)
+        loss = F.nn.cross_entropy(pred, label)
         return pred, loss
 
     @trace(symbolic=True, capture_as_const=True)
diff --git a/imperative/python/test/unit/functional/test_functional.py b/imperative/python/test/unit/functional/test_functional.py
index 09ccdc3bf..25f96a262 100644
--- a/imperative/python/test/unit/functional/test_functional.py
+++ b/imperative/python/test/unit/functional/test_functional.py
@@ -317,14 +317,16 @@ def test_binary_cross_entropy():
         {"input": [data1, label1], "output": expect1,},
         {"input": [data2, label2], "output": expect2,},
     ]
-    opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)
+    opr_test(cases, F.nn.binary_cross_entropy, compare_fn=compare_fn)
 
     cases = [
         {"input": [sigmoid(data1), label1], "output": expect1,},
         {"input": [sigmoid(data2), label2], "output": expect2,},
     ]
     opr_test(
-        cases, partial(F.binary_cross_entropy, with_logits=False), compare_fn=compare_fn
+        cases,
+        partial(F.nn.binary_cross_entropy, with_logits=False),
+        compare_fn=compare_fn,
     )
 
 
@@ -338,7 +340,7 @@ def test_hinge_loss():
         expect = np.clip(0, np.inf, 1 - data * label).sum(axis=1).mean()
         cases.append({"input": [data, label], "output": expect})
 
-    opr_test(cases, F.hinge_loss)
+    opr_test(cases, F.nn.hinge_loss)
 
     # cases with L2 norm
     cases = []
@@ -349,7 +351,7 @@ def test_hinge_loss():
         cases.append({"input": [data, label], "output": expect})
 
     def hinge_loss_with_l2_norm(pred, label):
-        return F.hinge_loss(pred, label, "L2")
+        return F.nn.hinge_loss(pred, label, "L2")
 
     opr_test(cases, hinge_loss_with_l2_norm)
diff --git a/imperative/python/test/unit/functional/test_loss.py b/imperative/python/test/unit/functional/test_loss.py
index 8464a3a21..8bfd1cd5e 100644
--- a/imperative/python/test/unit/functional/test_loss.py
+++ b/imperative/python/test/unit/functional/test_loss.py
@@ -15,14 +15,14 @@ from megengine import tensor
 
 def test_cross_entropy_with_logits():
     data = tensor([1, 100]).astype(np.float32).reshape((1, 2))
     label = tensor([1]).astype(np.int32)
-    loss = F.cross_entropy(data, label)
+    loss = F.nn.cross_entropy(data, label)
     np.testing.assert_allclose(loss.numpy(), 0.0)
 
     label = tensor([0]).astype(np.int32)
-    loss = F.cross_entropy(data, label)
+    loss = F.nn.cross_entropy(data, label)
     np.testing.assert_allclose(loss.numpy(), 100 - 1)
 
     label = np.array([1])
-    loss = F.cross_entropy(data, label)
+    loss = F.nn.cross_entropy(data, label)
     np.testing.assert_allclose(loss.numpy(), 0.0)
@@ -41,5 +41,5 @@ def test_cross_entropy():
         x[i, y[i]] += np.random.rand() * 2
     x = softmax(x)
     l_ref = ref(x, y)
-    l = F.cross_entropy(tensor(x, "float32"), tensor(y, "int32"), with_logits=False)
+    l = F.nn.cross_entropy(tensor(x, "float32"), tensor(y, "int32"), with_logits=False)
     np.testing.assert_allclose(l.numpy(), l_ref)
-- 
GitLab
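
Note on the user-facing change: this patch drops the "from .loss import *" re-export from
megengine/functional/__init__.py, while nn.py keeps its own "from .loss import *", so the
loss functions remain reachable through the nn namespace. A minimal migration sketch,
reusing the logits/label values from test_cross_entropy_with_logits above (the alias F
follows the import convention used in the tests):

    import numpy as np
    import megengine as mge
    import megengine.functional as F

    pred = mge.tensor(np.array([[1.0, 100.0]], dtype=np.float32))
    label = mge.tensor(np.array([1], dtype=np.int32))

    # Before this patch, F.cross_entropy and F.nn.cross_entropy resolved to the
    # same function; after it, only the nn-qualified spelling is exported.
    loss = F.nn.cross_entropy(pred, label)
    print(loss.numpy())  # ~0.0, since the logits already pick class 1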