提交 32b2c45c 编写于 作者: M Megvii Engine Team

fix(ci): fix the problem that torch not found

GitOrigin-RevId: f8671d931b91d651193b56142f31f8d66505f21e
上级 266263dc
...@@ -5,7 +5,6 @@ from tempfile import NamedTemporaryFile ...@@ -5,7 +5,6 @@ from tempfile import NamedTemporaryFile
import numpy as np import numpy as np
import pytest import pytest
import torch
from utils import make_tensor from utils import make_tensor
import megengine import megengine
...@@ -624,25 +623,6 @@ def test_advance_indexing_with_bool(test_varnode): ...@@ -624,25 +623,6 @@ def test_advance_indexing_with_bool(test_varnode):
) )
def test_advance_indexing_autodiff():
x = Tensor([2, 2, 3, 4, 5, 6, 7, 8, 2], dtype="float32")
gm = GradManager()
gm.attach(x)
with gm:
a = x + 1
a[x > 3] = 0.3
b = a + 1
gm.backward(b.sum())
torch_x = torch.tensor(
[2, 2, 3, 4, 5, 6, 7, 8, 2], dtype=torch.float32, requires_grad=True
)
a = torch_x + 1
a[torch_x > 3] = 0.3
b = a + 1
(b.sum()).backward()
np.testing.assert_equal(x.grad.numpy(), torch_x.grad.numpy())
@pytest.mark.parametrize("symbolic", [True, False, None]) @pytest.mark.parametrize("symbolic", [True, False, None])
def test_subtensor_on_empty_tensor(symbolic): def test_subtensor_on_empty_tensor(symbolic):
np_x = np.array([], dtype=np.float32).reshape(10, 0, 10) np_x = np.array([], dtype=np.float32).reshape(10, 0, 10)
......
...@@ -227,80 +227,6 @@ def test_split_basic(is_varnode): ...@@ -227,80 +227,6 @@ def test_split_basic(is_varnode):
set_symbolic_shape(saved_symbolic_shape) set_symbolic_shape(saved_symbolic_shape)
def test_concat_and_stack():
import copy
from megengine.autodiff import GradManager
import torch
def generate_test_data(max_nr_inp, max_dim, max_dim_len, test_concat=True):
nr_inp = np.random.randint(1, max_nr_inp) if max_nr_inp > 1 else 1
dims = np.random.randint(1, max_dim)
cat_axis = (
np.random.randint(-dims, dims)
if test_concat
else np.random.randint(-dims - 1, dims + 1)
)
ishape = [np.random.randint(0, max_dim_len) for _ in range(dims)]
ishapes = [copy.deepcopy(ishape) for _ in range(nr_inp)]
if test_concat:
for i in range(nr_inp):
ishapes[i][cat_axis] = np.random.randint(0, max_dim_len)
inp_nps = []
for ishape in ishapes:
inp_nps.append(np.random.randn(*ishape))
return inp_nps, cat_axis
def test_impl(max_nr_inp, max_dim, max_dim_len, test_concat):
inp_nps, cat_axis = generate_test_data(
max_nr_inp, max_dim, max_dim_len, test_concat
)
inp_mges = [Tensor(inp_np) for inp_np in inp_nps]
inp_torchs = [torch.tensor(inp_np, requires_grad=True) for inp_np in inp_nps]
if test_concat:
np_func, mge_func, torch_func = np.concatenate, F.concat, torch.cat
else:
np_func, mge_func, torch_func = np.stack, F.stack, torch.stack
res_np = np_func(inp_nps, axis=cat_axis)
grad_np = np.random.randn(*res_np.shape).astype(np.float32)
gm = GradManager().attach(inp_mges)
with gm:
res_mge = mge_func(inp_mges, axis=cat_axis)
gm.backward(res_mge, Tensor(grad_np))
res_torch = torch_func(inp_torchs, dim=cat_axis)
res_torch.backward(torch.tensor(grad_np))
np.testing.assert_allclose(res_mge.numpy(), res_torch.detach().cpu().numpy())
for inp_mge, inp_torch in zip(inp_mges, inp_torchs):
np.testing.assert_allclose(
inp_mge.grad.numpy(), inp_torch.grad.detach().cpu().numpy()
)
def test_concat(max_nr_inp, max_dim, max_dim_len):
test_impl(max_nr_inp, max_dim, max_dim_len, test_concat=True)
def test_stack(max_nr_inp, max_dim, max_dim_len):
test_impl(max_nr_inp, max_dim, max_dim_len, test_concat=False)
# test only one input
test_concat(1, 7, 16)
test_stack(1, 7, 16)
# test zero shape
test_concat(10, 7, 1)
test_stack(10, 7, 1)
for _ in range(3):
test_concat(10, 7, 16)
for _ in range(3):
test_stack(10, 7, 16)
@pytest.mark.parametrize("symbolic", [None, False, True]) @pytest.mark.parametrize("symbolic", [None, False, True])
def test_split(symbolic): def test_split(symbolic):
x = Tensor(np.random.random((10, 20)), dtype=np.float32) x = Tensor(np.random.random((10, 20)), dtype=np.float32)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册