From 55cbab7a7e625c7e8d0bd0a4c34a8445df906385 Mon Sep 17 00:00:00 2001
From: Megvii Engine Team <megengine@megvii.com>
Date: Thu, 19 Jan 2023 15:27:15 +0800
Subject: [PATCH] fix(imperative): fix negative step of subtensor

GitOrigin-RevId: d9127e7cbd7fe90b406d72085729ce43befbddad
---
 .../python/test/unit/core/test_indexing_op.py |  6 ++++++
 .../test/unit/functional/test_tensor.py       |  9 +++++++++
 imperative/src/impl/ops/concatenate.cpp       |  7 ++++---
 imperative/src/impl/ops/subtensor.cpp         | 19 ++++++++++++-------
 4 files changed, 31 insertions(+), 10 deletions(-)

diff --git a/imperative/python/test/unit/core/test_indexing_op.py b/imperative/python/test/unit/core/test_indexing_op.py
index 7a77d239d..1f6360916 100644
--- a/imperative/python/test/unit/core/test_indexing_op.py
+++ b/imperative/python/test/unit/core/test_indexing_op.py
@@ -308,6 +308,12 @@ def test_subtensor():
     x_[0:4:2, 3] += d
     np.testing.assert_equal(x_, yy2.numpy())
 
+    x_ = x.copy()
+    xx_ = Tensor(x_)
+    np.testing.assert_equal(x_[::-1], xx_[::-1].numpy())
+    np.testing.assert_equal(x_[::-2], xx_[::-2].numpy())
+    np.testing.assert_equal(x_[::-1, ::-2], xx_[::-1, ::-2].numpy())
+
 
 def test_advance_indexing():
     x = np.arange(25).reshape(5, 5).astype("int32")
diff --git a/imperative/python/test/unit/functional/test_tensor.py b/imperative/python/test/unit/functional/test_tensor.py
index 42af686ee..ff810493c 100644
--- a/imperative/python/test/unit/functional/test_tensor.py
+++ b/imperative/python/test/unit/functional/test_tensor.py
@@ -185,6 +185,15 @@ def test_stack(is_varnode):
         y.numpy(), np.array([[0, 6], [1, 7], [2, 8]]).astype(np.float32)
     )
 
+    x1 = Tensor(np.random.rand(600))
+    x2 = F.broadcast_to(Tensor(np.array(3)), (600,))
+
+    y = F.stack([x2, x1], axis=0)
+    np.testing.assert_equal(y.numpy(), np.stack((x2.numpy(), x1.numpy()), axis=0))
+
+    y = F.stack([x2, x2], axis=0)
+    np.testing.assert_equal(y.numpy(), np.stack((x2.numpy(), x2.numpy()), axis=0))
+
 
 @pytest.mark.parametrize("is_varnode", [True, False])
 def test_split_basic(is_varnode):
diff --git a/imperative/src/impl/ops/concatenate.cpp b/imperative/src/impl/ops/concatenate.cpp
index cf0e34d7a..842c15606 100644
--- a/imperative/src/impl/ops/concatenate.cpp
+++ b/imperative/src/impl/ops/concatenate.cpp
@@ -209,7 +209,6 @@ SmallVector<TensorPtr> apply_on_physical_tensor(
         SmallVector<LogicalTensorDesc>& output_descs, const bool& validated) {
     auto&& op_def = def.cast_final_safe<Stack>();
     size_t nr_inp = inputs.size();
-    TensorLayout inp_layout = inputs[0]->layout();
     int axis =
             op_def.axis >= 0 ? op_def.axis : op_def.axis + inputs[0]->layout().ndim + 1;
 
@@ -228,11 +227,13 @@ SmallVector<TensorPtr> apply_on_physical_tensor(
         }
         oup_layout = stack_layout_deduce(inputs_holder, axis);
     }
-    inp_layout.add_axis_cont_inplace(axis);
+
     SmallVector<TensorPtr> expanded;
     for (size_t i = 0; i < nr_inp; ++i) {
+        TensorLayout layout = inputs[i]->layout();
+        layout.add_axis_cont_inplace(axis);
         expanded.push_back(
-                Tensor::make(inputs[i]->blob(), inputs[i]->offset(), inp_layout));
+                Tensor::make(inputs[i]->blob(), inputs[i]->offset(), layout));
     }
     auto oup = Tensor::make(oup_layout, oup_cn);
     // because the dnn concat is very slow, we copy the slice code from
diff --git a/imperative/src/impl/ops/subtensor.cpp b/imperative/src/impl/ops/subtensor.cpp
index 6ca71989f..18fff76b4 100644
--- a/imperative/src/impl/ops/subtensor.cpp
+++ b/imperative/src/impl/ops/subtensor.cpp
@@ -76,15 +76,15 @@ auto origin_get_index(
     return ret;
 }
 
+auto mod_size = [](int v, int size_ax) -> int {
+    if (size_ax == 0)
+        return 0;
+    return v < 0 ? v + size_ax : v;
+};
+
 TensorLayout deduce_layout(
         TensorLayout src, std::vector<std::tuple<int8_t, bool, bool, bool, bool>> items,
         std::vector<std::tuple<int32_t, int32_t, int32_t, int32_t>> slice_items) {
-    auto mod_size = [](int v, int size_ax) -> int {
-        if (size_ax == 0)
-            return 0;
-        return v < 0 ? v + size_ax : v;
-    };
-
     auto tostr = [](int v) -> std::string { return std::to_string(v); };
 
     for (int i = items.size() - 1; i >= 0; i--) {
@@ -108,7 +108,7 @@ TensorLayout deduce_layout(
                     tostr(slice_stop).c_str(), tostr(slice_step).c_str(),
                     tostr(axis).c_str());
         } else {
-            slice_start = s_val == INT_MIN ? shape_axis - 1 : b_val;
+            slice_start = b_val == INT_MIN ? shape_axis - 1 : b_val;
             slice_start = mod_size(slice_start, shape_axis);
             slice_stop = e_val == INT_MAX ? -1 : mod_size(e_val, shape_axis);
             slice_start = std::min(slice_start, std::max(shape_axis - 1, 0));
@@ -202,6 +202,11 @@ SmallVector<TensorPtr> apply_on_physical_tensor(
             ax_val = ax_val < 0 ? layout.shape[axis] + ax_val : ax_val;
             offset += ax_val * layout.stride[axis] * dtype_size;
         } else {
+            if (s_val < 0) {
+                int shape_axis = src->layout().shape[axis];
+                start = b_val == INT_MIN ? shape_axis - 1 : b_val;
+                start = mod_size(start, shape_axis);
+            }
             start = std::max(start, 0);
             offset += start * layout.stride[axis] * dtype_size;
         }
-- 
GitLab
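
As context for the subtensor.cpp change: with a negative step, deduce_layout
read the wrong sentinel (s_val instead of b_val) when the slice start was
omitted, and apply_on_physical_tensor clamped the start to 0 instead of
defaulting to the last element, so reversed slices read from the wrong
offset. A minimal repro mirroring the assertions added in
test_indexing_op.py (a sketch, assuming only MegEngine's public Tensor API;
the 5x5 int32 array follows the neighboring tests):

    import numpy as np
    from megengine import Tensor

    x = np.arange(25).reshape(5, 5).astype("int32")
    xx = Tensor(x)

    # After the fix, negative-step slices agree with NumPy on every axis:
    np.testing.assert_equal(x[::-1], xx[::-1].numpy())
    np.testing.assert_equal(x[::-2], xx[::-2].numpy())
    np.testing.assert_equal(x[::-1, ::-2], xx[::-1, ::-2].numpy())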
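
The start-index rule the patch implements for a negative step, transcribed
as a Python sketch (helper names here are hypothetical; INT_MIN is the
sentinel the op uses for an omitted slice start, and mod_size is the lambda
this patch hoists to file scope):

    INT_MIN = -(2**31)

    def mod_size(v, size_ax):
        # Wrap a negative index once around the axis; an empty axis maps to 0.
        if size_ax == 0:
            return 0
        return v + size_ax if v < 0 else v

    def negative_step_start(b_val, shape_axis):
        # An omitted start (INT_MIN sentinel) defaults to the last element
        # when stepping backwards; explicit negative starts wrap like NumPy's.
        start = shape_axis - 1 if b_val == INT_MIN else b_val
        return mod_size(start, shape_axis)

    assert negative_step_start(INT_MIN, 5) == 4  # x[::-1] starts at index 4
    assert negative_step_start(-2, 5) == 3       # x[-2::-1] starts at index 3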
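
The concatenate.cpp change is independent: Stack's apply_on_physical_tensor
reused inputs[0]'s layout for every input when prepending the new axis,
which breaks once inputs share a shape but not strides (e.g. a broadcast,
stride-0 input next to a contiguous one); each input now gets its own
layout. A repro mirroring the new test_stack case (a sketch, assuming
megengine.functional as F):

    import numpy as np
    import megengine.functional as F
    from megengine import Tensor

    x1 = Tensor(np.random.rand(600))                  # contiguous
    x2 = F.broadcast_to(Tensor(np.array(3)), (600,))  # stride-0 view

    # With the shared inp_layout, x1 could be read through x2's stride-0
    # layout (or vice versa); per-input layouts make this match NumPy:
    y = F.stack([x2, x1], axis=0)
    np.testing.assert_equal(y.numpy(), np.stack((x2.numpy(), x1.numpy()), axis=0))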