diff --git a/imperative/python/megengine/core/tensor/indexing.py b/imperative/python/megengine/core/tensor/indexing.py
index f55136392c31e9a2a545d9632d92624c6b4323a0..4dc1da3bdb3e6eeced6424b11ad2919b5195922b 100644
--- a/imperative/python/megengine/core/tensor/indexing.py
+++ b/imperative/python/megengine/core/tensor/indexing.py
@@ -119,12 +119,16 @@ def unpack_getitem(inp, tuple_val, *, allow_newaxis=True):
                     else 1
                 )
     else:
-        if ndim_indexed > inp.ndim:
-            raise IndexError(
-                "too many indices for tensor: tensor is {}-dimensional, but {} were indexed".format(
-                    inp.ndim, len(tuple_val)
+        try:
+            if ndim_indexed > inp.ndim:
+                raise IndexError(
+                    "too many indices for tensor: tensor is {}-dimensional, but {} were indexed".format(
+                        inp.ndim, len(tuple_val)
+                    )
                 )
-            )
+        except ValueError:
+            # ignore
+            pass
 
     tuple_val = remove_ellipsis(inp, tuple_val)
     use_subtensor = True
diff --git a/imperative/python/src/tensor.cpp b/imperative/python/src/tensor.cpp
index 8cede64a80915bc07efa23fa5b01792cf1d7aa4c..d81de041503bec44b75be3a70fdfe39f69cf8df5 100644
--- a/imperative/python/src/tensor.cpp
+++ b/imperative/python/src/tensor.cpp
@@ -272,16 +272,12 @@ PyObject* TensorWrapper::device() {
 
 PyObject* TensorWrapper::numpy() {
     auto hv = m_tensor->numpy();
-    // if (!hv) {
-    //     PyErr_SetString(PyExc_ValueError, "tensor invalid");
-    //     return nullptr;
-    // }
-    auto arr = py::reinterpret_steal<py::array>(
-            npy::ndarray_from_tensor(hv->as_nd(true), npy::ShareType::TRY_SHARE));
-    if (!arr) {
+    if (!hv) {
         PyErr_SetString(PyExc_ValueError, "tensor invalid");
         return nullptr;
     }
+    auto arr = py::reinterpret_steal<py::array>(
+            npy::ndarray_from_tensor(hv->as_nd(true), npy::ShareType::TRY_SHARE));
     if (hv->shape().is_scalar()) {
         mgb_assert(PyArray_Check(arr.ptr()));
         return PyArray_Squeeze(reinterpret_cast<PyArrayObject*>(arr.ptr()));
diff --git a/imperative/src/impl/transformations/scalar.cpp b/imperative/src/impl/transformations/scalar.cpp
index 891abdd7c319dcbb9cd6ec1c907398e80cf71dc5..8daa5827ea47d6fb9093adf7a2098003477c64bd 100644
--- a/imperative/src/impl/transformations/scalar.cpp
+++ b/imperative/src/impl/transformations/scalar.cpp
@@ -51,6 +51,7 @@ bool is_scalar_shape(ValueRef shape) {
     if (shape.is<ScalarValue>()) {
         return false;
     }
+    // may have performance issue
    auto shape_of_shape = shape.shape();
     if (!shape_of_shape) {
         // assume not scalar
@@ -211,14 +212,21 @@ std::vector<ValueRef> subtensor_rule(
         const Subtensor& subtensor, Span<ValueRef> inputs) {
     mgb_assert(inputs.size() >= 1);
     auto input = inputs[0];
-    size_t ndim = input.is<ScalarValue>() ? 0 : input.shape()->ndim;
-    for (auto&& [axis, begin, end, step, idx] : subtensor.items) {
-        if (idx) {
-            ndim--;
+    bool is_scalar;
+    mgb_assert(!input.is<ScalarValue>(), "subtensor shouldn't have scalar input");
+    if (auto shape = input.shape()) {
+        size_t ndim = input.shape()->ndim;
+        for (auto&& [axis, begin, end, step, idx] : subtensor.items) {
+            if (idx) {
+                ndim--;
+            }
         }
+        is_scalar = ndim == 0;
+    } else {
+        is_scalar = false;
     }
     auto output = imperative::apply(subtensor, unwrap_inputs(inputs))[0];
-    if (!ndim) {
+    if (is_scalar) {
         return {ScalarValue::make(output)};
     } else {
         return {output};
@@ -261,8 +269,7 @@ std::vector<ValueRef> fastpath_copy_rule(
 std::vector<ValueRef> reshape_rule(
         const Reshape& reshape, Span<ValueRef> inputs) {
     mgb_assert(inputs.size() == 2);
-    bool is_scalar =
-            (!inputs[1].is<ScalarValue>()) && *inputs[1].shape() == ValueShape{0};
+    bool is_scalar = is_scalar_shape(inputs[1]);
     auto unwrapped_input = inputs[0].is<ScalarValue>()
                                  ? inputs[0].cast<ScalarValue>().value()
                                  : inputs[0];
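
The `subtensor_rule` hunk above encodes a simple bookkeeping rule: the output of a `Subtensor` is a scalar exactly when the input's rank minus the number of plain-index items reaches zero, and when the input's shape is not yet known the output is conservatively treated as non-scalar (the new `else { is_scalar = false; }` branch), instead of letting `ndim` underflow and misfire in `if (!ndim)`. Below is a minimal Python sketch of that rule, outside MegEngine; the helper name `subtensor_output_is_scalar` and the representation of `items` as one boolean per indexing item are illustrative assumptions, not part of the patch.

```python
# Hypothetical helper mirroring the scalar-tracking logic of
# subtensor_rule in scalar.cpp; not MegEngine API.
def subtensor_output_is_scalar(ndim, items):
    """ndim: input rank, or None when the shape is not inferable yet.
    items: one bool per indexing item, True for a plain index
    (the `idx` field in the C++ code), False for a slice."""
    if ndim is None:
        # unknown shape: assume not scalar, matching the new else branch
        return False
    for is_index in items:
        if is_index:
            ndim -= 1  # each plain index removes one axis
    return ndim == 0

assert subtensor_output_is_scalar(2, [True, True])       # x[i, j] -> scalar
assert not subtensor_output_is_scalar(2, [True, False])  # x[i, a:b] -> 1-d
assert not subtensor_output_is_scalar(None, [True])      # unknown shape
```

Keeping an explicit `is_scalar` flag separates "rank reached zero" from "rank unknown", which the old single `ndim` counter conflated; the `reshape_rule` hunk applies the same idea by delegating to the shared `is_scalar_shape` predicate.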