diff --git a/mindspore/ccsrc/pipeline/jit/parse/function_block.cc b/mindspore/ccsrc/pipeline/jit/parse/function_block.cc
index 588c099082dac086b27f2e93cbb9fcda6db460a9..9ccaa46fed15e77d7becaeab6929007dad98ec6c 100644
--- a/mindspore/ccsrc/pipeline/jit/parse/function_block.cc
+++ b/mindspore/ccsrc/pipeline/jit/parse/function_block.cc
@@ -99,7 +99,7 @@ AnfNodePtr FunctionBlock::MakeResolveAstOp(const py::object &op) {
 }
 
 // Resolve class member, two possible: method, member variable
-AnfNodePtr FunctionBlock::MakeResolveClassMember(std::string attr) {
+AnfNodePtr FunctionBlock::MakeResolveClassMember(const std::string &attr) {
   py::object namespace_var =
     parser_.ast()->CallParseModFunction(PYTHON_MOD_GET_MEMBER_NAMESPACE_SYMBOL, parser_.ast()->obj());
   NameSpacePtr name_space = std::make_shared<NameSpace>(RESOLVE_NAMESPACE_NAME_CLASS_MEMBER, namespace_var);
diff --git a/mindspore/ccsrc/pipeline/jit/parse/function_block.h b/mindspore/ccsrc/pipeline/jit/parse/function_block.h
index 2029531db4c2d29ffb59c7d524b9673abdf522a2..e598790cd4595aeac5130718c57bdc5b0150013c 100644
--- a/mindspore/ccsrc/pipeline/jit/parse/function_block.h
+++ b/mindspore/ccsrc/pipeline/jit/parse/function_block.h
@@ -68,7 +68,7 @@ class FunctionBlock : public std::enable_shared_from_this<FunctionBlock> {
   void AddGlobalVar(const std::string &var_name) { (void)global_vars_.insert(var_name); }
   bool IsGlobalVar(const std::string &var_name) { return global_vars_.find(var_name) != global_vars_.end(); }
   AnfNodePtr MakeResolveAstOp(const py::object &op);
-  AnfNodePtr MakeResolveClassMember(std::string attr);
+  AnfNodePtr MakeResolveClassMember(const std::string &attr);
   AnfNodePtr MakeResolveSymbol(const std::string &value);
   AnfNodePtr MakeResolveOperation(const std::string &value);
   AnfNodePtr MakeResolve(const std::shared_ptr<NameSpace> &name_space, const std::shared_ptr<Symbol> &resolve_symbol);
diff --git a/mindspore/ccsrc/pipeline/pynative/pynative_execute.cc b/mindspore/ccsrc/pipeline/pynative/pynative_execute.cc
index fd5c8f1965bc07b99e2c99e246464598c6cefdc8..a39a931b294d85f0897390737ee1bd0d1b459130 100644
--- a/mindspore/ccsrc/pipeline/pynative/pynative_execute.cc
+++ b/mindspore/ccsrc/pipeline/pynative/pynative_execute.cc
@@ -268,6 +268,12 @@ py::tuple ConvertInputs(const PrimitivePyPtr &prim, const py::list &args, py::tu
                         TypeIdToMsTypeStr(it->second));
       }
     }
+    if (!py::isinstance<tensor::Tensor>(py_args[i]) && !py::isinstance<py::int_>(py_args[i]) &&
+        !py::isinstance<py::float_>(py_args[i])) {
+      MS_EXCEPTION(TypeError) << "For '" << prim->name() << "', the " << i << "th input is a not support type: "
+                              << py::cast<std::string>(py_args[i].attr("__class__").attr("__name__"))
+                              << ", and the value is " << py::cast<py::str>(py_args[i]) << ".";
+    }
     py::object cast_output = DoAutoCast(py_args[i], it->second);
     (*out_args)[i] = cast_output;
     (*out_args_list)[i] = cast_output;
diff --git a/tests/ut/python/pynative_mode/test_implicit_conversion.py b/tests/ut/python/pynative_mode/test_implicit_conversion.py
index ecaffd87f26100b9b206de8f355c48b650dabc49..3a19732462c0a31f246730a75dff496d1365f7ce 100644
--- a/tests/ut/python/pynative_mode/test_implicit_conversion.py
+++ b/tests/ut/python/pynative_mode/test_implicit_conversion.py
@@ -14,6 +14,7 @@
 # ============================================================================
 """ test implicit conversion """
 import numpy as np
+import pytest
 
 from mindspore import Tensor, nn
 from mindspore.ops import composite as C
@@ -90,6 +91,30 @@ def test_float_tensor_and_bool_tensors_add():
     assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()
 
 
+def test_float_tensor_and_str_add():
+    x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
+    y = "ok"
+    with pytest.raises(TypeError) as er:
+        ret = x + y
+    assert "For 'TensorAdd', the 1th input is a not support type: str" in str(er.value)
+
+
+def test_float_tensor_and_tuple_add():
+    x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
+    y = (1, 2, 3)
+    with pytest.raises(TypeError) as er:
+        ret = x + y
+    assert "For 'TensorAdd', the 1th input is a not support type: tuple" in str(er.value)
+
+
+def test_float_tensor_and_list_add():
+    x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
+    y = [1, 2, 3]
+    with pytest.raises(TypeError) as er:
+        ret = x + y
+    assert "For 'TensorAdd', the 1th input is a not support type: list" in str(er.value)
+
+
 def test_float_tensor_and_bool_tensors_add_grad():
     class Net(nn.Cell):
         def __init__(self):
@@ -104,7 +129,6 @@ def test_float_tensor_and_bool_tensors_add_grad():
             self.net = net
 
         def construct(self, x, y, sens):
-
             return C.grad_all_with_sens(self.net)(x, y, sens)
 
     x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
@@ -133,7 +157,6 @@ def test_float_tensor_and_int_tensors_sub_grad():
             self.net = net
 
         def construct(self, x, y, sens):
-
             return C.grad_all_with_sens(self.net)(x, y, sens)
 
     x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
@@ -163,7 +186,6 @@ def test_float16_tensor_and_float32_tensors_sub_grad():
            self.net = net
 
         def construct(self, x, y, sens):
-
             return C.grad_all_with_sens(self.net)(x, y, sens)
 
     x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.int32))
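
Reviewer note: below is a minimal sketch of how the new ConvertInputs guard surfaces in PyNative mode, mirroring the tests added above. The direct use of mindspore.ops.operations.TensorAdd is an assumption not shown in this diff (the added tests only reach the check through Tensor.__add__); treat it as an illustration, not part of the patch.

    import numpy as np
    import pytest
    from mindspore import Tensor
    from mindspore.ops import operations as P

    def check_unsupported_input_rejected():
        # Calling the primitive directly in PyNative mode goes through ConvertInputs,
        # where the new guard rejects any input that is not a Tensor, int, or float.
        add = P.TensorAdd()  # assumed op class; only the name 'TensorAdd' appears in the diff's test messages
        x = Tensor(np.ones((2, 3), dtype=np.float32))
        with pytest.raises(TypeError) as er:
            add(x, "ok")  # str input: expected to raise the new TypeError
        assert "not support type: str" in str(er.value)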