diff --git a/mindspore/ccsrc/pipeline/jit/pipeline.cc b/mindspore/ccsrc/pipeline/jit/pipeline.cc
index 062dff725549ae797baa392413b8eb19570697f7..0c04b4478aadba37c70de1b4ee645eab69e9e264 100644
--- a/mindspore/ccsrc/pipeline/jit/pipeline.cc
+++ b/mindspore/ccsrc/pipeline/jit/pipeline.cc
@@ -751,19 +751,17 @@ py::object ExecutorPy::Run(const py::tuple &args, const py::object &phase) {
     return ExecDFGraph(info_, args, phase_s);
   }
 #else
-  if (backend == "ms" || backend == "ge") {
-    auto ret_val = std::make_shared<py::object>();
-    if (info_.count(phase_s) != 0 && info_[phase_s]->func_graph != nullptr) {
-      if (IsGraphOutputValueNodeOrParameter(info_[phase_s]->func_graph->output(), args, ret_val)) {
-        return *ret_val;
-      }
+  auto ret_val = std::make_shared<py::object>();
+  if (info_.count(phase_s) != 0 && info_[phase_s]->func_graph != nullptr) {
+    if (IsGraphOutputValueNodeOrParameter(info_[phase_s]->func_graph->output(), args, ret_val)) {
+      return *ret_val;
     }
-    if (backend == "ge") {
-      if (args.size() > 0) {
-        return args[0];
-      }
-      return args;
+  }
+  if (backend == "ge") {
+    if (args.size() > 0) {
+      return args[0];
     }
+    return args;
   }
 #endif
   std::size_t full_arg_size = ArgListSize(phase_s);
diff --git a/mindspore/common/parameter.py b/mindspore/common/parameter.py
index 69311555dcec05d2b01fea9c988fecc412efebf2..a217985899ce8d03bbba8a4ad02e55dd93a9f47a 100644
--- a/mindspore/common/parameter.py
+++ b/mindspore/common/parameter.py
@@ -389,6 +389,8 @@ class Parameter(MetaTensor):
             raise RuntimeError("Must set or change parallel mode before any Initializer created.")
         if self.init_mode is None:
             return self
+        if self.inited_param is not None:
+            return self.inited_param
         if layout is not None:
             if not isinstance(layout, list):
                 raise TypeError("The layout should be list! layout is {}.".format(layout))
diff --git a/mindspore/core/ir/meta_tensor_extends.cc b/mindspore/core/ir/meta_tensor_extends.cc
index b2476628cbbec7c392b6e6011e9031625055cba8..1ae5ef9a99bf1799108782fe353a428803913fa1 100644
--- a/mindspore/core/ir/meta_tensor_extends.cc
+++ b/mindspore/core/ir/meta_tensor_extends.cc
@@ -36,8 +36,8 @@ abstract::AbstractBasePtr MetaTensor::ToAbstract() {
   auto abs_tensor = std::make_shared<abstract::AbstractTensor>(dtype, tensor_shape);
 
   // if is parameter always no value.
-  if (is_parameter()) {
-    auto param_name = param_info()->name();
+  if (is_parameter_) {
+    auto param_name = param_info_->name();
     auto ref_key = std::make_shared<RefKey>(param_name);
     auto abs_ref_key = ref_key->ToAbstract();
     abs_tensor = std::make_shared<abstract::AbstractRef>(abs_ref_key, abs_tensor);
diff --git a/mindspore/core/ir/tensor.cc b/mindspore/core/ir/tensor.cc
index cb946cd0f8a7d470f8b270705974405732283e82..11ead05e3250e553b00103a44ef1018985a05667 100644
--- a/mindspore/core/ir/tensor.cc
+++ b/mindspore/core/ir/tensor.cc
@@ -476,8 +476,8 @@ abstract::AbstractBasePtr Tensor::ToAbstract() {
   auto tensor_shape = tens->shape();
   auto abs_tensor = std::make_shared<abstract::AbstractTensor>(dtype, tensor_shape);
   // if is parameter always no value.
-  if (is_parameter()) {
-    auto param_name = param_info()->name();
+  if (is_parameter_) {
+    auto param_name = param_info_->name();
     auto ref_key = std::make_shared<RefKey>(param_name);
     auto abs_ref_key = ref_key->ToAbstract();
     abs_tensor = std::make_shared<abstract::AbstractRef>(abs_ref_key, abs_tensor);
diff --git a/tests/ut/python/nn/test_parameter.py b/tests/ut/python/nn/test_parameter.py
index ec0d771075844ad8f3a34979bd0b58b55ea579c4..d0fd60599a0c0882eb6f3187b6092ee2dc4b4f56 100644
--- a/tests/ut/python/nn/test_parameter.py
+++ b/tests/ut/python/nn/test_parameter.py
@@ -17,7 +17,7 @@
 import numpy as np
 import pytest
 
-from mindspore import context, Tensor, Parameter, ParameterTuple
+from mindspore import context, Tensor, Parameter, ParameterTuple, nn
 from mindspore._checkparam import _check_str_by_regular
 from mindspore.common import dtype as mstype
 from mindspore.common.initializer import initializer
@@ -229,3 +229,25 @@ def test_parameter_lazy_init():
     para.set_parameter_data(initializer('ones', [1, 2], mstype.float32), slice_shape=True)
     assert np.array_equal(para.default_input.asnumpy(), np.ones((1, 2)))
     context.reset_auto_parallel_context()
+
+
+def test_parameter_as_output():
+    context.reset_auto_parallel_context()
+    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
+    initial_input = initializer('One', shape=(2,), dtype=mstype.int32)
+    updated_input = Tensor([2, 2], mstype.int32)
+    class Net(nn.Cell):
+        def __init__(self, initial, updated):
+            super().__init__()
+            self.initial = initial
+            self.updated = updated
+            self.p = Parameter(self.initial, name="weight")
+            self.new_p = self.p.init_data()
+            self.new_p.set_parameter_data(self.updated)
+        def construct(self):
+            return self.new_p
+
+    net = Net(initial_input, updated_input)
+    output = net()
+    assert np.array_equal(output.asnumpy(), np.array([2, 2], np.int32))
+    context.reset_auto_parallel_context()
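
Note on the parameter.py hunk: the added early return makes repeated Parameter.init_data()
calls idempotent, which is what lets the new test update a lazily initialized parameter and
return it as a network output. Below is a minimal sketch of the intended behaviour, assuming
(as the attribute name suggests) that init_data() records its result in self.inited_param on
the first call; the snippet is illustrative only and not part of the patch:

    from mindspore import Tensor, Parameter
    from mindspore.common import dtype as mstype
    from mindspore.common.initializer import initializer

    p = Parameter(initializer('One', shape=(2,), dtype=mstype.int32), name="w")
    first = p.init_data()   # materializes the lazy initializer
    second = p.init_data()  # now returns the cached inited_param instead of
                            # re-initializing, so both names alias one object
    assert first is second
    # An update through either alias is visible through the other, and hence
    # in a graph that returns the parameter as its output.
    second.set_parameter_data(Tensor([2, 2], mstype.int32))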