Commit 26de4540 authored by mindspore-ci-bot, committed by Gitee

!4639 rename param_value to param_info

Merge pull request !4639 from vlne-v1/rename_param_value
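For reference, a minimal Python sketch (not part of this commit) of how the renamed binding is used, assuming the mindspore._c_expression module exposes ParamInfo exactly as registered in the pybind11 definition further down in this diff; the property values are hypothetical.

```python
# Illustrative only: ParamInfo (formerly ParamValue) is the C++-backed holder of
# parameter metadata exposed through mindspore._c_expression, per the pybind11
# registration in this diff. Names and values below are hypothetical.
from mindspore._c_expression import ParamInfo

info = ParamInfo()
info.name = "fc1.weight"          # parameter name (hypothetical)
info.requires_grad = True         # gradient is computed for this parameter
info.layerwise_parallel = False   # no layer-wise parallel handling

# clone() copies the metadata and marks the copy as a clone of the original.
cloned = info.clone()
assert cloned.name == info.name
```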
......@@ -19,7 +19,7 @@
#include "frontend/operator/ops.h"
#include "ir/tensor.h"
#include "ir/anf.h"
#include "ir/param_value.h"
#include "ir/param_info.h"
#include "runtime/device/kernel_runtime.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "utils/ms_utils.h"
......
......@@ -19,7 +19,7 @@
#include <unordered_set>
#include <set>
#include "frontend/operator/ops.h"
#include "ir/param_value.h"
#include "ir/param_info.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "runtime/device/kernel_info.h"
#include "backend/kernel_compiler/kernel_build_info.h"
......
......@@ -20,7 +20,7 @@
#include <unordered_set>
#include "pipeline/jit/parse/data_converter.h"
#include "ir/manager.h"
#include "ir/param_value.h"
#include "ir/param_info.h"
#include "backend/kernel_compiler/common_utils.h"
#include "frontend/operator/ops.h"
#include "common/trans.h"
......
......@@ -26,7 +26,7 @@
#include "ir/graph_utils.h"
#include "utils/symbolic.h"
#include "ir/meta_func_graph.h"
#include "ir/param_value.h"
#include "ir/param_info.h"
#include "pybind_api/ir/tensor_py.h"
#include "pipeline/jit/parse/python_adapter.h"
#include "pipeline/jit/parse/resolve.h"
......
......@@ -24,7 +24,7 @@
#include <string>
#include "ir/meta_func_graph.h"
#include "ir/param_value.h"
#include "ir/param_info.h"
#include "ir/primitive.h"
#include "ir/graph_utils.h"
#include "utils/utils.h"
......
......@@ -19,7 +19,7 @@
#include <string>
#include "ir/anf.h"
#include "ir/param_value.h"
#include "ir/param_info.h"
#include "pipeline/jit/parse/python_adapter.h"
namespace mindspore {
......
......@@ -28,7 +28,7 @@
#include <vector>
#include "ir/anf.h"
#include "ir/param_value.h"
#include "ir/param_info.h"
#include "ir/tensor.h"
#include "frontend/optimizer/opt.h"
#include "frontend/optimizer/optimizer.h"
......
......@@ -28,7 +28,7 @@
#include <utility>
#include "ir/tensor.h"
#include "ir/param_value.h"
#include "ir/param_info.h"
#include "frontend/operator/ops.h"
#include "frontend/optimizer/optimizer.h"
#include "frontend/parallel/auto_parallel/graph_costmodel.h"
......
......@@ -24,7 +24,7 @@
#include <functional>
#include "ir/func_graph_cloner.h"
#include "ir/param_value.h"
#include "ir/param_info.h"
#include "frontend/parallel/costmodel_context.h"
#include "frontend/parallel/context.h"
#include "pipeline/jit/pass.h"
......
......@@ -21,7 +21,7 @@
#include <vector>
#include <algorithm>
#include "ir/param_value.h"
#include "ir/param_info.h"
#include "pipeline/jit/parse/data_converter.h"
#include "pipeline/jit/parse/parse.h"
#include "pipeline/jit/parse/python_adapter.h"
......
......@@ -24,7 +24,7 @@
#include <cstdlib>
#include <algorithm>
#include "ir/param_value.h"
#include "ir/param_info.h"
#include "pipeline/jit/pass.h"
#include "pipeline/jit/parse/data_converter.h"
#include "frontend/optimizer/ad/dfunctor.h"
......
......@@ -24,7 +24,7 @@
#include "debug/trace.h"
#include "pybind_api/ir/tensor_py.h"
#include "ir/param_value.h"
#include "ir/param_info.h"
#include "utils/any.h"
#include "utils/utils.h"
#include "utils/ms_context.h"
......
......@@ -13,30 +13,30 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ir/param_value.h"
#include "ir/param_info.h"
#include "pybind11/pybind11.h"
#include "pybind_api/api_register.h"
namespace mindspore {
namespace py = pybind11;
-REGISTER_PYBIND_DEFINE(ParamValue, ([](const py::module *m) {
-(void)py::class_<ParamValue, ParamValuePtr>(*m, "ParamValue")
+REGISTER_PYBIND_DEFINE(ParamInfo, ([](const py::module *m) {
+(void)py::class_<ParamInfo, ParamValuePtr>(*m, "ParamInfo")
.def(py::init())
.def("clone", &ParamValue::Clone)
.def_property("name", &ParamValue::name, &ParamValue::set_name)
.def_property("requires_grad", &ParamValue::requires_grad, &ParamValue::set_requires_grad)
.def_property("layerwise_parallel", &ParamValue::layerwise_parallel,
&ParamValue::set_layerwise_parallel)
.def("clone", &ParamInfo::Clone)
.def_property("name", &ParamInfo::name, &ParamInfo::set_name)
.def_property("requires_grad", &ParamInfo::requires_grad, &ParamInfo::set_requires_grad)
.def_property("layerwise_parallel", &ParamInfo::layerwise_parallel,
&ParamInfo::set_layerwise_parallel)
.def(py::pickle(
[](const ParamValue &p) { // __getstate__
[](const ParamInfo &p) { // __getstate__
return py::make_tuple(p.name(), p.requires_grad(), p.layerwise_parallel());
},
[](const py::tuple &t) { // __setstate__
if (t.size() != 6) {
std::runtime_error("Invalid state for ParamValue!");
std::runtime_error("Invalid state for ParamInfo!");
}
ParamValuePtr p = std::make_shared<ParamValue>();
ParamValuePtr p = std::make_shared<ParamInfo>();
p->set_name(t[1].cast<std::string>());
p->set_requires_grad(t[2].cast<bool>());
p->set_layerwise_parallel(t[3].cast<bool>());
......
......@@ -24,7 +24,7 @@
#include <functional>
#include "ir/tensor.h"
#include "ir/param_value.h"
#include "ir/param_info.h"
#include "debug/anf_ir_utils.h"
#include "frontend/operator/ops.h"
#include "proto/onnx.pb.h"
......
......@@ -27,7 +27,7 @@
#include "proto/onnx.pb.h"
#include "frontend/operator/ops.h"
#include "ir/tensor.h"
#include "ir/param_value.h"
#include "ir/param_info.h"
namespace mindspore {
enum OpMergeMode {
......
......@@ -16,7 +16,7 @@
#include "utils/callbacks_ge.h"
#include "pybind11/pybind11.h"
#include "ir/param_value.h"
#include "ir/param_info.h"
#include "transform/graph_ir/df_graph_manager.h"
#include "transform/graph_ir/util.h"
#include "pipeline/jit/parse/data_converter.h"
......
......@@ -30,7 +30,7 @@
#include "pipeline/jit/parse/parse_base.h"
#include "ir/value.h"
#include "ir/tensor.h"
#include "ir/param_value.h"
#include "ir/param_info.h"
#include "utils/base_ref_extends.h"
#include "utils/ms_context.h"
......@@ -483,8 +483,8 @@ bool IsGraphOutputValueNodeOrParameter(const AnfNodePtr &output, const py::tuple
// Isomorphism
static bool SameNode(const AnfNodePtr &node1, const AnfNodePtr &node2, FuncGraphPairMapEquiv *equiv_func_graph,
NodeMapEquiv *const equiv_node);
-bool SameNodeShallow(const AnfNodePtr &node1, const AnfNodePtr &node2, FuncGraphPairMapEquiv *equiv_func_graph,
-NodeMapEquiv *const equiv_node) {
+static bool SameNodeShallow(const AnfNodePtr &node1, const AnfNodePtr &node2, FuncGraphPairMapEquiv *equiv_func_graph,
+NodeMapEquiv *const equiv_node) {
if (equiv_node == nullptr) {
MS_LOG(ERROR) << "Invalid equiv_node";
return false;
......@@ -523,8 +523,8 @@ bool SameNodeShallow(const AnfNodePtr &node1, const AnfNodePtr &node2, FuncGraph
return false;
}
-static bool SameNode(const AnfNodePtr &node1, const AnfNodePtr &node2, FuncGraphPairMapEquiv *equiv_func_graph,
-NodeMapEquiv *const equiv_node) {
+bool SameNode(const AnfNodePtr &node1, const AnfNodePtr &node2, FuncGraphPairMapEquiv *equiv_func_graph,
+NodeMapEquiv *const equiv_node) {
MS_EXCEPTION_IF_NULL(node1);
MS_EXCEPTION_IF_NULL(node2);
if (node1->isa<CNode>() && node2->isa<CNode>()) {
......
......@@ -22,7 +22,7 @@
#include <vector>
#include "google/protobuf/io/zero_copy_stream_impl.h"
#include "ir/tensor.h"
#include "ir/param_value.h"
#include "ir/param_info.h"
#include "frontend/operator/ops.h"
#include "abstract/abstract_value.h"
#include "proto/onnx.pb.h"
......
......@@ -15,7 +15,7 @@
"""Parameter for cell."""
from copy import copy
-from .._c_expression import ParamValue
+from .._c_expression import ParamInfo
from . import dtype as mstype
from .initializer import initializer, Initializer
from .tensor import Tensor, MetaTensor
......@@ -79,7 +79,7 @@ class Parameter(MetaTensor):
Parameter, (data, self.name, self.requires_grad, self.layerwise_parallel))
def __init__(self, default_input, name, requires_grad=True, layerwise_parallel=False):
-self._value = ParamValue()
+self._value = ParamInfo()
self.name = name
self.requires_grad = requires_grad
self.layerwise_parallel = layerwise_parallel
......
......@@ -74,8 +74,8 @@ using VarPtr = std::shared_ptr<Var>;
class AnfIrVisitor;
-class ParamValue;
-using ParamValuePtr = std::shared_ptr<ParamValue>;
+class ParamInfo;
+using ParamValuePtr = std::shared_ptr<ParamInfo>;
// AnfNode is the basic class of the IR definition derived from Base.
// Only two types of nodes are derived: CNode and ANode.
......
......@@ -19,7 +19,7 @@
#include <algorithm>
#include "ir/manager.h"
#include "ir/param_value.h"
#include "ir/param_info.h"
#include "base/core_ops.h"
#include "utils/convert_utils_base.h"
#include "utils/log_adapter.h"
......
......@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef MINDSPORE_CORE_IR_PARAM_VALUE_H_
-#define MINDSPORE_CORE_IR_PARAM_VALUE_H_
+#ifndef MINDSPORE_CORE_IR_PARAM_INFO_H_
+#define MINDSPORE_CORE_IR_PARAM_INFO_H_
#include <atomic>
#include <memory>
......@@ -25,13 +25,13 @@
#include "ir/tensor.h"
namespace mindspore {
-class ParamValue {
+class ParamInfo {
public:
-ParamValue() {}
+ParamInfo() {}
-ParamValue(const ParamValue &other) = default;
+ParamInfo(const ParamInfo &other) = default;
-virtual ~ParamValue() = default;
+virtual ~ParamInfo() = default;
const std::string &name() const { return name_; }
void set_name(const std::string &name) { name_ = name; }
......@@ -58,7 +58,7 @@ class ParamValue {
ParamValuePtr Clone() {
static std::atomic<int32_t> parameter_cloned_index{1};
int32_t index = parameter_cloned_index.fetch_add(1, std::memory_order_relaxed);
auto clone = std::make_shared<ParamValue>(*this);
auto clone = std::make_shared<ParamInfo>(*this);
clone->be_cloned_ = false;
clone->cloned_ = true;
clone->be_cloned_index_ = {};
......@@ -78,4 +78,4 @@ class ParamValue {
int32_t cloned_index_{0};
};
} // namespace mindspore
-#endif // MINDSPORE_CORE_IR_PARAM_VALUE_H_
+#endif // MINDSPORE_CORE_IR_PARAM_INFO_H_
......@@ -22,7 +22,6 @@
#include <vector>
#include <utility>
#include "ir/param_value.h"
#include "ir/dtype/type_id.h"
namespace mindspore {
......
......@@ -22,7 +22,7 @@
#include "utils/utils.h"
#include "backend/kernel_compiler/kernel_build_info.h"
#include "backend/optimizer/common/optimizer.h"
#include "ir/param_value.h"
#include "ir/param_info.h"
#define private public
#define protected public
#include "backend/optimizer/ascend/enhancer/insert_memcpy_async_for_hccl_op.h"
......
......@@ -15,7 +15,7 @@
*/
#include "common/common_test.h"
#include "ir/param_value.h"
#include "ir/param_info.h"
#include "frontend/operator/ops.h"
#include "backend/session/kernel_graph.h"
#include "backend/session/anf_runtime_algorithm.h"
......
......@@ -15,7 +15,7 @@
*/
#include "common/common_test.h"
#include "ir/param_value.h"
#include "ir/param_info.h"
#include "frontend/operator/ops.h"
#include "backend/session/kernel_graph.h"
#include "backend/session/anf_runtime_algorithm.h"
......