diff --git a/mindspore/ccsrc/backend/kernel_compiler/kernel.h b/mindspore/ccsrc/backend/kernel_compiler/kernel.h
index 5725bc80aeeeca6ab8f119725034bc05d7634af2..01f8e75f49a1f180215aebbb77033c6a2ee9dc79 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/kernel.h
+++ b/mindspore/ccsrc/backend/kernel_compiler/kernel.h
@@ -30,14 +30,6 @@ namespace mindspore {
 enum KernelType : int { UNKNOWN_KERNEL_TYPE = 0, AKG_KERNEL, AICPU_KERNEL, RT_KERNEL, HCCL_KERNEL, TBE_KERNEL };
 namespace kernel {
-
-enum Axis : int {
-  N = 0,
-  C,
-  H,
-  W,
-};
-
 // Supported fusion type
 enum FusionType {
   CONVLUTION = 0,
diff --git a/mindspore/ccsrc/backend/kernel_compiler/kernel_build_info.h b/mindspore/ccsrc/backend/kernel_compiler/kernel_build_info.h
index f45a1b4887665a8c8a89dd7542f308632b0671e5..71d9825d5a76303ae8d3b929edce54d6c6c45eec 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/kernel_build_info.h
+++ b/mindspore/ccsrc/backend/kernel_compiler/kernel_build_info.h
@@ -22,6 +22,7 @@
 #include
 #include
 #include "ir/dtype.h"
+#include "ir/kernel_info_dev.h"
 #include "backend/kernel_compiler/kernel.h"
 
 namespace mindspore {
diff --git a/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_select/tbe_kernel_select.cc b/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_select/tbe_kernel_select.cc
index 5635811425cd621d22ea0209e41b9bfcdfd6be28..ec176e324d26b11212635c167d9c3e8647854435 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_select/tbe_kernel_select.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_select/tbe_kernel_select.cc
@@ -406,16 +406,16 @@ void TbeKernelSelect::StringToAxisVector(const std::string &reshape_type_str, st
   for (const auto &c : reshape_type_str) {
     switch (c) {
       case 'N':
-        reshape_type_vec->push_back(kernel::N);
+        reshape_type_vec->push_back(N);
         break;
       case 'C':
-        reshape_type_vec->push_back(kernel::C);
+        reshape_type_vec->push_back(C);
         break;
       case 'H':
-        reshape_type_vec->push_back(kernel::H);
+        reshape_type_vec->push_back(H);
         break;
       case 'W':
-        reshape_type_vec->push_back(kernel::W);
+        reshape_type_vec->push_back(W);
         break;
       default:
         MS_LOG(EXCEPTION) << "Unknown axis " << c << "in reshape type.";
diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ascend_helper.cc b/mindspore/ccsrc/backend/optimizer/ascend/ascend_helper.cc
index d37a31b95afc8bfb67bee854d475202d2e91dd8f..1c96b417bb4dbbaa56c715f97b0bc9d3299bc2ad 100644
--- a/mindspore/ccsrc/backend/optimizer/ascend/ascend_helper.cc
+++ b/mindspore/ccsrc/backend/optimizer/ascend/ascend_helper.cc
@@ -55,7 +55,7 @@ AnfNodePtr AddTransOpNodeToGraph(const FuncGraphPtr &func_graph, const AnfNodePt
   CNodePtr trans_data = nullptr;
   std::string input_format = is_insert_input ? kOpFormat_DEFAULT : AnfAlgo::GetOutputFormat(node, 0);
   std::string dst_format = is_insert_input ? AnfAlgo::GetInputFormat(node, 0) : kOpFormat_DEFAULT;
-  std::vector<kernel::Axis> padding_axis;
+  std::vector<Axis> padding_axis;
   MS_EXCEPTION_IF_NULL(node);
   // if insert transdata for input we need to change the input
   if (is_insert_input) {
@@ -170,7 +170,7 @@ AnfNodePtr InsertTransOpForMultipleOutput(const FuncGraphPtr &func_graph, const
 }
 }  // namespace
 void RefreshKernelBuildInfo(const std::string &input_format, const std::string &output_format,
-                            const AnfNodePtr &trans_data, const std::vector<kernel::Axis> &reshape_type,
+                            const AnfNodePtr &trans_data, const std::vector<Axis> &reshape_type,
                             const TypeId &type_id) {
   MS_EXCEPTION_IF_NULL(trans_data);
   auto ori_build_info = AnfAlgo::GetSelectKernelBuildInfo(trans_data);
diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ascend_helper.h b/mindspore/ccsrc/backend/optimizer/ascend/ascend_helper.h
index 4d2833b999b749d8d44e0ed606293b19109ca4d0..b7a86efa4ce361103a931eb23857913ddaea2749 100644
--- a/mindspore/ccsrc/backend/optimizer/ascend/ascend_helper.h
+++ b/mindspore/ccsrc/backend/optimizer/ascend/ascend_helper.h
@@ -86,7 +86,7 @@ class OpFinder {
 using OpFinderPtr = std::shared_ptr<OpFinder>;
 
 void RefreshKernelBuildInfo(const std::string &input_format, const std::string &output_format,
-                            const AnfNodePtr &trans_data, const std::vector<kernel::Axis> &reshape_type = {},
+                            const AnfNodePtr &trans_data, const std::vector<Axis> &reshape_type = {},
                             const TypeId &type_id = kTypeUnknown);
 
 CNodePtr NewTransOpNode(const FuncGraphPtr &func_graph, const AnfNodePtr &input, const KernelSelectPtr &kernel_select,
diff --git a/mindspore/ccsrc/backend/session/anf_runtime_algorithm.cc b/mindspore/ccsrc/backend/session/anf_runtime_algorithm.cc
index 8a212be1f02edd321437043f5b1bb06e0fbeeb98..06c1219e93d93cd0ba5dcc8822d6ef59357e6877 100644
--- a/mindspore/ccsrc/backend/session/anf_runtime_algorithm.cc
+++ b/mindspore/ccsrc/backend/session/anf_runtime_algorithm.cc
@@ -418,7 +418,7 @@ std::string AnfRuntimeAlgorithm::GetPrevNodeOutputFormat(const AnfNodePtr &anf_n
   return AnfRuntimeAlgorithm::GetOutputFormat(kernel_with_index.first, kernel_with_index.second);
 }
 
-std::vector<kernel::Axis> AnfRuntimeAlgorithm::GetPrevNodeOutputReshapeType(const AnfNodePtr &node, size_t input_idx) {
+std::vector<Axis> AnfRuntimeAlgorithm::GetPrevNodeOutputReshapeType(const AnfNodePtr &node, size_t input_idx) {
   KernelWithIndex kernel_with_index = AnfAlgo::GetPrevNodeOutput(node, input_idx);
   return GetOutputReshapeType(kernel_with_index.first, kernel_with_index.second);
 }
@@ -483,7 +483,7 @@ std::vector AnfRuntimeAlgorithm::GetInputDeviceShape(const AnfNodePtr &n
   return trans::TransShapeToDevice(infer_shape, format);
 }
 
-std::vector<kernel::Axis> AnfRuntimeAlgorithm::GetInputReshapeType(const AnfNodePtr &node, size_t input_idx) {
+std::vector<Axis> AnfRuntimeAlgorithm::GetInputReshapeType(const AnfNodePtr &node, size_t input_idx) {
   MS_EXCEPTION_IF_NULL(node);
   if (input_idx > GetInputTensorNum(node)) {
     MS_LOG(EXCEPTION) << "The index:" << input_idx
@@ -503,7 +503,7 @@ std::vector AnfRuntimeAlgorithm::GetInputReshapeType(const AnfNode
   return build_info->GetInputReshapeType(input_idx);
 }
 
-std::vector<kernel::Axis> AnfRuntimeAlgorithm::GetOutputReshapeType(const AnfNodePtr &node, size_t output_idx) {
+std::vector<Axis> AnfRuntimeAlgorithm::GetOutputReshapeType(const AnfNodePtr &node, size_t output_idx) {
   MS_EXCEPTION_IF_NULL(node);
   if (output_idx > GetOutputTensorNum(node)) {
     MS_LOG(EXCEPTION) << "The index [" << output_idx << "] is out of range of the node's output size [ "
diff --git a/mindspore/ccsrc/backend/session/anf_runtime_algorithm.h b/mindspore/ccsrc/backend/session/anf_runtime_algorithm.h
index 89a5ecbce3949a2ec6cfec8c3f6e2758f572e2d0..dfd3fede50af19139c37d7048099af8575beee5d 100644
--- a/mindspore/ccsrc/backend/session/anf_runtime_algorithm.h
+++ b/mindspore/ccsrc/backend/session/anf_runtime_algorithm.h
@@ -27,6 +27,7 @@
 #include "ir/dtype.h"
 #include "base/base.h"
 #include "ir/primitive.h"
+#include "ir/kernel_info_dev.h"
 #include "runtime/device/device_address.h"
 #include "backend/kernel_compiler/kernel.h"
 #include "backend/kernel_compiler/kernel_build_info.h"
@@ -109,7 +110,7 @@ class AnfRuntimeAlgorithm {
   // get output format from prev node,input_index is the input index of current node related to prev node
   static std::string GetPrevNodeOutputFormat(const AnfNodePtr &node, size_t input_idx);
   // get reshape_type of from the output of input node.
-  static std::vector<kernel::Axis> GetPrevNodeOutputReshapeType(const AnfNodePtr &node, size_t input_idx);
+  static std::vector<Axis> GetPrevNodeOutputReshapeType(const AnfNodePtr &node, size_t input_idx);
   // get output shapes inferred by ME from input nodes.
   static std::vector<size_t> GetOutputInferShape(const AnfNodePtr &node, size_t output_idx);
   // get input shapes inferred by ME from input nodes.
@@ -119,9 +120,9 @@ class AnfRuntimeAlgorithm {
   // get input shapes which will built and run in device
   static std::vector<size_t> GetInputDeviceShape(const AnfNodePtr &node, size_t input_idx);
   // Get Input Padding Axis
-  static std::vector<kernel::Axis> GetInputReshapeType(const AnfNodePtr &node, size_t output_idx);
+  static std::vector<Axis> GetInputReshapeType(const AnfNodePtr &node, size_t output_idx);
   // Get Output Padding Axis
-  static std::vector<kernel::Axis> GetOutputReshapeType(const AnfNodePtr &node, size_t output_idx);
+  static std::vector<Axis> GetOutputReshapeType(const AnfNodePtr &node, size_t output_idx);
   // get output data type inferred by ME of anf node
   static TypeId GetOutputInferDataType(const AnfNodePtr &node, size_t output_idx);
   // get output original data type from prev node,input_index is the input index of current node related to prev node
diff --git a/mindspore/ccsrc/backend/session/session_basic.cc b/mindspore/ccsrc/backend/session/session_basic.cc
index e32bb4baef298e3b811cde8ac2e71b02a1f9425a..fadeeca49b90d557fc8ffa6c6b558a2028787289 100644
--- a/mindspore/ccsrc/backend/session/session_basic.cc
+++ b/mindspore/ccsrc/backend/session/session_basic.cc
@@ -66,12 +66,13 @@ tensor::TensorPtr CreateOutputTensor(const AnfNodePtr &node, size_t output_index
   if (type_id == kTypeUnknown) {
     type_id = AnfAlgo::GetOutputInferDataType(node, output_index);
   }
-  tensor::TensorPtr tensor;
+  tensor::TensorPtr tensor = nullptr;
   std::vector<int> temp_shape;
   if (graph->IsUniqueTargetInternalOutput(node, output_index)) {
     temp_shape.emplace_back(1);
     tensor = std::make_shared<tensor::Tensor>(type_id, temp_shape);
     tensor->set_device_address(address);
+    tensor->set_padding_type(AnfAlgo::GetOutputReshapeType(node, output_index));
     tensor->set_dirty(false);
     return tensor;
   }
@@ -86,6 +87,7 @@ tensor::TensorPtr CreateOutputTensor(const AnfNodePtr &node, size_t output_index
       graph->AddInternalOutputTensor(node, output_index, tensor);
     }
   }
+  tensor->set_padding_type(AnfAlgo::GetOutputReshapeType(node, output_index));
   // if in paynative mode,data only copyed to host when user want to print data
   auto ms_context = MsContext::GetInstance();
   MS_EXCEPTION_IF_NULL(ms_context);
@@ -240,6 +242,7 @@ ParameterPtr ConstructRunOpParameter(const std::shared_ptr &graph,
   } else {
     kernel_build_info_builder->SetOutputsFormat(std::vector<std::string>{device_address->format()});
     kernel_build_info_builder->SetOutputsDeviceType(std::vector<TypeId>{device_address->type_id()});
+    kernel_build_info_builder->SetOutputsReshapeType({input_tensor->padding_type()});
   }
   AnfAlgo::SetSelectKernelBuildInfo(kernel_build_info_builder->Build(), param.get());
   // construct abstract of parameter
diff --git a/mindspore/ccsrc/common/trans.cc b/mindspore/ccsrc/common/trans.cc
index 1b10a7d2f76b9028718d88b23cf107885d407221..d6da517fce28cc4c098b2f235fb73529300e23a1 100644
--- a/mindspore/ccsrc/common/trans.cc
+++ b/mindspore/ccsrc/common/trans.cc
@@ -399,7 +399,7 @@ std::vector GetRuntimePaddingShape(const AnfNodePtr &node, size_t index) {
   return shape;
 }
 
-std::vector<size_t> PaddingShapeTo4d(const std::vector<size_t> &shape, const std::vector<kernel::Axis> &padding_axis) {
+std::vector<size_t> PaddingShapeTo4d(const std::vector<size_t> &shape, const std::vector<Axis> &padding_axis) {
   if (padding_axis.empty() || shape.size() != padding_axis.size()) {
     return PaddingShapeTo4dByDefault(shape);
   }
diff --git a/mindspore/ccsrc/common/trans.h b/mindspore/ccsrc/common/trans.h
index 286c76afd0e878a3846ef055876f90312f2f1526..c815fbc31f8e28d58614665a55db7008fcc332ad 100644
--- a/mindspore/ccsrc/common/trans.h
+++ b/mindspore/ccsrc/common/trans.h
@@ -51,8 +51,7 @@ size_t TypeIdSize(const TypeId data_type);
 size_t ShapeSize(const std::vector<size_t> &shape);
 size_t CubeSizeByType(const TypeId data_type);
-std::vector<size_t> PaddingShapeTo4d(const std::vector<size_t> &shape,
-                                     const std::vector<kernel::Axis> &padding_axis = {});
+std::vector<size_t> PaddingShapeTo4d(const std::vector<size_t> &shape, const std::vector<Axis> &padding_axis = {});
 std::vector<int> GetRuntimePaddingShape(const AnfNodePtr &node, size_t index);
 bool IsNeedPadding(const std::string &format, const size_t shape_size);
 std::vector<size_t> TransShapeToDevice(const std::vector<size_t> &shape, const std::string &format);
diff --git a/mindspore/core/ir/kernel_info_dev.h b/mindspore/core/ir/kernel_info_dev.h
index 70665a14713dfceaf980c7997569fc7eff951dd4..9d20a9f67e33e36d249f4cf5875cf1653eeb3efd 100644
--- a/mindspore/core/ir/kernel_info_dev.h
+++ b/mindspore/core/ir/kernel_info_dev.h
@@ -20,6 +20,12 @@
 #include
 
 namespace mindspore {
+enum Axis : int {
+  N = 0,
+  C,
+  H,
+  W,
+};
 // Interface for device kernel program information.
 class KernelInfoDevice {
  public:
diff --git a/mindspore/core/ir/tensor.cc b/mindspore/core/ir/tensor.cc
index edcabc67be484d4873267c00b4f0d097f7409325..39340dc39a490568a72d00b12f58e1937c66e3e8 100644
--- a/mindspore/core/ir/tensor.cc
+++ b/mindspore/core/ir/tensor.cc
@@ -384,7 +384,8 @@ Tensor::Tensor(const Tensor &tensor)
       data_(tensor.data_),
       dirty_(tensor.dirty_),
       id_(tensor.id_),
-      device_sync_(tensor.device_sync_) {}
+      device_sync_(tensor.device_sync_),
+      padding_type_(tensor.padding_type()) {}
 
 Tensor::Tensor(const Tensor &tensor, TypeId data_type)
     : MetaTensor(data_type, tensor.shape_),
@@ -392,7 +393,8 @@ Tensor::Tensor(const Tensor &tensor, TypeId data_type)
       data_(MakeTensorData(data_type, tensor.shape_, tensor.data_->data(), tensor.data_type_)),
       dirty_(tensor.dirty_),
       id_(tensor.id_),
-      device_sync_(tensor.device_sync_) {}
+      device_sync_(tensor.device_sync_),
+      padding_type_(tensor.padding_type()) {}
 
 Tensor::Tensor(TypeId data_type, const std::vector<int> &shape, TensorDataPtr data)
     : MetaTensor(data_type, shape), data_(std::move(data)), id_(MakeId()) {}
@@ -441,6 +443,7 @@ Tensor &Tensor::AssignValue(const Tensor &tensor) {
     device_sync_ = tensor.device_sync_;
     data_ = tensor.data_;
     id_ = tensor.id_;
+    padding_type_ = tensor.padding_type_;
   }
   return *this;
 }
diff --git a/mindspore/core/ir/tensor.h b/mindspore/core/ir/tensor.h
index c61add5a23da32c40eb0988ff961785f871e2ee7..6db6147beb0d758aa8478459471658461e82e5d8 100644
--- a/mindspore/core/ir/tensor.h
+++ b/mindspore/core/ir/tensor.h
@@ -221,6 +221,8 @@ class Tensor : public MetaTensor {
   DeviceSyncPtr device_address() const { return device_sync_; }
   void set_device_address(const DeviceSyncPtr &device_sync) { device_sync_ = device_sync; }
+  void set_padding_type(std::vector<Axis> padding_type) { padding_type_ = padding_type; }
+  std::vector<Axis> padding_type() const { return padding_type_; }
 
   std::string id() const { return id_; }
 
@@ -230,6 +232,7 @@ class Tensor : public MetaTensor {
   bool dirty_{true};
   std::string id_{""};
   DeviceSyncPtr device_sync_{nullptr};
+  std::vector<Axis> padding_type_;
 };
 using TensorPtr = std::shared_ptr<Tensor>;
 using TensorPtrList = std::vector<std::shared_ptr<Tensor>>;
diff --git a/tests/ut/cpp/utils/signal_test.cc b/tests/ut/cpp/utils/signal_test.cc
index f8b5acd40f2c2b2be50327269730d04a0e29890b..6ae0c081925654399051d79b1e303d2671f8664b 100644
--- a/tests/ut/cpp/utils/signal_test.cc
+++ b/tests/ut/cpp/utils/signal_test.cc
@@ -54,10 +54,10 @@ class A {
   std::shared_ptr i;
 };
 
-class C : public A {
+class Ca : public A {
  public:
-  C() {}
-  explicit C(signals *sigs) : A(sigs) { printf("conn C:%p\n", this); }
+  Ca() {}
+  explicit Ca(signals *sigs) : A(sigs) { printf("conn C:%p\n", this); }
   void FuncA(int v1, float v2, std::string str) { printf("C: --%d--%f--%s--\n", v1, v2, str.c_str()); }
 };
 
@@ -71,13 +71,13 @@ class B : public A {
 TEST_F(TestSignal, test_common) {
   A objA;
   B objB;
-  C objC;
+  Ca objC;
 
   Signal signal;
   signal.connect(&objA, &A::FuncA);
   signal.connect(&objB, &B::FuncA);
-  signal.connect(&objC, &C::FuncA);
+  signal.connect(&objC, &Ca::FuncA);
   signal(20, 20, "Signal-Slot test");
 }
 
@@ -85,11 +85,11 @@ TEST_F(TestSignal, test_sigs) {
   signals sigs;
   A objA(&sigs);
   B objB(&sigs);
-  C objC(&sigs);
+  Ca objC(&sigs);
 
   sigs.signal.connect(&objA, &A::FuncA);
   sigs.signal.connect(&objB, &B::FuncA);
-  sigs.signal.connect(&objC, &C::FuncA);
+  sigs.signal.connect(&objC, &Ca::FuncA);
   sigs.signal(20, 20, "sigs Signal-Slot test");
 }
 
@@ -97,7 +97,7 @@ TEST_F(TestSignal, test_sigs_Named) {
   signals sigs;
   A objA(&sigs);
   B objB(&sigs);
-  C objC(&sigs);
+  Ca objC(&sigs);
 
   sigs.signal(10, 20, "Signal-Slot test");
   std::shared_ptr a;
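
Note (illustrative, not part of the patch): a minimal sketch of how the relocated Axis enum and the new Tensor padding-type accessors are intended to fit together, assuming only the declarations added above (mindspore::Axis in ir/kernel_info_dev.h and Tensor::set_padding_type/padding_type in ir/tensor.h). MakeAxisVector is a hypothetical helper name; it mirrors the switch in TbeKernelSelect::StringToAxisVector.

// Sketch only -- MakeAxisVector is a hypothetical helper, not part of this patch.
#include <string>
#include <vector>
#include "ir/kernel_info_dev.h"  // brings mindspore::Axis (N, C, H, W) after this patch

namespace mindspore {
// Convert a reshape-type string such as "NC" into std::vector<Axis>,
// mirroring the switch in TbeKernelSelect::StringToAxisVector.
std::vector<Axis> MakeAxisVector(const std::string &reshape_type_str) {
  std::vector<Axis> axes;
  for (const auto &c : reshape_type_str) {
    switch (c) {
      case 'N': axes.push_back(N); break;
      case 'C': axes.push_back(C); break;
      case 'H': axes.push_back(H); break;
      case 'W': axes.push_back(W); break;
      default: break;  // unknown characters are skipped in this sketch
    }
  }
  return axes;
}
}  // namespace mindspore

// Usage sketch: record the padding axes on an output tensor so a later consumer
// (for example a ConstructRunOpParameter-style builder) can read them back:
//   tensor->set_padding_type(mindspore::MakeAxisVector("NC"));
//   auto axes = tensor->padding_type();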