diff --git a/mindspore/ccsrc/device/cpu/kernel_select_cpu.cc b/mindspore/ccsrc/device/cpu/kernel_select_cpu.cc
index 6972a58125d3cfc46cc136925115d2cbd0e2322b..95a75e268821007e133ac9ccb893fb84152747f8 100644
--- a/mindspore/ccsrc/device/cpu/kernel_select_cpu.cc
+++ b/mindspore/ccsrc/device/cpu/kernel_select_cpu.cc
@@ -85,7 +85,7 @@ bool IsInputFormatDtypeMatched(const KernelAttr &kernel_attr, const std::vector<
                                const std::vector<TypeId> &input_types,
                                const std::vector<size_t> &input_not_cnode_indexes) {
   if (kernel_attr.GetInputSize() != input_types.size()) {
-    MS_LOG(ERROR) << "required input num:" << kernel_attr.GetInputSize() << ", actual input num:" << input_types.size();
+    MS_LOG(DEBUG) << "required input num:" << kernel_attr.GetInputSize() << ", actual input num:" << input_types.size();
     return false;
   }
   auto input_num = input_types.size();
diff --git a/mindspore/ccsrc/kernel/cpu/addn_cpu_kernel.cc b/mindspore/ccsrc/kernel/cpu/addn_cpu_kernel.cc
new file mode 100644
index 0000000000000000000000000000000000000000..d0db0c7685802c7ce0ba88f3b1ddccffcee4c1b2
--- /dev/null
+++ b/mindspore/ccsrc/kernel/cpu/addn_cpu_kernel.cc
@@ -0,0 +1,66 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kernel/cpu/addn_cpu_kernel.h"
+#include "device/cpu/cpu_device_address.h"
+#include "ir/primitive.h"
+
+namespace mindspore {
+namespace kernel {
+void AddNCPUKernel::InitKernel(const CNodePtr &kernel_node) {
+  CheckParam(kernel_node);
+  input_num_ = AnfAlgo::GetInputTensorNum(kernel_node);
+  output_shape_ = AnfAlgo::GetOutputInferShape(kernel_node, 0);
+  CPUKernelUtils::ExpandDimsTo4(&output_shape_);
+}
+
+bool AddNCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs,
+                           const std::vector<kernel::AddressPtr> & /*workspace*/,
+                           const std::vector<kernel::AddressPtr> &outputs) {
+  auto output_addr = reinterpret_cast<float *>(outputs[0]->addr);
+
+  for (size_t i = 0; i < output_shape_[0]; ++i) {
+    for (size_t j = 0; j < output_shape_[1]; ++j) {
+      for (size_t k = 0; k < output_shape_[2]; ++k) {
+        for (size_t m = 0; m < output_shape_[3]; ++m) {
+          auto offset = CPUKernelUtils::CalcOffset(output_shape_, i, j, k, m);
+          float sum = 0;
+          for (size_t index = 0; index < input_num_; ++index) {
+            auto input_addr = reinterpret_cast<float *>(inputs[index]->addr);
+            sum += input_addr[offset];
+          }
+          output_addr[offset] = sum;
+        }
+      }
+    }
+  }
+
+  return true;
+}
+
+void AddNCPUKernel::CheckParam(const CNodePtr &kernel_node) {
+  auto input_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
+  if (input_shape.size() > 4) {
+    MS_LOG(EXCEPTION) << "Input dims is " << input_shape.size() << ", but AddNCPUKernel only supports 4D or lower.";
+  }
+
+  size_t output_num = AnfAlgo::GetOutputTensorNum(kernel_node);
+  if (output_num != 1) {
+    MS_LOG(EXCEPTION) << "Output number is " << output_num << ", but AddNCPUKernel needs 1 output.";
+  }
+}
+}  // namespace kernel
+}  // namespace mindspore
diff --git a/mindspore/ccsrc/kernel/cpu/addn_cpu_kernel.h b/mindspore/ccsrc/kernel/cpu/addn_cpu_kernel.h
new file mode 100644
index 0000000000000000000000000000000000000000..70b65233c7304458b43c778b628637fdb832c135
--- /dev/null
+++ b/mindspore/ccsrc/kernel/cpu/addn_cpu_kernel.h
@@ -0,0 +1,56 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_KERNEL_CPU_ADDN_CPU_KERNEL_H_
+#define MINDSPORE_CCSRC_KERNEL_CPU_ADDN_CPU_KERNEL_H_
+#include <vector>
+#include <memory>
+#include "kernel/cpu/cpu_kernel.h"
+#include "kernel/cpu/cpu_kernel_factory.h"
+
+namespace mindspore {
+namespace kernel {
+class AddNCPUKernel : public CPUKernel {
+ public:
+  AddNCPUKernel() : input_num_(0) {}
+  ~AddNCPUKernel() override = default;
+
+  void InitKernel(const CNodePtr &kernel_node) override;
+
+  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
+              const std::vector<AddressPtr> &outputs) override;
+
+ private:
+  void CheckParam(const CNodePtr &kernel_node);
+  size_t input_num_;
+  std::vector<size_t> output_shape_;
+};
+
+MS_REG_CPU_KERNEL(
+  AddN,
+  KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
+  AddNCPUKernel);
+MS_REG_CPU_KERNEL(AddN,
+                  KernelAttr()
+                    .AddInputAttr(kNumberTypeFloat32)
+                    .AddInputAttr(kNumberTypeFloat32)
+                    .AddInputAttr(kNumberTypeFloat32)
+                    .AddOutputAttr(kNumberTypeFloat32),
+                  AddNCPUKernel);
+}  // namespace kernel
+}  // namespace mindspore
+
+#endif  // MINDSPORE_CCSRC_KERNEL_CPU_ADDN_CPU_KERNEL_H_
diff --git a/mindspore/ccsrc/kernel/cpu/cpu_kernel_factory.cc b/mindspore/ccsrc/kernel/cpu/cpu_kernel_factory.cc
index 54d6d2b46faca2829df0c9481c169de94f33e081..55b37ba13fa5da03107ed4b19c6eae9bc7e297de 100644
--- a/mindspore/ccsrc/kernel/cpu/cpu_kernel_factory.cc
+++ b/mindspore/ccsrc/kernel/cpu/cpu_kernel_factory.cc
@@ -42,7 +42,7 @@ std::shared_ptr<CPUKernel> CPUKernelFactory::Create(const std::string &kernel_na
   MS_EXCEPTION_IF_NULL(kernel_info);
   const KernelBuildInfo *kernel_build_Info = kernel_info->select_kernel_build_info();
   MS_EXCEPTION_IF_NULL(kernel_build_Info);
-  std::pair<bool, size_t> ret_pair = CPUKernelAttrCheck(kernel_name, kernel_build_Info);
+  std::pair<bool, size_t> ret_pair = CPUKernelAttrCheck(kernel_name, *kernel_build_Info);
   if (ret_pair.first) {
     return (name_to_attr_creator_.find(kernel_name)->second)[ret_pair.second].second();
   }
@@ -50,7 +50,7 @@ std::shared_ptr<CPUKernel> CPUKernelFactory::Create(const std::string &kernel_na
 }
 
 std::pair<bool, size_t> CPUKernelFactory::CPUKernelAttrCheck(const std::string &kernel_name,
-                                                             const KernelBuildInfo *kernel_info) {
+                                                             const KernelBuildInfo &kernel_info) {
   auto iter = name_to_attr_creator_.find(kernel_name);
   if (iter == name_to_attr_creator_.end()) {
     MS_LOG(INFO) << "Not registered CPU kernel: op[" << kernel_name << "]!";
@@ -59,27 +59,34 @@
   auto creators = iter->second;
   for (size_t index = 0; index < creators.size(); ++index) {
     auto attr_creator = creators[index];
-    for (size_t i = 0; i < kernel_info->GetInputNum(); ++i) {
-      if (kernel_info->GetInputDeviceType(i) != attr_creator.first.GetInputAttr(i).first) {
-        MS_LOG(WARNING) << "cpu kernel attr check failed. input index: " << i << ".";
-        MS_LOG(WARNING) << "kernel info type:" << kernel_info->GetInputDeviceType(i) << ", "
-                        << "register type:" << attr_creator.first.GetInputAttr(i).first;
-        return std::make_pair(false, 0);
-      }
+    if (CPUKernelSingleAttrCheck(attr_creator, kernel_info)) {
+      return std::make_pair(true, index);
     }
-    for (size_t i = 0; i < kernel_info->GetOutputNum(); ++i) {
-      if (kernel_info->GetOutputDeviceType(i) != attr_creator.first.GetOutputAttr(i).first) {
-        MS_LOG(WARNING) << "cpu kernel attr check failed. output index: " << i << ".";
-        MS_LOG(WARNING) << "kernel info type:" << kernel_info->GetOutputDeviceType(i) << ", "
-                        << "register type:" << attr_creator.first.GetOutputAttr(i).first;
-        return std::make_pair(false, 0);
-      }
-    }
-    return std::make_pair(true, index);
   }
   return std::make_pair(false, 0);
 }
 
+bool CPUKernelFactory::CPUKernelSingleAttrCheck(const std::pair<KernelAttr, CPUKernelCreator> &attr_creator,
+                                                const KernelBuildInfo &kernel_info) {
+  for (size_t i = 0; i < kernel_info.GetInputNum(); ++i) {
+    if (kernel_info.GetInputDeviceType(i) != attr_creator.first.GetInputAttr(i).first) {
+      MS_LOG(DEBUG) << "cpu kernel attr check failed. input index: " << i << ".";
+      MS_LOG(DEBUG) << "kernel info type:" << kernel_info.GetInputDeviceType(i) << ", "
+                    << "register type:" << attr_creator.first.GetInputAttr(i).first;
+      return false;
+    }
+  }
+  for (size_t i = 0; i < kernel_info.GetOutputNum(); ++i) {
+    if (kernel_info.GetOutputDeviceType(i) != attr_creator.first.GetOutputAttr(i).first) {
+      MS_LOG(DEBUG) << "cpu kernel attr check failed. output index: " << i << ".";
+      MS_LOG(DEBUG) << "kernel info type:" << kernel_info.GetOutputDeviceType(i) << ", "
+                    << "register type:" << attr_creator.first.GetOutputAttr(i).first;
+      return false;
+    }
+  }
+  return true;
+}
+
 std::vector<KernelAttr> CPUKernelFactory::GetSupportedKernelAttrList(const std::string &kernel_name) {
   std::vector<KernelAttr> result;
   auto iter = name_to_attr_creator_.find(kernel_name);
diff --git a/mindspore/ccsrc/kernel/cpu/cpu_kernel_factory.h b/mindspore/ccsrc/kernel/cpu/cpu_kernel_factory.h
index 4a10c0ba5f575711f8e2c677151c6e5393deb267..b3901d257ecf9bf876f65ec659e90ac6cbb77d34 100644
--- a/mindspore/ccsrc/kernel/cpu/cpu_kernel_factory.h
+++ b/mindspore/ccsrc/kernel/cpu/cpu_kernel_factory.h
@@ -43,7 +43,9 @@ class CPUKernelFactory {
   CPUKernelFactory() = default;
   ~CPUKernelFactory() = default;
   DISABLE_COPY_AND_ASSIGN(CPUKernelFactory)
-  std::pair<bool, size_t> CPUKernelAttrCheck(const std::string &kernel_name, const KernelBuildInfo *kernel_info);
+  std::pair<bool, size_t> CPUKernelAttrCheck(const std::string &kernel_name, const KernelBuildInfo &kernel_info);
+  bool CPUKernelSingleAttrCheck(const std::pair<KernelAttr, CPUKernelCreator> &attr_creator,
+                                const KernelBuildInfo &kernel_info);
   std::map<std::string, std::vector<std::pair<KernelAttr, CPUKernelCreator>>> name_to_attr_creator_;
 };
 
diff --git a/tests/st/ops/cpu/test_addn_op.py b/tests/st/ops/cpu/test_addn_op.py
new file mode 100644
index 0000000000000000000000000000000000000000..f239313eefb53228b5250c11127b8dfb49454dc4
--- /dev/null
+++ b/tests/st/ops/cpu/test_addn_op.py
@@ -0,0 +1,78 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import numpy as np
+import pytest
+
+import mindspore.context as context
+import mindspore.nn as nn
+from mindspore import Tensor
+from mindspore.common import dtype as mstype
+from mindspore.ops import operations as P
+
+context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
+
+class Net2I(nn.Cell):
+    def __init__(self):
+        super(Net2I, self).__init__()
+        self.addn = P.AddN()
+
+    def construct(self, x, y):
+        return self.addn((x, y))
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_cpu
+@pytest.mark.env_onecard
+def test_net_2Input():
+    x = np.arange(2 * 3 * 2).reshape(2, 3, 2).astype(np.float32)
+    y = np.arange(2 * 3 * 2).reshape(2, 3, 2).astype(np.float32)
+    addn = Net2I()
+    output = addn(Tensor(x, mstype.float32), Tensor(y, mstype.float32))
+    print("output:\n", output)
+    expect_result = [[[0., 2.],
+                      [4., 6.],
+                      [8., 10.]],
+                     [[12., 14.],
+                      [16., 18.],
+                      [20., 22.]]]
+
+    assert (output.asnumpy() == expect_result).all()
+
+class Net3I(nn.Cell):
+    def __init__(self):
+        super(Net3I, self).__init__()
+        self.addn = P.AddN()
+
+    def construct(self, x, y, z):
+        return self.addn((x, y, z))
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_cpu
+@pytest.mark.env_onecard
+def test_net_3Input():
+    x = np.arange(2 * 3).reshape(2, 3).astype(np.float32)
+    y = np.arange(2 * 3).reshape(2, 3).astype(np.float32)
+    z = np.arange(2 * 3).reshape(2, 3).astype(np.float32)
+    addn = Net3I()
+    output = addn(Tensor(x, mstype.float32), Tensor(y, mstype.float32), Tensor(z, mstype.float32))
+    print("output:\n", output)
+    expect_result = [[0., 3., 6.],
+                     [9., 12., 15]]
+
+    assert (output.asnumpy() == expect_result).all()
+
+if __name__ == '__main__':
+    test_net_2Input()
+    test_net_3Input()
diff --git a/tests/st/ops/cpu/test_slice_op.py b/tests/st/ops/cpu/test_slice_op.py
index 97890697954dfeeb15811422c317481b19d208b7..0f0aa53d04bb106ceadb4193c605b40e2a82b3e9 100644
--- a/tests/st/ops/cpu/test_slice_op.py
+++ b/tests/st/ops/cpu/test_slice_op.py
@@ -1,4 +1,4 @@
-# Copyright 2019 Huawei Technologies Co., Ltd
+# Copyright 2020 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
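Note on the new kernel's inner loop: the diff does not show what `CPUKernelUtils::CalcOffset` and `CPUKernelUtils::ExpandDimsTo4` do. The standalone sketch below is a minimal approximation, assuming `CalcOffset` computes a plain row-major offset over the shape that `ExpandDimsTo4` pads to 4-D with leading 1s; the real helpers are defined elsewhere under kernel/cpu/ and may differ in detail.

// Standalone sketch of the AddN inner loop from addn_cpu_kernel.cc.
// Assumption: CalcOffset is a plain row-major offset over a 4-D shape.
#include <cstddef>
#include <iostream>
#include <vector>

// Assumed behaviour of CPUKernelUtils::CalcOffset for a shape expanded to 4-D.
size_t CalcOffset(const std::vector<size_t> &shape, size_t i, size_t j, size_t k, size_t m) {
  return ((i * shape[1] + j) * shape[2] + k) * shape[3] + m;
}

// Element-wise sum of all inputs into output, mirroring AddNCPUKernel::Launch.
void AddN(const std::vector<const float *> &inputs, float *output, const std::vector<size_t> &shape) {
  for (size_t i = 0; i < shape[0]; ++i) {
    for (size_t j = 0; j < shape[1]; ++j) {
      for (size_t k = 0; k < shape[2]; ++k) {
        for (size_t m = 0; m < shape[3]; ++m) {
          size_t offset = CalcOffset(shape, i, j, k, m);
          float sum = 0;
          for (const float *in : inputs) {
            sum += in[offset];
          }
          output[offset] = sum;
        }
      }
    }
  }
}

int main() {
  // A 2 x 3 tensor expanded to 4-D as {1, 1, 2, 3}, matching ExpandDimsTo4.
  std::vector<size_t> shape = {1, 1, 2, 3};
  std::vector<float> x = {0, 1, 2, 3, 4, 5};
  std::vector<float> y = x, z = x, out(6, 0.0f);
  AddN({x.data(), y.data(), z.data()}, out.data(), shape);
  for (float v : out) {
    std::cout << v << " ";  // prints: 0 3 6 9 12 15
  }
  std::cout << std::endl;
  return 0;
}

With the three arange(6) inputs from `test_net_3Input`, the sketch reproduces the expected `[[0, 3, 6], [9, 12, 15]]`.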
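Note on the factory refactor: `CPUKernelAttrCheck` now tries every registered attr for an op and returns the index of the first one whose input and output dtypes all match, instead of giving up after the first registration. The sketch below is a simplified, standalone rendering of that selection logic; `SimpleAttr`, `SimpleBuildInfo` and the `TypeId` stand-in are invented here for illustration and are not MindSpore types.

// Standalone sketch of the refactored attr matching in CPUKernelFactory:
// each registered attr is checked in turn and the first full match wins.
#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

using TypeId = int;  // stand-in for mindspore::TypeId

struct SimpleAttr {       // stand-in for a registered KernelAttr
  std::vector<TypeId> inputs;
  std::vector<TypeId> outputs;
};

struct SimpleBuildInfo {  // stand-in for the node's KernelBuildInfo
  std::vector<TypeId> inputs;
  std::vector<TypeId> outputs;
};

// Mirrors CPUKernelSingleAttrCheck: every input/output dtype must match.
// The size checks are added here for safety; the diff indexes the registered attr directly.
bool SingleAttrCheck(const SimpleAttr &attr, const SimpleBuildInfo &info) {
  for (size_t i = 0; i < info.inputs.size(); ++i) {
    if (i >= attr.inputs.size() || info.inputs[i] != attr.inputs[i]) return false;
  }
  for (size_t i = 0; i < info.outputs.size(); ++i) {
    if (i >= attr.outputs.size() || info.outputs[i] != attr.outputs[i]) return false;
  }
  return true;
}

// Mirrors CPUKernelAttrCheck: return (matched, index of the first matching attr).
std::pair<bool, size_t> AttrCheck(const std::vector<SimpleAttr> &attrs, const SimpleBuildInfo &info) {
  for (size_t index = 0; index < attrs.size(); ++index) {
    if (SingleAttrCheck(attrs[index], info)) return {true, index};
  }
  return {false, 0};
}

int main() {
  const TypeId kFloat32 = 43;  // arbitrary stand-in value
  // Two registrations, as for AddN: a 2-input and a 3-input float32 variant.
  std::vector<SimpleAttr> attrs = {{{kFloat32, kFloat32}, {kFloat32}},
                                   {{kFloat32, kFloat32, kFloat32}, {kFloat32}}};
  SimpleBuildInfo node{{kFloat32, kFloat32, kFloat32}, {kFloat32}};
  auto ret = AttrCheck(attrs, node);
  std::cout << std::boolalpha << ret.first << " " << ret.second << std::endl;  // true 1
  return 0;
}

This fall-through behaviour is why the two `MS_REG_CPU_KERNEL` entries for AddN in addn_cpu_kernel.h can coexist: a three-input node fails the two-input registration's check and is matched by the second registration instead.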