Commit 2753aa5a
Authored on May 26, 2020 by mindspore-ci-bot; committed by Gitee on May 26, 2020
!1487 add cpu kernel "AddN"
Merge pull request !1487 from sunsuodong/addn
Parents: 6bf12a2e, ade90be4

Showing 7 changed files with 230 additions and 21 deletions (+230 -21)
mindspore/ccsrc/device/cpu/kernel_select_cpu.cc    +1 -1
mindspore/ccsrc/kernel/cpu/addn_cpu_kernel.cc      +66 -0
mindspore/ccsrc/kernel/cpu/addn_cpu_kernel.h       +56 -0
mindspore/ccsrc/kernel/cpu/cpu_kernel_factory.cc   +25 -18
mindspore/ccsrc/kernel/cpu/cpu_kernel_factory.h    +3 -1
tests/st/ops/cpu/test_addn_op.py                   +78 -0
tests/st/ops/cpu/test_slice_op.py                  +1 -1
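For context, AddN sums a tuple of same-shaped tensors element-wise. A minimal usage sketch against the new CPU backend, modelled on the test cases added in this commit (illustrative only; the class name AddNNet is not part of the diff), might look like:

import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P

context.set_context(mode=context.GRAPH_MODE, device_target='CPU')

class AddNNet(nn.Cell):
    # Hypothetical wrapper, mirroring Net2I from tests/st/ops/cpu/test_addn_op.py below.
    def __init__(self):
        super(AddNNet, self).__init__()
        self.addn = P.AddN()

    def construct(self, x, y):
        return self.addn((x, y))

x = Tensor(np.ones((2, 3, 2), np.float32))
y = Tensor(np.ones((2, 3, 2), np.float32))
print(AddNNet()(x, y))  # element-wise sum: every entry equals 2.0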
mindspore/ccsrc/device/cpu/kernel_select_cpu.cc
@@ -85,7 +85,7 @@ bool IsInputFormatDtypeMatched(const KernelAttr &kernel_attr, const std::vector<
                                 const std::vector<TypeId> &input_types,
                                 const std::vector<size_t> &input_not_cnode_indexes) {
   if (kernel_attr.GetInputSize() != input_types.size()) {
-    MS_LOG(ERROR) << "required input num:" << kernel_attr.GetInputSize() << ", actual input num:" << input_types.size();
+    MS_LOG(DEBUG) << "required input num:" << kernel_attr.GetInputSize() << ", actual input num:" << input_types.size();
     return false;
   }
   auto input_num = input_types.size();
mindspore/ccsrc/kernel/cpu/addn_cpu_kernel.cc
new file mode 100644
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "kernel/cpu/addn_cpu_kernel.h"
#include "device/cpu/cpu_device_address.h"
#include "ir/primitive.h"
namespace mindspore {
namespace kernel {
void AddNCPUKernel::InitKernel(const CNodePtr &kernel_node) {
  CheckParam(kernel_node);
  input_num_ = AnfAlgo::GetInputTensorNum(kernel_node);
  output_shape_ = AnfAlgo::GetOutputInferShape(kernel_node, 0);
  CPUKernelUtils::ExpandDimsTo4(&output_shape_);
}

bool AddNCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs,
                           const std::vector<kernel::AddressPtr> & /*workspace*/,
                           const std::vector<kernel::AddressPtr> &outputs) {
  auto output_addr = reinterpret_cast<float *>(outputs[0]->addr);
  for (size_t i = 0; i < output_shape_[0]; ++i) {
    for (size_t j = 0; j < output_shape_[1]; ++j) {
      for (size_t k = 0; k < output_shape_[2]; ++k) {
        for (size_t m = 0; m < output_shape_[3]; ++m) {
          auto offset = CPUKernelUtils::CalcOffset(output_shape_, i, j, k, m);
          float sum = 0;
          for (size_t index = 0; index < input_num_; ++index) {
            auto input_addr = reinterpret_cast<float *>(inputs[index]->addr);
            sum += input_addr[offset];
          }
          output_addr[offset] = sum;
        }
      }
    }
  }
  return true;
}

void AddNCPUKernel::CheckParam(const CNodePtr &kernel_node) {
  auto input_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
  if (input_shape.size() > 4) {
    MS_LOG(EXCEPTION) << "Input dims is " << input_shape.size() << ", but AddNCPUKernel only supports 4d or lower.";
  }
  size_t output_num = AnfAlgo::GetOutputTensorNum(kernel_node);
  if (output_num != 1) {
    MS_LOG(EXCEPTION) << "Output number is " << output_num << ", but AddNCPUKernel needs 1 output.";
  }
}
}  // namespace kernel
}  // namespace mindspore
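For reference, the nested loops in Launch simply accumulate an element-wise sum over the output shape (padded to 4D by ExpandDimsTo4). A rough NumPy equivalent of the computation, assuming same-shaped float32 inputs (illustrative only, not part of the commit):

import numpy as np

def addn_reference(inputs):
    # Element-wise sum of N same-shaped arrays, matching what AddNCPUKernel::Launch computes.
    out = np.zeros_like(inputs[0], dtype=np.float32)
    for arr in inputs:
        out += arr
    return out

a = np.arange(12, dtype=np.float32).reshape(2, 3, 2)
b = np.arange(12, dtype=np.float32).reshape(2, 3, 2)
print(addn_reference([a, b]))  # matches the expected values in test_net_2Input below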
mindspore/ccsrc/kernel/cpu/addn_cpu_kernel.h
new file mode 100644
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_KERNEL_CPU_ADDN_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_KERNEL_CPU_ADDN_CPU_KERNEL_H_
#include <vector>
#include <memory>
#include "kernel/cpu/cpu_kernel.h"
#include "kernel/cpu/cpu_kernel_factory.h"
namespace mindspore {
namespace kernel {
class AddNCPUKernel : public CPUKernel {
 public:
  AddNCPUKernel() : input_num_(0) {}
  ~AddNCPUKernel() override = default;

  void InitKernel(const CNodePtr &kernel_node) override;

  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
              const std::vector<AddressPtr> &outputs) override;

 private:
  void CheckParam(const CNodePtr &kernel_node);
  size_t input_num_;
  std::vector<size_t> output_shape_;
};

MS_REG_CPU_KERNEL(
  AddN,
  KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
  AddNCPUKernel);

MS_REG_CPU_KERNEL(AddN,
                  KernelAttr()
                    .AddInputAttr(kNumberTypeFloat32)
                    .AddInputAttr(kNumberTypeFloat32)
                    .AddInputAttr(kNumberTypeFloat32)
                    .AddOutputAttr(kNumberTypeFloat32),
                  AddNCPUKernel);
}  // namespace kernel
}  // namespace mindspore
#endif // MINDSPORE_CCSRC_KERNEL_CPU_ADDN_CPU_KERNEL_H_
mindspore/ccsrc/kernel/cpu/cpu_kernel_factory.cc
@@ -42,7 +42,7 @@ std::shared_ptr<CPUKernel> CPUKernelFactory::Create(const std::string &kernel_na
   MS_EXCEPTION_IF_NULL(kernel_info);
   const KernelBuildInfo *kernel_build_Info = kernel_info->select_kernel_build_info();
   MS_EXCEPTION_IF_NULL(kernel_build_Info);
-  std::pair<bool, size_t> ret_pair = CPUKernelAttrCheck(kernel_name, kernel_build_Info);
+  std::pair<bool, size_t> ret_pair = CPUKernelAttrCheck(kernel_name, *kernel_build_Info);
   if (ret_pair.first) {
     return (name_to_attr_creator_.find(kernel_name)->second)[ret_pair.second].second();
   }
@@ -50,7 +50,7 @@ std::shared_ptr<CPUKernel> CPUKernelFactory::Create(const std::string &kernel_na
 }

 std::pair<bool, size_t> CPUKernelFactory::CPUKernelAttrCheck(const std::string &kernel_name,
-                                                             const KernelBuildInfo *kernel_info) {
+                                                             const KernelBuildInfo &kernel_info) {
   auto iter = name_to_attr_creator_.find(kernel_name);
   if (iter == name_to_attr_creator_.end()) {
     MS_LOG(INFO) << "Not registered CPU kernel: op[" << kernel_name << "]!";
@@ -59,27 +59,34 @@ std::pair<bool, size_t> CPUKernelFactory::CPUKernelAttrCheck(const std::string &
   auto creators = iter->second;
   for (size_t index = 0; index < creators.size(); ++index) {
     auto attr_creator = creators[index];
-    for (size_t i = 0; i < kernel_info->GetInputNum(); ++i) {
-      if (kernel_info->GetInputDeviceType(i) != attr_creator.first.GetInputAttr(i).first) {
-        MS_LOG(WARNING) << "cpu kernel attr check failed. input index: " << i << ".";
-        MS_LOG(WARNING) << "kernel info type:" << kernel_info->GetInputDeviceType(i) << ", "
-                        << "register type:" << attr_creator.first.GetInputAttr(i).first;
-        return std::make_pair(false, 0);
-      }
-    }
-    for (size_t i = 0; i < kernel_info->GetOutputNum(); ++i) {
-      if (kernel_info->GetOutputDeviceType(i) != attr_creator.first.GetOutputAttr(i).first) {
-        MS_LOG(WARNING) << "cpu kernel attr check failed. output index: " << i << ".";
-        MS_LOG(WARNING) << "kernel info type:" << kernel_info->GetOutputDeviceType(i) << ", "
-                        << "register type:" << attr_creator.first.GetOutputAttr(i).first;
-        return std::make_pair(false, 0);
-      }
-    }
-    return std::make_pair(true, index);
+    if (CPUKernelSingleAttrCheck(attr_creator, kernel_info)) {
+      return std::make_pair(true, index);
+    }
   }
   return std::make_pair(false, 0);
 }

+bool CPUKernelFactory::CPUKernelSingleAttrCheck(const std::pair<KernelAttr, CPUKernelCreator> &attr_creator,
+                                                const KernelBuildInfo &kernel_info) {
+  for (size_t i = 0; i < kernel_info.GetInputNum(); ++i) {
+    if (kernel_info.GetInputDeviceType(i) != attr_creator.first.GetInputAttr(i).first) {
+      MS_LOG(DEBUG) << "cpu kernel attr check failed. input index: " << i << ".";
+      MS_LOG(DEBUG) << "kernel info type:" << kernel_info.GetInputDeviceType(i) << ", "
+                    << "register type:" << attr_creator.first.GetInputAttr(i).first;
+      return false;
+    }
+  }
+  for (size_t i = 0; i < kernel_info.GetOutputNum(); ++i) {
+    if (kernel_info.GetOutputDeviceType(i) != attr_creator.first.GetOutputAttr(i).first) {
+      MS_LOG(DEBUG) << "cpu kernel attr check failed. output index: " << i << ".";
+      MS_LOG(DEBUG) << "kernel info type:" << kernel_info.GetOutputDeviceType(i) << ", "
+                    << "register type:" << attr_creator.first.GetOutputAttr(i).first;
+      return false;
+    }
+  }
+  return true;
+}
+
 std::vector<KernelAttr> CPUKernelFactory::GetSupportedKernelAttrList(const std::string &kernel_name) {
   std::vector<KernelAttr> result;
   auto iter = name_to_attr_creator_.find(kernel_name);
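The refactor above moves the per-candidate type check into CPUKernelSingleAttrCheck so that several registrations under one op name (here, the 2-input and 3-input AddN variants) can be tried in turn instead of failing on the first mismatch, with the log level lowered because a mismatch is now an expected intermediate outcome. A simplified Python sketch of that selection logic, using hypothetical names for illustration only:

def select_kernel(candidates, input_types, output_types):
    # candidates: list of (attr, creator) pairs registered for one op name, where attr is a
    # dict {'inputs': [...], 'outputs': [...]} of device types.
    # Mirrors CPUKernelFactory::CPUKernelAttrCheck returning (matched, index).
    for index, (attr, _creator) in enumerate(candidates):
        if len(attr['inputs']) < len(input_types) or len(attr['outputs']) < len(output_types):
            continue
        inputs_ok = all(t == attr['inputs'][i] for i, t in enumerate(input_types))
        outputs_ok = all(t == attr['outputs'][i] for i, t in enumerate(output_types))
        if inputs_ok and outputs_ok:
            return True, index
    return False, 0

# e.g. two AddN registrations: one taking 2 float32 inputs, one taking 3.
addn_candidates = [({'inputs': ['float32'] * 2, 'outputs': ['float32']}, None),
                   ({'inputs': ['float32'] * 3, 'outputs': ['float32']}, None)]
print(select_kernel(addn_candidates, ['float32'] * 3, ['float32']))  # (True, 1)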
mindspore/ccsrc/kernel/cpu/cpu_kernel_factory.h
@@ -43,7 +43,9 @@ class CPUKernelFactory {
   CPUKernelFactory() = default;
   ~CPUKernelFactory() = default;
   DISABLE_COPY_AND_ASSIGN(CPUKernelFactory)
-  std::pair<bool, size_t> CPUKernelAttrCheck(const std::string &kernel_name, const KernelBuildInfo *kernel_info);
+  std::pair<bool, size_t> CPUKernelAttrCheck(const std::string &kernel_name, const KernelBuildInfo &kernel_info);
+  bool CPUKernelSingleAttrCheck(const std::pair<KernelAttr, CPUKernelCreator> &attr_creator,
+                                const KernelBuildInfo &kernel_info);
   std::map<std::string, std::vector<std::pair<KernelAttr, CPUKernelCreator>>> name_to_attr_creator_;
 };
tests/st/ops/cpu/test_addn_op.py
new file mode 100644
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.ops import operations as P

context.set_context(mode=context.GRAPH_MODE, device_target='CPU')


class Net2I(nn.Cell):
    def __init__(self):
        super(Net2I, self).__init__()
        self.addn = P.AddN()

    def construct(self, x, y):
        return self.addn((x, y))


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_net_2Input():
    x = np.arange(2 * 3 * 2).reshape(2, 3, 2).astype(np.float32)
    y = np.arange(2 * 3 * 2).reshape(2, 3, 2).astype(np.float32)
    addn = Net2I()
    output = addn(Tensor(x, mstype.float32), Tensor(y, mstype.float32))
    print("output:\n", output)
    expect_result = [[[0., 2.],
                      [4., 6.],
                      [8., 10.]],
                     [[12., 14.],
                      [16., 18.],
                      [20., 22.]]]
    assert (output.asnumpy() == expect_result).all()


class Net3I(nn.Cell):
    def __init__(self):
        super(Net3I, self).__init__()
        self.addn = P.AddN()

    def construct(self, x, y, z):
        return self.addn((x, y, z))


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_net_3Input():
    x = np.arange(2 * 3).reshape(2, 3).astype(np.float32)
    y = np.arange(2 * 3).reshape(2, 3).astype(np.float32)
    z = np.arange(2 * 3).reshape(2, 3).astype(np.float32)
    addn = Net3I()
    output = addn(Tensor(x, mstype.float32), Tensor(y, mstype.float32), Tensor(z, mstype.float32))
    print("output:\n", output)
    expect_result = [[0., 3., 6.],
                     [9., 12., 15]]
    assert (output.asnumpy() == expect_result).all()


if __name__ == '__main__':
    test_net_2Input()
    test_net_3Input()
tests/st/ops/cpu/test_slice_op.py
-# Copyright 2019 Huawei Technologies Co., Ltd
+# Copyright 2020 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.