Commit f93e6beb
Authored May 28, 2020 by kswang
add cpu strided slice
Parent fb7e4eac

Showing 7 changed files with 190 additions and 47 deletions (+190 −47)
mindspore/ccsrc/kernel/cpu/cpu_kernel.h              +1  −0
mindspore/ccsrc/kernel/cpu/slice_cpu_kernel.cc       +42 −20
mindspore/ccsrc/kernel/cpu/slice_cpu_kernel.h        +4  −1
mindspore/ccsrc/kernel/cpu/slice_grad_cpu_kernel.cc  +45 −24
mindspore/ccsrc/kernel/cpu/slice_grad_cpu_kernel.h   +4  −2
tests/st/ops/cpu/test_stridedslice_grad_op.py        +49 −0
tests/st/ops/cpu/test_stridedslice_op.py             +45 −0
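This commit wires the StridedSlice and StridedSliceGrad primitives to the existing CPU slice kernels and teaches those kernels to honor begin/end/strides attributes. For orientation, a minimal usage sketch (assuming the MindSpore Python API of this release; it mirrors the new test tests/st/ops/cpu/test_stridedslice_op.py below):

```python
import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P

context.set_context(mode=context.GRAPH_MODE, device_target='CPU')


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.stridedslice = P.StridedSlice()

    def construct(self, x):
        # begin=(2, 0, 0), end=(3, 2, 3), strides=(1, 1, 1): keep the last 2x3 block.
        return self.stridedslice(x, (2, 0, 0), (3, 2, 3), (1, 1, 1))


x = Tensor(np.arange(18).reshape(3, 2, 3).astype(np.float32))
print(Net()(x))  # same values as x.asnumpy()[2:3, 0:2, 0:3]
```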
mindspore/ccsrc/kernel/cpu/cpu_kernel.h

```cpp
// @@ -47,6 +47,7 @@ const char TRANSPOSE_NO = 'N';
const char TRANSPOSE_YES = 'T';
const char AXIS[] = "axis";
const char BEGIN[] = "begin";
const char END[] = "end";
const char SIZE[] = "size";

class CPUKernel : public kernel::KernelMod {
```
...
...
mindspore/ccsrc/kernel/cpu/slice_cpu_kernel.cc

```diff
@@ -21,31 +21,53 @@ namespace mindspore {
 namespace kernel {
 void SliceCPUKernel::InitKernel(const CNodePtr &kernel_node) {
   CheckParam(kernel_node);
-  begin_ = AnfAlgo::GetNodeAttr<std::vector<int>>(kernel_node, BEGIN);
-  size_ = AnfAlgo::GetNodeAttr<std::vector<int>>(kernel_node, SIZE);
   input_shape_ = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
-  if (input_shape_.size() < 4) {
-    for (size_t i = 0; i < 4 - input_shape_.size(); ++i) {
-      input_shape_.insert(input_shape_.begin(), 1);
-      begin_.insert(begin_.begin(), 0);
-      size_.insert(size_.begin(), 1);
-    }
-  }
   output_shape_ = AnfAlgo::GetOutputInferShape(kernel_node, 0);
   CPUKernelUtils::ExpandDimsTo4(&output_shape_);
+  begin_ = AnfAlgo::GetNodeAttr<std::vector<int>>(kernel_node, BEGIN);
   for (size_t i = 0; i < begin_.size(); i++) {
     if (begin_[i] < 0) {
       begin_[i] = begin_[i] + input_shape_[i];
     }
   }
-  for (size_t i = 0; i < size_.size(); i++) {
-    if (size_[i] < 0) {
-      size_[i] = (size_[i] + input_shape_[i]) > 0 ? (size_[i] + input_shape_[i]) : 0;
-    }
-  }
+  auto prim = AnfAlgo::GetCNodePrimitive(kernel_node);
+  MS_EXCEPTION_IF_NULL(prim);
+  auto strides = prim->GetAttr(STRIDES);
+  if (strides != nullptr) {
+    strides_ = AnfAlgo::GetNodeAttr<std::vector<int>>(kernel_node, STRIDES);
+    end_ = AnfAlgo::GetNodeAttr<std::vector<int>>(kernel_node, END);
+    if (strides_.size() != end_.size() || strides_.size() != input_shape_.size()) {
+      MS_LOG(EXCEPTION) << "stride|end|input size must be equal";
+    }
+    for (size_t i = 0; i < strides_.size(); ++i) {
+      if (strides_[i] < 0) {
+        strides_[i] = (strides_[i] + input_shape_[i]) > 0 ? (strides_[i] + input_shape_[i]) : 0;
+      }
+      if (end_[i] < 0) {
+        end_[i] = (end_[i] + input_shape_[i]) > 0 ? (end_[i] + input_shape_[i]) : 0;
+      }
+    }
+  } else {
+    auto sizes = AnfAlgo::GetNodeAttr<std::vector<int>>(kernel_node, SIZE);
+    if (sizes.size() != input_shape_.size() || begin_.size() != input_shape_.size()) {
+      MS_LOG(EXCEPTION) << "begin|size|input size must be equal";
+    }
+    for (size_t i = 0; i < sizes.size(); ++i) {
+      if (sizes[i] < 0) {
+        sizes[i] = (sizes[i] + input_shape_[i]) > 0 ? (sizes[i] + input_shape_[i]) : 0;
+      }
+      strides_.emplace_back(1);
+      end_.emplace_back(begin_[i] + sizes[i]);
+    }
+  }
+  auto input_len = input_shape_.size();
+  if (input_len < 4) {
+    for (size_t i = 0; i < 4 - input_len; ++i) {
+      input_shape_.insert(input_shape_.begin(), 1);
+      begin_.insert(begin_.begin(), 0);
+      strides_.insert(strides_.begin(), 1);
+      end_.insert(end_.begin(), 1);
+    }
+  }
 }
```
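The attribute normalization above reads as follows: a negative begin wraps around the corresponding input dimension; when the node carries no strides attribute (the plain Slice case), end and strides are synthesized as begin + size and 1; otherwise negative end and strides values are wrapped by the dimension and clamped at zero. A rough Python sketch of that logic (normalize is a hypothetical helper used only for illustration):

```python
def normalize(begin, input_shape, end=None, strides=None, size=None):
    """Sketch of the attribute normalization in SliceCPUKernel::InitKernel."""
    # Negative begin wraps around the dimension (no clamping in the kernel).
    begin = [b + d if b < 0 else b for b, d in zip(begin, input_shape)]
    if strides is None:
        # Slice case: clamp negative sizes at 0, then derive end/strides.
        size = [max(s + d, 0) if s < 0 else s for s, d in zip(size, input_shape)]
        strides = [1] * len(begin)
        end = [b + s for b, s in zip(begin, size)]
    else:
        # StridedSlice case: wrap negative end/strides by the dimension, clamped at 0.
        end = [max(e + d, 0) if e < 0 else e for e, d in zip(end, input_shape)]
        strides = [max(s + d, 0) if s < 0 else s for s, d in zip(strides, input_shape)]
    return begin, end, strides


# On a dimension of length 5: begin=-2 -> 3, end=-1 -> 4, stride stays 1.
print(normalize([-2], [5], end=[-1], strides=[1]))  # ([3], [4], [1])
```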
...
...
```diff
@@ -56,10 +78,10 @@ bool SliceCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs,
   auto input_addr = reinterpret_cast<float *>(inputs[0]->addr);
   auto output_addr = reinterpret_cast<float *>(outputs[0]->addr);
-  for (int i = begin_[0]; i < begin_[0] + size_[0]; ++i) {
-    for (int j = begin_[1]; j < begin_[1] + size_[1]; ++j) {
-      for (int k = begin_[2]; k < begin_[2] + size_[2]; ++k) {
-        for (int m = begin_[3]; m < begin_[3] + size_[3]; ++m) {
+  for (int i = begin_[0]; i < end_[0]; i += strides_[0]) {
+    for (int j = begin_[1]; j < end_[1]; j += strides_[1]) {
+      for (int k = begin_[2]; k < end_[2]; k += strides_[2]) {
+        for (int m = begin_[3]; m < end_[3]; m += strides_[3]) {
           auto offset = CPUKernelUtils::CalcOffset(input_shape_, i, j, k, m);
           *output_addr++ = input_addr[offset];
         }
```
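Because InitKernel pads every shape and attribute to four dimensions, the four nested loops visit the selected elements in row-major order and pack them densely into the output. A quick NumPy cross-check of that behaviour (illustration only, not kernel code):

```python
import numpy as np

x = np.arange(2 * 3 * 4 * 5, dtype=np.float32).reshape(2, 3, 4, 5)
begin, end, strides = (0, 1, 0, 1), (2, 3, 4, 5), (1, 1, 2, 2)

# What the nested loops in SliceCPUKernel::Launch compute ...
out = [x[i, j, k, m]
       for i in range(begin[0], end[0], strides[0])
       for j in range(begin[1], end[1], strides[1])
       for k in range(begin[2], end[2], strides[2])
       for m in range(begin[3], end[3], strides[3])]

# ... matches ordinary strided slicing, flattened in row-major order.
ref = x[0:2, 1:3, 0:4:2, 1:5:2]
assert np.array_equal(np.array(out, dtype=np.float32), ref.reshape(-1))
```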
...
...
mindspore/ccsrc/kernel/cpu/slice_cpu_kernel.h

```cpp
// @@ -35,13 +35,16 @@ class SliceCPUKernel : public CPUKernel {
 private:
  void CheckParam(const CNodePtr &kernel_node);
  std::vector<int> begin_;
  std::vector<int> size_;
  std::vector<int> end_;
  std::vector<int> strides_;
  std::vector<size_t> input_shape_;
  std::vector<size_t> output_shape_;
};

MS_REG_CPU_KERNEL(Slice, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
                  SliceCPUKernel);
MS_REG_CPU_KERNEL(StridedSlice, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
                  SliceCPUKernel);
}  // namespace kernel
}  // namespace mindspore
```
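Note that both the Slice and StridedSlice primitives are registered against the same SliceCPUKernel class; the kernel tells them apart in InitKernel by checking whether the node carries a strides attribute.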
...
...
mindspore/ccsrc/kernel/cpu/slice_grad_cpu_kernel.cc

```diff
@@ -21,33 +21,54 @@ namespace mindspore {
 namespace kernel {
 void SliceGradCPUKernel::InitKernel(const CNodePtr &kernel_node) {
   CheckParam(kernel_node);
-  begin_ = AnfAlgo::GetNodeAttr<std::vector<int>>(kernel_node, BEGIN);
-  size_ = AnfAlgo::GetNodeAttr<std::vector<int>>(kernel_node, SIZE);
-  input_dy_shape_ = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
-  if (input_dy_shape_.size() < 4) {
-    for (size_t i = 0; i < 4 - input_dy_shape_.size(); ++i) {
-      input_dy_shape_.insert(input_dy_shape_.begin(), 1);
-      begin_.insert(begin_.begin(), 0);
-      size_.insert(size_.begin(), 1);
-    }
-  }
   input_x_shape_ = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
   output_dx_shape_ = AnfAlgo::GetOutputInferShape(kernel_node, 0);
   CPUKernelUtils::ExpandDimsTo4(&input_x_shape_);
-  CPUKernelUtils::ExpandDimsTo4(&output_dx_shape_);
+  input_dy_shape_ = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
+  begin_ = AnfAlgo::GetNodeAttr<std::vector<int>>(kernel_node, BEGIN);
   for (size_t i = 0; i < begin_.size(); i++) {
     if (begin_[i] < 0) {
-      begin_[i] = begin_[i] + input_x_shape_[i];
+      begin_[i] = begin_[i] + output_dx_shape_[i];
     }
   }
-  for (size_t i = 0; i < size_.size(); i++) {
-    if (size_[i] < 0) {
-      size_[i] = (size_[i] + input_x_shape_[i]) > 0 ? (size_[i] + input_x_shape_[i]) : 0;
-    }
-  }
+  auto prim = AnfAlgo::GetCNodePrimitive(kernel_node);
+  MS_EXCEPTION_IF_NULL(prim);
+  auto strides = prim->GetAttr(STRIDES);
+  if (strides != nullptr) {
+    strides_ = AnfAlgo::GetNodeAttr<std::vector<int>>(kernel_node, STRIDES);
+    end_ = AnfAlgo::GetNodeAttr<std::vector<int>>(kernel_node, END);
+    if (strides_.size() != end_.size() || strides_.size() != output_dx_shape_.size()) {
+      MS_LOG(EXCEPTION) << "stride|end|input size must be equal";
+    }
+    for (size_t i = 0; i < strides_.size(); ++i) {
+      if (strides_[i] < 0) {
+        strides_[i] = (strides_[i] + output_dx_shape_[i]) > 0 ? (strides_[i] + output_dx_shape_[i]) : 0;
+      }
+      if (end_[i] < 0) {
+        end_[i] = (end_[i] + output_dx_shape_[i]) > 0 ? (end_[i] + output_dx_shape_[i]) : 0;
+      }
+    }
+  } else {
+    auto sizes = AnfAlgo::GetNodeAttr<std::vector<int>>(kernel_node, SIZE);
+    if (sizes.size() != output_dx_shape_.size() || begin_.size() != output_dx_shape_.size()) {
+      MS_LOG(EXCEPTION) << "begin|size|input size must be equal";
+    }
+    for (size_t i = 0; i < sizes.size(); ++i) {
+      if (sizes[i] < 0) {
+        sizes[i] = (sizes[i] + output_dx_shape_[i]) > 0 ? (sizes[i] + output_dx_shape_[i]) : 0;
+      }
+      strides_.emplace_back(1);
+      end_.emplace_back(begin_[i] + sizes[i]);
+    }
+  }
+  CPUKernelUtils::ExpandDimsTo4(&output_dx_shape_);
+  auto input_len = input_dy_shape_.size();
+  if (input_len < 4) {
+    for (size_t i = 0; i < 4 - input_len; ++i) {
+      input_dy_shape_.insert(input_dy_shape_.begin(), 1);
+      begin_.insert(begin_.begin(), 0);
+      strides_.insert(strides_.begin(), 1);
+      end_.insert(end_.begin(), 1);
+    }
+  }
 }
```
...
...
```diff
@@ -65,10 +86,10 @@ bool SliceGradCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs,
     return false;
   }
-  for (int i = begin_[0]; i < begin_[0] + size_[0]; ++i) {
-    for (int j = begin_[1]; j < begin_[1] + size_[1]; ++j) {
-      for (int k = begin_[2]; k < begin_[2] + size_[2]; ++k) {
-        for (int m = begin_[3]; m < begin_[3] + size_[3]; ++m) {
+  for (int i = begin_[0]; i < end_[0]; i += strides_[0]) {
+    for (int j = begin_[1]; j < end_[1]; j += strides_[1]) {
+      for (int k = begin_[2]; k < end_[2]; k += strides_[2]) {
+        for (int m = begin_[3]; m < end_[3]; m += strides_[3]) {
           auto offset = CPUKernelUtils::CalcOffset(output_dx_shape_, i, j, k, m);
           output_dx_addr[offset] = *input_dy_addr++;
         }
```
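The gradient kernel does the reverse walk: each element of dy is written back to its strided position in dx (the earlier part of Launch, not shown in this hunk, is expected to leave the rest of dx zero-filled, which is what the new grad test asserts). A NumPy sketch of the same semantics (strided_slice_grad is an illustrative helper, not the framework API):

```python
import numpy as np

def strided_slice_grad(dy, x_shape, begin, end, strides):
    """Scatter dy into a zero tensor shaped like the forward input x."""
    dx = np.zeros(x_shape, dtype=dy.dtype)
    dx[tuple(slice(b, e, s) for b, e, s in zip(begin, end, strides))] = dy
    return dx

dy = np.array([[[5., 1., 5.], [6., 1., 8.]]], dtype=np.float32)
dx = strided_slice_grad(dy, (3, 2, 3), (2, 0, 0), (3, 2, 3), (1, 1, 1))
# Only the [2:3, 0:2, 0:3] block is non-zero, matching the new grad test below.
```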
...
...
mindspore/ccsrc/kernel/cpu/slice_grad_cpu_kernel.h

```cpp
// @@ -35,9 +35,9 @@ class SliceGradCPUKernel : public CPUKernel {
 private:
  void CheckParam(const CNodePtr &kernel_node);
  std::vector<int> begin_;
  std::vector<int> size_;
  std::vector<int> end_;
  std::vector<int> strides_;
  std::vector<size_t> input_dy_shape_;
  std::vector<size_t> input_x_shape_;
  std::vector<size_t> output_dx_shape_;
};

// @@ -45,6 +45,8 @@
MS_REG_CPU_KERNEL(
  SliceGrad,
  KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
  SliceGradCPUKernel);
MS_REG_CPU_KERNEL(StridedSliceGrad, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
                  SliceGradCPUKernel);
}  // namespace kernel
}  // namespace mindspore
```
...
...
tests/st/ops/cpu/test_stridedslice_grad_op.py (new file, mode 100644)

```python
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest

import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.ops import operations as P
from mindspore.ops.operations import _grad_ops as G

context.set_context(mode=context.GRAPH_MODE, device_target='CPU')


class StridedSliceGrad(nn.Cell):
    def __init__(self):
        super(StridedSliceGrad, self).__init__()
        self.ssg = G.StridedSliceGrad()
        self.shape = P.Shape()

    @ms_function
    def construct(self, dy, x):
        return self.ssg(dy, self.shape(x), (2, 0, 0), (3, 2, 3), (1, 1, 1))


@pytest.mark.level0
@pytest.mark.platform_x86_cpu_training
@pytest.mark.env_onecard
def test_slice():
    x = Tensor(np.array([[[1., 1., 1.], [2, 2, 2]],
                         [[3, 3, 3], [4, 4, 4]],
                         [[5, 5, 5], [6, 7, 8]]]).astype(np.float32))
    dy = Tensor(np.array([[[5., 1., 5.], [6., 1., 8.]]]).astype(np.float32))
    ssg = StridedSliceGrad()
    output = ssg(dy, x)
    expect = [[[0, 0, 0], [0, 0, 0]],
              [[0, 0, 0], [0, 0, 0]],
              [[5, 1, 5], [6, 1, 8]]]
    assert (output.asnumpy() == expect).all()
```
tests/st/ops/cpu/test_stridedslice_op.py (new file, mode 100644)

```python
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest

import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P

context.set_context(mode=context.GRAPH_MODE, device_target='CPU')


class StridedSlice(nn.Cell):
    def __init__(self):
        super(StridedSlice, self).__init__()
        self.stridedslice = P.StridedSlice()

    def construct(self, x):
        return self.stridedslice(x, (2, 0, 0), (3, 2, 3), (1, 1, 1))


@pytest.mark.level0
@pytest.mark.platform_x86_cpu_training
@pytest.mark.env_onecard
def test_slice():
    x = Tensor(np.array([[[1., 1., 1.], [2, 2, 2]],
                         [[3, 3, 3], [4, 4, 4]],
                         [[5, 5, 5], [6, 7, 8]]]).astype(np.float32))
    stridedslice = StridedSlice()
    output = stridedslice(x)
    expect = [[[5., 5., 5.], [6., 7., 8.]]]
    assert (output.asnumpy() == expect).all()
```
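Since begin=(2, 0, 0), end=(3, 2, 3), strides=(1, 1, 1) corresponds to x[2:3, 0:2, 0:3], the expected value can be sanity-checked with plain NumPy:

```python
import numpy as np

x = np.array([[[1., 1., 1.], [2, 2, 2]],
              [[3, 3, 3], [4, 4, 4]],
              [[5, 5, 5], [6, 7, 8]]], dtype=np.float32)
print(x[2:3, 0:2, 0:3])  # [[[5. 5. 5.] [6. 7. 8.]]], matching `expect` above
```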