Commit 788f2571
Authored May 06, 2020 by mindspore-ci-bot; committed via Gitee on May 06, 2020.

!925 Add TBE op registration of AdamApplyOne\ApplyFtrl\GatherNd for VM.

Merge pull request !925 from liuxiao/ops-for-VM
Parents: 6e9256e6, 4e504d7a

Showing 6 changed files with 170 additions and 2 deletions (+170, -2):
mindspore/ops/_op_impl/tbe/__init__.py              +5   -2
mindspore/ops/_op_impl/tbe/adam_apply_one.py        +53  -0
mindspore/ops/_op_impl/tbe/apply_ftrl.py            +65  -0
mindspore/ops/_op_impl/tbe/gather_nd.py             +47  -0
mindspore/ops/_op_impl/tbe/strided_slice_d.py       +0   -0  (renamed)
mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py  +0   -0  (renamed)
mindspore/ops/_op_impl/tbe/__init__.py
@@ -19,8 +19,10 @@ from .abs_grad import _abs_grad_tbe
 from .adam_apply_one_with_decay import _adam_apply_one_with_decay_tbe
 from .add import _add_tbe
 from .add_n import _add_n_tbe
+from .apply_ftrl import _apply_ftrl_tbe
 from .apply_momentum import _apply_momentum_tbe
 from .apply_adam import _apply_adam_tbe
+from .adam_apply_one import _adam_apply_one_tbe
 from .assign import _assign_tbe
 from .assign_add import _assign_add_tbe
 from .assign_sub import _assign_sub_tbe
@@ -64,13 +66,14 @@ from .reduce_mean import _reduce_mean_tbe
 from .tile import _tile_tbe
 from .atomic_addr_clean import _atomic_addr_clean_tbe
 from .gather_v2 import _gather_v2_tbe
+from .gather_nd import _gather_nd_tbe
 from .bn_training_reduce import _bn_training_reduce_tbe
 from .bn_training_reduce_grad import _bn_training_reduce_grad_tbe
 from .bn_training_update import _bn_training_update_tbe
 from .bn_training_update_grad import _bn_training_update_grad_tbe
 from .reciprocal import _reciprocal_tbe
-from .strideslice_d import _strided_slice_d_tbe
-from .strideslicegrad_d import _strided_slice_grad_d_tbe
+from .strided_slice_d import _strided_slice_d_tbe
+from .strided_slice_grad_d import _strided_slice_grad_d_tbe
 from .split_d import _split_d_tbe
 from .exp import _exp_tbe
 from .div import _div_tbe
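One detail worth noting about this hunk: adding the import in tbe/__init__.py is what actually activates the new kernels, because @op_info_register runs at module import time. Below is a minimal sketch of that decorator pattern, assuming a plain dict registry; _REGISTERED_OPS is a hypothetical name for illustration, not MindSpore's real implementation (which serializes the op info for the TBE compiler).

# Minimal sketch of import-time registration, NOT MindSpore's actual code.
_REGISTERED_OPS = {}  # hypothetical registry: register function name -> op info


def op_info_register(op_info):
    """Decorator factory: stores op_info when the decorated module is imported,
    which is why each new op module must be imported in tbe/__init__.py."""
    def decorator(func):
        _REGISTERED_OPS[func.__name__] = op_info
        return func
    return decorator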
mindspore/ops/_op_impl/tbe/adam_apply_one.py  (new file, mode 100644)
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""AdamApplyOne op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

adam_apply_one_op_info = TBERegOp("AdamApplyOne") \
    .fusion_type("OPAQUE") \
    .async_flag(False) \
    .binfile_name("adam_apply_one.so") \
    .compute_cost(10) \
    .kernel_name("adam_apply_one") \
    .partial_flag(True) \
    .input(0, "input0", False, "required", "all") \
    .input(1, "input1", False, "required", "all") \
    .input(2, "input2", False, "required", "all") \
    .input(3, "input3", False, "required", "all") \
    .input(4, "input4", False, "required", "all") \
    .input(5, "mul0_x", False, "required", "all") \
    .input(6, "mul1_x", False, "required", "all") \
    .input(7, "mul2_x", False, "required", "all") \
    .input(8, "mul3_x", False, "required", "all") \
    .input(9, "add2_y", False, "required", "all") \
    .output(0, "output0", False, "required", "all") \
    .output(1, "output1", False, "required", "all") \
    .output(2, "output2", False, "required", "all") \
    .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default,
                  DataType.F16_Default, DataType.F16_Default, DataType.F16_Default,
                  DataType.F16_Default, DataType.F16_Default, DataType.F16_Default,
                  DataType.F16_Default, DataType.F16_Default, DataType.F16_Default,
                  DataType.F16_Default) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
                  DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
                  DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
                  DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
                  DataType.F32_Default) \
    .get_op_info()


@op_info_register(adam_apply_one_op_info)
def _adam_apply_one_tbe():
    """AdamApplyOne TBE register"""
    return
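For orientation, AdamApplyOne is a fused pattern used internally for the Adam optimizer update; the registration above only declares the kernel's interface (10 inputs, 3 outputs, fp16/fp32). The NumPy sketch below is our reading of the semantics suggested by the input names (mul0_x..mul3_x as moment coefficients, add2_y as the epsilon term) — an assumption for illustration, not code from this commit.

import numpy as np


def adam_apply_one_reference(grad, v, m, var, lr,
                             mul0_x, mul1_x, mul2_x, mul3_x, add2_y):
    """Assumed semantics of the fused AdamApplyOne pattern (illustrative only):
    input0=grad, input1=v, input2=m, input3=var, input4=lr."""
    output0 = mul0_x * v + mul1_x * np.square(grad)             # new second moment
    output1 = mul2_x * m + mul3_x * grad                        # new first moment
    output2 = var - lr * output1 / (np.sqrt(output0) + add2_y)  # updated variable
    return output0, output1, output2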
mindspore/ops/_op_impl/tbe/apply_ftrl.py  (new file, mode 100644)
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""ApplyFtrl op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

apply_ftrl_op_info = TBERegOp("ApplyFtrl") \
    .fusion_type("OPAQUE") \
    .async_flag(False) \
    .binfile_name("apply_ftrl.so") \
    .compute_cost(10) \
    .kernel_name("apply_ftrl") \
    .partial_flag(True) \
    .input(0, "var", False, "required", "all") \
    .input(1, "accum", False, "required", "all") \
    .input(2, "linear", False, "required", "all") \
    .input(3, "grad", False, "required", "all") \
    .input(4, "lr", False, "required", "all") \
    .input(5, "l1", False, "required", "all") \
    .input(6, "l2", False, "required", "all") \
    .input(7, "lr_power", False, "required", "all") \
    .output(0, "var", False, "required", "all") \
    .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD,
                  DataType.F16_5HD, DataType.F16_5HD, DataType.F16_Default,
                  DataType.F16_Default, DataType.F16_Default, DataType.F16_5HD) \
    .dtype_format(DataType.F16_FracZ, DataType.F16_FracZ, DataType.F16_FracZ,
                  DataType.F16_FracZ, DataType.F16_FracZ, DataType.F16_Default,
                  DataType.F16_Default, DataType.F16_Default, DataType.F16_FracZ) \
    .dtype_format(DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0,
                  DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0, DataType.F16_Default,
                  DataType.F16_Default, DataType.F16_Default, DataType.F16_C1HWNCoC0) \
    .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default,
                  DataType.F16_Default, DataType.F16_Default, DataType.F16_Default,
                  DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD,
                  DataType.F32_5HD, DataType.F32_5HD, DataType.F32_Default,
                  DataType.F32_Default, DataType.F32_Default, DataType.F32_5HD) \
    .dtype_format(DataType.F32_FracZ, DataType.F32_FracZ, DataType.F32_FracZ,
                  DataType.F32_FracZ, DataType.F32_FracZ, DataType.F32_Default,
                  DataType.F32_Default, DataType.F32_Default, DataType.F32_FracZ) \
    .dtype_format(DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0,
                  DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0, DataType.F32_Default,
                  DataType.F32_Default, DataType.F32_Default, DataType.F32_C1HWNCoC0) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
                  DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
                  DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
    .get_op_info()


@op_info_register(apply_ftrl_op_info)
def _apply_ftrl_tbe():
    """ApplyFtrl TBE register"""
    return
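On the Python side this registration lines up with the ApplyFtrl primitive's eight inputs (var, accum, linear, grad, lr, l1, l2, lr_power) and its single output var. A hedged usage sketch follows — the Parameter/primitive wiring reflects the usual MindSpore pattern of this era and assumes an Ascend backend; the hyper-parameter values are arbitrary.

import numpy as np
from mindspore import Tensor, Parameter
from mindspore.ops import operations as P

apply_ftrl = P.ApplyFtrl(use_locking=False)
var = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="var")
accum = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="accum")
linear = Parameter(Tensor(np.random.rand(2, 2).astype(np.float32)), name="linear")
grad = Tensor(np.random.rand(2, 2).astype(np.float32))
# lr, l1, l2 and lr_power are the scalar FTRL hyper-parameters.
updated_var = apply_ftrl(var, accum, linear, grad, 0.001, 0.0, 0.0, -0.5)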
mindspore/ops/_op_impl/tbe/gather_nd.py  (new file, mode 100644)
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""GatherNd op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

gather_nd_op_info = TBERegOp("GatherNd") \
    .fusion_type("OPAQUE") \
    .async_flag(False) \
    .binfile_name("gather_nd.so") \
    .compute_cost(10) \
    .kernel_name("gather_nd") \
    .partial_flag(True) \
    .input(0, "x1", False, "required", "all") \
    .input(1, "x2", False, "required", "all") \
    .output(0, "y", False, "required", "all") \
    .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \
    .dtype_format(DataType.I32_Default, DataType.I64_Default, DataType.I32_Default) \
    .dtype_format(DataType.F32_Default, DataType.I32_Default, DataType.F32_Default) \
    .dtype_format(DataType.F32_Default, DataType.I64_Default, DataType.F32_Default) \
    .dtype_format(DataType.F16_Default, DataType.I32_Default, DataType.F16_Default) \
    .dtype_format(DataType.F16_Default, DataType.I64_Default, DataType.F16_Default) \
    .dtype_format(DataType.I8_Default, DataType.I32_Default, DataType.I8_Default) \
    .dtype_format(DataType.I8_Default, DataType.I64_Default, DataType.I8_Default) \
    .dtype_format(DataType.U8_Default, DataType.I32_Default, DataType.U8_Default) \
    .dtype_format(DataType.U8_Default, DataType.I64_Default, DataType.U8_Default) \
    .dtype_format(DataType.BOOL_Default, DataType.I32_Default, DataType.BOOL_Default) \
    .dtype_format(DataType.BOOL_Default, DataType.I64_Default, DataType.BOOL_Default) \
    .get_op_info()


@op_info_register(gather_nd_op_info)
def _gather_nd_tbe():
    """GatherNd TBE register"""
    return
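GatherNd is the most directly user-visible of the three ops. A small usage sketch against the public primitive (values chosen for illustration; exercising this particular kernel requires the Ascend/TBE backend):

import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

gather_nd = P.GatherNd()
x = Tensor(np.array([[-0.1, 0.3, 3.6],
                     [0.4, 0.5, -3.2]], dtype=np.float32))
indices = Tensor(np.array([[0, 0], [1, 1]], dtype=np.int32))
# Each row of indices selects one element of x: x[0][0] and x[1][1].
output = gather_nd(x, indices)  # -> [-0.1, 0.5]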
mindspore/ops/_op_impl/tbe/strideslice_d.py → mindspore/ops/_op_impl/tbe/strided_slice_d.py  (file moved)

mindspore/ops/_op_impl/tbe/strideslicegrad_d.py → mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py  (file moved)