magicwindyyd/mindspore (forked from MindSpore/mindspore)

Commit 5c9791a8
Authored on April 15, 2020 by liuxiao
Add Abs/AbsGrad/Sign/SmoothL1Loss/SmoothL1LossGrad and modify TopKV2->TopK for VM
Parent 0a9db34d

Showing 12 changed files with 327 additions and 10 deletions (+327 −10):
mindspore/ccsrc/kernel/tbe/tbe_adapter.cc                        +0  −1
mindspore/ops/_op_impl/tbe/__init__.py                           +6  −1
mindspore/ops/_op_impl/tbe/abs.py                                +41 −0
mindspore/ops/_op_impl/tbe/abs_grad.py                           +44 −0
mindspore/ops/_op_impl/tbe/sign.py                               +41 −0
mindspore/ops/_op_impl/tbe/smooth_l1_loss.py                     +44 −0
mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py                +45 −0
mindspore/ops/_op_impl/tbe/top_k.py                              +7  −7
mindspore/ops/op_info_register.py                                +1  −0
tests/st/ops/davinci/test_tbe_ops/test_smooth_l1_loss.py         +42 −0
tests/st/ops/davinci/test_tbe_ops/test_smooth_l1_loss_grad.py    +55 −0
tests/st/ops/davinci/test_tbe_ops/test_topk.py                   +1  −1
mindspore/ccsrc/kernel/tbe/tbe_adapter.cc
@@ -42,7 +42,6 @@ static std::map<string, string> tbe_func_adapter_map = {
   {"depthwise_conv2d_native", "depthwise_conv2d"},
   {"depthwise_conv2d_native_backprop_filter", "depthwise_conv2d_backprop_filter_d"},
   {"depthwise_conv2d_native_backprop_input", "depthwise_conv2d_backprop_input_d"},
-  {"top_kv2", "top_k"},
   {"scatter_nd", "scatter_nd_d"},
   {"tile", "tile_d"},
   {"gather_v2", "gather_v2_d"},
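With the primitive renamed from TopKV2 to TopK, the explicit "top_kv2" map entry is no longer needed: the adapter's default CamelCase-to-snake_case normalization of the op name already yields the kernel name "top_k". A minimal sketch of that normalization (illustrative Python only; the real logic is the C++ in tbe_adapter.cc, and to_snake_case is a hypothetical name):

import re

def to_snake_case(op_name):
    # Approximation of the CamelCase -> snake_case step applied to an op
    # name before looking it up in tbe_func_adapter_map.
    s = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', op_name)
    return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', s).lower()

print(to_snake_case("TopKV2"))  # "top_kv2" -- needed the explicit map entry
print(to_snake_case("TopK"))    # "top_k"   -- matches the kernel name directly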
mindspore/ops/_op_impl/tbe/__init__.py
@@ -14,6 +14,8 @@
 # ============================================================================
 """tbe ops"""
+from .abs import _abs_tbe
+from .abs_grad import _abs_grad_tbe
 from .adam_apply_one_with_decay import _adam_apply_one_with_decay_tbe
 from .add import _add_tbe
 from .add_n import _add_n_tbe
@@ -49,7 +51,7 @@ from .sigmoid_cross_entropy_with_logits import _sigmoid_cross_entropy_with_logit
 from .sigmoid_cross_entropy_with_logits_grad import _sigmoid_cross_entropy_with_logits_grad_tbe
 from .tensor_add import _tensor_add_tbe
 from .trans_data import _trans_data_tbe
-from .topkv2 import _topk_v2_tbe
+from .top_k import _top_k_tbe
 from .matmul import _matmul_tbe
 from .sub import _sub_tbe
 from .reduce_mean_d import _reduce_mean_d_tbe
@@ -107,6 +109,7 @@ from .minimum_grad import _minimum_grad_tbe
 from .maximum_grad import _maximum_grad_tbe
 from .concat import _concat_tbe
 from .slice import _slice_tbe
+from .sign import _sign_tbe
 from .greater import _greater_tbe
 from .clip_by_norm_no_div_sum import _clip_by_norm_no_div_sum_tbe
 from .clip_by_value import _clip_by_value_tbe
@@ -130,6 +133,8 @@ from .resize_nearest_neighbor_grad_d import _resize_nearest_neighbor_grad_d_tbe
 from .pad_d import _pad_d_tbe
 from .arg_max_with_value import _arg_max_with_value_tbe
 from .arg_min_with_value import _arg_min_with_value_tbe
+from .smooth_l1_loss import _smooth_l1_loss_tbe
+from .smooth_l1_loss_grad import _smooth_l1_loss_grad_tbe
 from .fused_mul_add import _fused_mul_add_tbe
 from .fused_mul_add_n import _fused_mul_add_n_tbe
 from .fused_mul_apply_momentum import _fused_mul_apply_momentum_tbe
mindspore/ops/_op_impl/tbe/abs.py
new file mode 100644

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Abs op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

abs_op_info = TBERegOp("Abs") \
    .fusion_type("ELEMWISE") \
    .async_flag(False) \
    .binfile_name("abs.so") \
    .compute_cost(10) \
    .kernel_name("abs") \
    .partial_flag(True) \
    .op_pattern("formatAgnostic") \
    .input(0, "x", None, "required", None) \
    .output(0, "y", True, "required", "all") \
    .dtype_format(DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default) \
    .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \
    .dtype_format(DataType.I32_Default, DataType.I32_Default) \
    .dtype_format(DataType.I32_5HD, DataType.I32_5HD) \
    .get_op_info()


@op_info_register(abs_op_info)
def _abs_tbe():
    """Abs TBE register"""
    return
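As a usage illustration (a hedged sketch written in the style of the tests added by this commit, not a file from the commit itself), the kernel registered above is reached through the P.Abs primitive on Ascend:

import numpy as np
import mindspore.nn as nn
import mindspore.context as context
from mindspore import Tensor
from mindspore.ops import operations as P

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


class AbsNet(nn.Cell):
    def __init__(self):
        super(AbsNet, self).__init__()
        self.abs = P.Abs()

    def construct(self, x):
        return self.abs(x)


x = np.random.randn(2, 4).astype(np.float32)
print("abs:\n", AbsNet()(Tensor(x)).asnumpy())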
mindspore/ops/_op_impl/tbe/abs_grad.py
new file mode 100644

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""AbsGrad op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

abs_grad_op_info = TBERegOp("AbsGrad") \
    .fusion_type("ELEMWISE") \
    .async_flag(False) \
    .binfile_name("abs_grad.so") \
    .compute_cost(10) \
    .kernel_name("abs_grad") \
    .partial_flag(True) \
    .op_pattern("formatAgnostic") \
    .input(0, "y", None, "required", None) \
    .input(1, "dy", None, "required", None) \
    .output(0, "z", False, "required", "all") \
    .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \
    .dtype_format(DataType.F16_FracZ, DataType.F16_FracZ, DataType.F16_FracZ) \
    .dtype_format(DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0) \
    .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \
    .dtype_format(DataType.F32_FracZ, DataType.F32_FracZ, DataType.F32_FracZ) \
    .dtype_format(DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
    .get_op_info()


@op_info_register(abs_grad_op_info)
def _abs_grad_tbe():
    """AbsGrad TBE register"""
    return
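AbsGrad is normally not invoked directly; it is emitted by automatic differentiation of Abs. A hedged sketch following the Grad wrapper pattern used by this commit's tests (that the backward graph of Abs lowers to exactly this kernel is an assumption):

import numpy as np
import mindspore.nn as nn
import mindspore.context as context
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops.composite import GradOperation

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


class AbsNet(nn.Cell):
    def __init__(self):
        super(AbsNet, self).__init__()
        self.abs = P.Abs()

    def construct(self, x):
        return self.abs(x)


class Grad(nn.Cell):
    def __init__(self, network):
        super(Grad, self).__init__()
        # Same GradOperation configuration as the tests added in this commit.
        self.grad = GradOperation(name="get_all", get_all=True, sens_param=True)
        self.network = network

    def construct(self, x, dout):
        return self.grad(self.network)(x, dout)


x = Tensor(np.random.randn(2, 4).astype(np.float32))
dout = Tensor(np.random.randn(2, 4).astype(np.float32))
print(Grad(AbsNet())(x, dout)[0].asnumpy())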
mindspore/ops/_op_impl/tbe/sign.py
new file mode 100644

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sign op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

sign_op_info = TBERegOp("Sign") \
    .fusion_type("ELEMWISE") \
    .async_flag(False) \
    .binfile_name("sign.so") \
    .compute_cost(10) \
    .kernel_name("sign") \
    .partial_flag(True) \
    .op_pattern("formatAgnostic") \
    .input(0, "x", None, "required", None) \
    .output(0, "y", True, "required", "all") \
    .dtype_format(DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default) \
    .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \
    .dtype_format(DataType.I32_Default, DataType.I32_Default) \
    .dtype_format(DataType.I32_5HD, DataType.I32_5HD) \
    .get_op_info()


@op_info_register(sign_op_info)
def _sign_tbe():
    """Sign TBE register"""
    return
mindspore/ops/_op_impl/tbe/smooth_l1_loss.py
new file mode 100644

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""SmoothL1Loss op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

smooth_l1_loss_op_info = TBERegOp("SmoothL1Loss") \
    .fusion_type("OPAQUE") \
    .async_flag(False) \
    .binfile_name("smooth_l1_loss.so") \
    .compute_cost(10) \
    .kernel_name("smooth_l1_loss") \
    .partial_flag(True) \
    .attr("sigma", "required", "float", "all") \
    .input(0, "predict", False, "required", "all") \
    .input(1, "label", False, "required", "all") \
    .output(0, "loss", False, "required", "all") \
    .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \
    .dtype_format(DataType.F16_FracZ, DataType.F16_FracZ, DataType.F16_FracZ) \
    .dtype_format(DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
    .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \
    .dtype_format(DataType.F32_FracZ, DataType.F32_FracZ, DataType.F32_FracZ) \
    .dtype_format(DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0) \
    .get_op_info()


@op_info_register(smooth_l1_loss_op_info)
def _smooth_l1_loss_tbe():
    """SmoothL1Loss TBE register"""
    return
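For orientation, smooth L1 is the Huber-style loss computed elementwise over (predict, label). Using the common parameterization in which the sigma attribute sets the quadratic/linear switch point (an assumption about this kernel's internals, stated for reference rather than taken from its source):

\ell(p, y) =
\begin{cases}
\dfrac{(p - y)^2}{2\sigma}, & |p - y| < \sigma \\[4pt]
|p - y| - \dfrac{\sigma}{2}, & \text{otherwise}
\end{cases}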
mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py
new file mode 100644

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""SmoothL1LossGrad op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

smooth_l1_loss_grad_op_info = TBERegOp("SmoothL1LossGrad") \
    .fusion_type("OPAQUE") \
    .async_flag(False) \
    .binfile_name("smooth_l1_loss_grad.so") \
    .compute_cost(10) \
    .kernel_name("smooth_l1_loss_grad") \
    .partial_flag(True) \
    .attr("sigma", "required", "float", "all") \
    .input(0, "predict", False, "required", "all") \
    .input(1, "label", False, "required", "all") \
    .input(2, "dout", False, "required", "all") \
    .output(0, "loss", False, "required", "all") \
    .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \
    .dtype_format(DataType.F16_FracZ, DataType.F16_FracZ, DataType.F16_FracZ, DataType.F16_FracZ) \
    .dtype_format(DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
    .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \
    .dtype_format(DataType.F32_FracZ, DataType.F32_FracZ, DataType.F32_FracZ, DataType.F32_FracZ) \
    .dtype_format(DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0) \
    .get_op_info()


@op_info_register(smooth_l1_loss_grad_op_info)
def _smooth_l1_loss_grad_tbe():
    """SmoothL1LossGrad TBE register"""
    return
mindspore/ops/_op_impl/tbe/topkv2.py → mindspore/ops/_op_impl/tbe/top_k.py

@@ -13,15 +13,15 @@
 # limitations under the License.
 # ============================================================================
-"""TopKV2 op"""
+"""TopK op"""
 from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

-top_k_v2_op_info = TBERegOp("TopKV2") \
+top_k_op_info = TBERegOp("TopK") \
     .fusion_type("OPAQUE") \
     .async_flag(False) \
-    .binfile_name("top_k_v2.so") \
+    .binfile_name("top_k.so") \
     .compute_cost(10) \
-    .kernel_name("top_k_v2") \
+    .kernel_name("top_k") \
     .partial_flag(True) \
     .attr("k", "required", "int", "all") \
     .attr("sorted", "required", "bool", "all") \
@@ -33,7 +33,7 @@ top_k_v2_op_info = TBERegOp("TopKV2") \
     .get_op_info()

-@op_info_register(top_k_v2_op_info)
-def _topk_v2_tbe():
-    """TopKV2 TBE register"""
+@op_info_register(top_k_op_info)
+def _top_k_tbe():
+    """TopK TBE register"""
     return
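Callers now construct the primitive as P.TopK; a hedged usage sketch consistent with the updated test later in this commit (the sorted flag goes to the constructor, k to the call):

import numpy as np
import mindspore.context as context
from mindspore import Tensor
from mindspore.ops import operations as P

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

topk = P.TopK(True)  # sorted=True, matching the updated test
values, indices = topk(Tensor(np.random.randn(8).astype(np.float16)), 3)
print(values.asnumpy(), indices.asnumpy())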
mindspore/ops/op_info_register.py
@@ -599,3 +599,4 @@ class DataType:
     F32_NCHW = ("float32", "NCHW")
     F32_NHWC = ("float32", "NHWC")
+    F32_HWCN = ("float32", "HWCN")
\ No newline at end of file
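Each DataType constant is simply a (dtype, format) pair consumed by dtype_format(); a small check grounded in the diff above:

from mindspore.ops.op_info_register import DataType

assert DataType.F32_NHWC == ("float32", "NHWC")
assert DataType.F32_HWCN == ("float32", "HWCN")  # the entry added by this commit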
tests/st/ops/davinci/test_tbe_ops/test_smooth_l1_loss.py
new file mode 100644

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.nn as nn
import mindspore.context as context
from mindspore import Tensor
from mindspore.ops import operations as P

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


class Net(nn.Cell):
    def __init__(self, sigma=1.0):
        super(Net, self).__init__()
        self.SmoothL1Loss = P.SmoothL1Loss(sigma)

    def construct(self, pred, gt):
        return self.SmoothL1Loss(pred, gt)


def test_net():
    pred = np.random.randn(2, 4).astype(np.float32)
    gt = np.random.randn(2, 4).astype(np.float32)
    smooth_l1_loss = Net()
    loss = smooth_l1_loss(Tensor(pred), Tensor(gt))
    print("------------- input ---------------")
    print("predict:\n", pred)
    print("ground truth:\n", gt)
    print("------------- output ---------------")
    print("loss:\n", loss.asnumpy())
tests/st/ops/davinci/test_tbe_ops/test_smooth_l1_loss_grad.py
new file mode 100644

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.nn as nn
import mindspore.context as context
from mindspore.ops.composite import GradOperation
from mindspore import Tensor
from mindspore.ops import operations as P

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


class Net(nn.Cell):
    def __init__(self, sigma=1.0):
        super(Net, self).__init__()
        self.SmoothL1Loss = P.SmoothL1Loss(sigma)

    def construct(self, pred, gt):
        return self.SmoothL1Loss(pred, gt)


class Grad(nn.Cell):
    def __init__(self, network):
        super(Grad, self).__init__()
        self.grad = GradOperation(name="get_all", get_all=True, sens_param=True)
        self.network = network

    def construct(self, pred, gt, dout):
        return self.grad(self.network)(pred, gt, dout)


def test_net():
    pred = np.random.randn(2, 4).astype(np.float32)
    gt = np.random.randn(2, 4).astype(np.float32)
    dout = np.random.randn(2, 4).astype(np.float32)
    smooth_l1_loss_grad = Grad(Net())
    output = smooth_l1_loss_grad(Tensor(pred), Tensor(gt), Tensor(dout))
    print("------------- input ---------------")
    print("predict:\n", pred)
    print("ground truth:\n", gt)
    print("dout:\n", dout)
    print("------------- output ---------------")
    print("predict grad:\n", output[0].asnumpy())
tests/st/ops/davinci/test_tbe_ops/test_topkv2.py → tests/st/ops/davinci/test_tbe_ops/test_topk.py

@@ -24,7 +24,7 @@ context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
 class Net(nn.Cell):
     def __init__(self, k):
         super(Net, self).__init__()
-        self.topk = P.TopK()
+        self.topk = P.TopK(True)
         self.k = k

     def construct(self, x):