magicwindyyd / mindspore (forked from MindSpore / mindspore)

Commit 98dc6eed
Authored on Aug 03, 2020 by mindspore-ci-bot; committed by Gitee on Aug 03, 2020
!3877 Add new hms ops of floor, round and ceil with type of int8
Merge pull request !3877 from liuwenhao/master
Parents: 27a0a2e3, a367b3c2
Showing 10 changed files with 746 additions and 6 deletions (+746, -6)
mindspore/lite/src/populate_parameter.cc  (+1, -1)
mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.h  (+1, -0)
mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.cc  (+120, -0)
mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.h  (+77, -0)
mindspore/lite/src/runtime/kernel/arm/opclib/arithmetic_self_parameter.h  (+29, -0)
mindspore/lite/src/runtime/kernel/arm/opclib/fp32/arithmetic_self.h  (+0, -5)
mindspore/lite/src/runtime/kernel/arm/opclib/int8/arithmetic_self_int8.cc  (+93, -0)
mindspore/lite/src/runtime/kernel/arm/opclib/int8/arithmetic_self_int8.h  (+32, -0)
mindspore/lite/src/runtime/kernel/arm/opclib/quantization/quantize.h  (+7, -0)
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/arithmetic_self_int8_tests.cc  (+386, -0)
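All three new int8 kernels follow the same pattern, visible in the diffs below: if the input and output tensors share quantization parameters, floor/round/ceil is applied to the int8 values directly; otherwise each value is dequantized, the op is applied, and the result is requantized and clamped to the int8 activation range. A minimal standalone sketch of that requantize step (the function and variable names here are illustrative, not part of the patch):

#include <cmath>
#include <cstdint>

// Illustrative only: requantize one Floor result the way the new int8 kernels do.
// (q_in - in_zp) * in_scale recovers the real value; the floored result is mapped
// onto the output quantization grid and clamped to the int8 activation range.
int8_t FloorRequantizeExample(int8_t q_in, float in_scale, int32_t in_zp,
                              float out_scale, int32_t out_zp) {
  float real = (q_in - in_zp) * in_scale;
  int32_t q_out = static_cast<int32_t>(std::roundf(std::floorf(real) / out_scale)) + out_zp;
  if (q_out > 127) q_out = 127;    // output_activation_max_ for int8
  if (q_out < -128) q_out = -128;  // output_activation_min_ for int8
  return static_cast<int8_t>(q_out);
}

With in_scale = 0.8, out_scale = 1.5 and zero points of 0, an input of 3 (real value 2.4) floors to 2.0 and requantizes to round(2.0 / 1.5) = 1, which matches the expected outputs in the new unit tests.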
mindspore/lite/src/populate_parameter.cc

@@ -43,7 +43,7 @@
 #include "src/runtime/kernel/arm/opclib/fp32/range.h"
 #include "src/runtime/kernel/arm/opclib/fp32/local_response_norm.h"
 #include "src/runtime/kernel/arm/opclib/fp32/expandDims.h"
-#include "src/runtime/kernel/arm/opclib/fp32/arithmetic_self.h"
+#include "src/runtime/kernel/arm/opclib/arithmetic_self_parameter.h"
 #include "src/runtime/kernel/arm/opclib/pad_parameter.h"
 #include "src/runtime/kernel/arm/opclib/fp32/fill.h"
 #include "src/runtime/kernel/arm/opclib/transpose.h"
mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self.h

@@ -20,6 +20,7 @@
 #include <vector>
 #include "src/lite_kernel.h"
 #include "src/runtime/kernel/arm/opclib/fp32/arithmetic_self.h"
+#include "src/runtime/kernel/arm/opclib/arithmetic_self_parameter.h"
 #include "schema/model_generated.h"
 #include "include/context.h"
mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.cc (new file, mode 100644)
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/int8/arithmetic_self_int8.h"
#include <limits>
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
#include "src/runtime/runtime_api.h"
using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;

namespace mindspore::kernel {
int ArithmeticSelfInt8CPUKernel::Init() {
  int ret = ReSize();
  auto *input_tensor = inputs_.at(kInputIndex);
  auto in_quant_args = input_tensor->GetQuantParams();
  arithmeticSelfParameter_->quant_arg_.in_args_.scale_ = in_quant_args.front().scale;
  arithmeticSelfParameter_->quant_arg_.in_args_.zp_ = in_quant_args.front().zeroPoint;

  auto *out_tensor = outputs_.at(kOutputIndex);
  auto out_quant_args = out_tensor->GetQuantParams();
  arithmeticSelfParameter_->quant_arg_.out_args_.scale_ = out_quant_args.front().scale;
  arithmeticSelfParameter_->quant_arg_.out_args_.zp_ = out_quant_args.front().zeroPoint;

  arithmeticSelfParameter_->quant_arg_.output_activation_max_ = std::numeric_limits<int8_t>::max();
  arithmeticSelfParameter_->quant_arg_.output_activation_min_ = std::numeric_limits<int8_t>::min();
  return ret;
}

int ArithmeticSelfInt8CPUKernel::ReSize() {
  data_size_ = inputs_[0]->ElementsNum();
  thread_sz_count_ = MSMIN(thread_count_, data_size_);
  thread_sz_stride_ = UP_DIV(data_size_, thread_sz_count_);
  return RET_OK;
}

int ArithmeticSelfInt8Runs(int task_id, LiteParallelGroupEnv *penv, void *cdata) {
  auto g_kernel = reinterpret_cast<ArithmeticSelfInt8CPUKernel *>(cdata);
  auto ret = g_kernel->DoArithmeticSelf(task_id);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "ArithmeticSelfRuns error task_id[" << task_id << "] error_code[" << ret << "]";
    return ret;
  }
  return RET_OK;
}

int ArithmeticSelfInt8CPUKernel::DoArithmeticSelf(int task_id) {
  int size = MSMIN(thread_sz_stride_, data_size_ - task_id * thread_sz_stride_);
  if (size <= 0) {
    return RET_OK;
  }
  int offset = task_id * thread_sz_stride_;
  if (arithmeticSelf_run_) {
    auto ret = arithmeticSelf_run_(in_ptr_ + offset, out_ptr_ + offset, size, arithmeticSelfParameter_->quant_arg_);
    if (ret != RET_OK) {
      MS_LOG(ERROR) << "Run failed, illegal input! ";
      return ret;
    }
  } else {
    MS_LOG(ERROR) << "Run function is null! ";
    return RET_ERROR;
  }
  return RET_OK;
}

int ArithmeticSelfInt8CPUKernel::Run() {
  auto input_tensor = inputs_.at(0);
  auto out_tensor = outputs_.at(0);
  in_ptr_ = reinterpret_cast<int8_t *>(input_tensor->Data());
  out_ptr_ = reinterpret_cast<int8_t *>(out_tensor->Data());
  int ret = LiteBackendParallelLaunch(ArithmeticSelfInt8Runs, this, thread_sz_count_);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "ArithmeticSelfRun error error_code[" << ret << "]";
    return ret;
  }
  return RET_OK;
}

kernel::LiteKernel *CpuArithmeticSelfInt8KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
                                                       const std::vector<lite::tensor::Tensor *> &outputs,
                                                       OpParameter *opParameter, const lite::Context *ctx,
                                                       const kernel::KernelKey &desc) {
  MS_ASSERT(opParameter != nullptr);
  if (opParameter == nullptr) {
    MS_LOG(ERROR) << "Creator failed, opParameter is nullptr!";
    return nullptr;
  }
  auto *kernel = new (std::nothrow) ArithmeticSelfInt8CPUKernel(opParameter, inputs, outputs, ctx);
  MS_ASSERT(kernel != nullptr);
  auto ret = kernel->Init();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                  << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
    delete kernel;
    return nullptr;
  }
  return kernel;
}

REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Round, CpuArithmeticSelfInt8KernelCreator)
REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Floor, CpuArithmeticSelfInt8KernelCreator)
REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Ceil, CpuArithmeticSelfInt8KernelCreator)
}  // namespace mindspore::kernel
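For orientation: the kernel splits the flat element buffer across threads with the MSMIN/UP_DIV arithmetic seen in ReSize() and DoArithmeticSelf() above. A standalone sketch of that partitioning (plain C++; the names are illustrative and not part of the patch):

#include <algorithm>
#include <cstdio>

// Illustrative only: reproduce the stride/offset math used by the kernel.
// With data_size = 12 and thread_count = 2: stride = UP_DIV(12, 2) = 6,
// so task 0 handles elements [0, 6) and task 1 handles [6, 12).
int main() {
  int data_size = 12;
  int thread_count = 2;
  int thread_sz_count = std::min(thread_count, data_size);                      // MSMIN
  int thread_sz_stride = (data_size + thread_sz_count - 1) / thread_sz_count;   // UP_DIV
  for (int task_id = 0; task_id < thread_sz_count; ++task_id) {
    int offset = task_id * thread_sz_stride;
    int size = std::min(thread_sz_stride, data_size - offset);
    printf("task %d: offset=%d size=%d\n", task_id, offset, size);
  }
  return 0;
}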
mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.h (new file, mode 100644)
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_ARITHMETIC_SELF_INT8_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_ARITHMETIC_SELF_INT8_H_
#include <vector>
#include "src/lite_kernel.h"
#include "src/runtime/kernel/arm/opclib/arithmetic_self_parameter.h"
#include "src/runtime/kernel/arm/opclib/int8/arithmetic_self_int8.h"
#include "schema/model_generated.h"
#include "include/context.h"
using mindspore::lite::Context;
using mindspore::schema::PrimitiveType_Round;
using mindspore::schema::PrimitiveType_Floor;
using mindspore::schema::PrimitiveType_Ceil;

namespace mindspore::kernel {
class ArithmeticSelfInt8CPUKernel : public LiteKernel {
  typedef int (*ArithmeticSelfInt8Run)(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para);

 public:
  explicit ArithmeticSelfInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
                                       const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx)
      : LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->threadNum) {
    switch (parameter->type_) {
      case PrimitiveType_Round:
        arithmeticSelf_run_ = ElementRound;
        break;
      case PrimitiveType_Floor:
        arithmeticSelf_run_ = ElementFloor;
        break;
      case PrimitiveType_Ceil:
        arithmeticSelf_run_ = ElementCeil;
        break;
      default:
        break;
    }
    arithmeticSelfParameter_ = reinterpret_cast<ArithmeticSelfParameter *>(parameter);
  }
  ~ArithmeticSelfInt8CPUKernel() override = default;

  int Init() override;
  int ReSize() override;
  int Run() override;
  int DoArithmeticSelf(int task_id);

 private:
  int thread_count_;
  int thread_sz_count_;
  int thread_sz_stride_;
  size_t data_size_;
  ArithmeticSelfParameter *arithmeticSelfParameter_;
  ArithmeticSelfInt8Run arithmeticSelf_run_;
  const Context *ctx_;
  int8_t *in_ptr_;
  int8_t *out_ptr_;
};
}  // namespace mindspore::kernel

#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_ARITHMETIC_SELF_INT8_H_
mindspore/lite/src/runtime/kernel/arm/opclib/arithmetic_self_parameter.h (new file, mode 100644)
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_ARITHMETIC_SELF_PARAMETER_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_ARITHMETIC_SELF_PARAMETER_H_
#include "src/runtime/kernel/arm/opclib/op_base.h"
#include "src/runtime/kernel/arm/opclib/errorcode.h"
// For Abs, Cos, Exp, Log, Square, Sqrt, Rsqrt ops.
struct ArithmeticSelfParameter {
  OpParameter op_parameter_;
  ArithSelfQuantArg quant_arg_;
};

#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_ARITHMETIC_SELF_PARAMETER_H_
mindspore/lite/src/runtime/kernel/arm/opclib/fp32/arithmetic_self.h

@@ -23,11 +23,6 @@
 #include "src/runtime/kernel/arm/opclib/op_base.h"
 #include "src/runtime/kernel/arm/opclib/errorcode.h"
-// For Abs, Cos, Exp, Log, Square, Sqrt, Rsqrt ops.
-struct ArithmeticSelfParameter {
-  OpParameter op_parameter_;
-};
 int ElementAbs(float *input, float *output, int element_size);
 int ElementCos(float *input, float *output, int element_size);
mindspore/lite/src/runtime/kernel/arm/opclib/int8/arithmetic_self_int8.cc (new file, mode 100644)
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <math.h>
#include "src/runtime/kernel/arm/opclib/int8/arithmetic_self_int8.h"
int ElementFloor(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para) {
  if (para.in_args_.scale_ == para.out_args_.scale_ && para.in_args_.zp_ == para.out_args_.zp_) {
    for (int i = 0; i < element_size; i++) {
      output[i] = floorf(input[i]);
    }
  } else {
    float in_scale = para.in_args_.scale_;
    int32_t in_zp = para.in_args_.zp_;
    float out_scale = para.out_args_.scale_;
    int32_t out_zp = para.out_args_.zp_;
    float bias = -in_zp * in_scale;
    for (int i = 0; i < element_size; i++) {
      int32_t output_tmp = round(floorf(input[i] * in_scale + bias) / out_scale) + out_zp;
      if (output_tmp > para.output_activation_max_) {
        output[i] = para.output_activation_max_;
      } else if (output_tmp < para.output_activation_min_) {
        output[i] = para.output_activation_min_;
      } else {
        output[i] = static_cast<int8_t>(output_tmp);
      }
    }
  }
  return OPCLIB_OK;
}

int ElementRound(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para) {
  if (para.in_args_.scale_ == para.out_args_.scale_ && para.in_args_.zp_ == para.out_args_.zp_) {
    for (int i = 0; i < element_size; i++) {
      output[i] = round(input[i]);
    }
  } else {
    float in_scale = para.in_args_.scale_;
    int32_t in_zp = para.in_args_.zp_;
    float out_scale = para.out_args_.scale_;
    int32_t out_zp = para.out_args_.zp_;
    float bias = -in_zp * in_scale;
    for (int i = 0; i < element_size; i++) {
      int32_t output_tmp = round(round(input[i] * in_scale + bias) / out_scale) + out_zp;
      if (output_tmp > para.output_activation_max_) {
        output[i] = para.output_activation_max_;
      } else if (output_tmp < para.output_activation_min_) {
        output[i] = para.output_activation_min_;
      } else {
        output[i] = static_cast<int8_t>(output_tmp);
      }
    }
  }
  return OPCLIB_OK;
}

int ElementCeil(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para) {
  if (para.in_args_.scale_ == para.out_args_.scale_ && para.in_args_.zp_ == para.out_args_.zp_) {
    for (int i = 0; i < element_size; i++) {
      output[i] = ceil(input[i]);
    }
  } else {
    float in_scale = para.in_args_.scale_;
    int32_t in_zp = para.in_args_.zp_;
    float out_scale = para.out_args_.scale_;
    int32_t out_zp = para.out_args_.zp_;
    float bias = -in_zp * in_scale;
    for (int i = 0; i < element_size; i++) {
      int32_t output_tmp = round(ceil(input[i] * in_scale + bias) / out_scale) + out_zp;
      if (output_tmp > para.output_activation_max_) {
        output[i] = para.output_activation_max_;
      } else if (output_tmp < para.output_activation_min_) {
        output[i] = para.output_activation_min_;
      } else {
        output[i] = static_cast<int8_t>(output_tmp);
      }
    }
  }
  return OPCLIB_OK;
}
mindspore/lite/src/runtime/kernel/arm/opclib/int8/arithmetic_self_int8.h (new file, mode 100644)
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_ARITHMETIC_SELF_INT8_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_ARITHMETIC_SELF_INT8_H_
#ifdef ENABLE_NEON
#include <arm_neon.h>
#endif
#include "src/runtime/kernel/arm/opclib/op_base.h"
#include "src/runtime/kernel/arm/opclib/errorcode.h"
int ElementRound(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para);
int ElementFloor(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para);
int ElementCeil(int8_t *input, int8_t *output, int number, ArithSelfQuantArg para);

#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_ARITHMETIC_SELF_INT8_H_
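These functions can also be exercised directly, without going through the kernel and registry machinery. A minimal sketch (it assumes the opclib headers and sources above are on the include/build path; it is not part of the patch):

#include <cstdint>
#include <cstdio>
#include "src/runtime/kernel/arm/opclib/int8/arithmetic_self_int8.h"

int main() {
  int8_t in[4] = {1, 2, 3, 4};
  int8_t out[4] = {0};
  ArithSelfQuantArg para;
  para.in_args_.scale_ = 0.8f;   // same quant setup as the *_quant1_thread2 tests below
  para.in_args_.zp_ = 0;
  para.out_args_.scale_ = 1.5f;
  para.out_args_.zp_ = 0;
  para.output_activation_min_ = -128;
  para.output_activation_max_ = 127;
  ElementFloor(in, out, 4, para);  // dequantize, floor, requantize with clamping
  for (int i = 0; i < 4; ++i) {
    printf("%d -> %d\n", in[i], out[i]);  // expected: 0, 1, 1, 2
  }
  return 0;
}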
mindspore/lite/src/runtime/kernel/arm/opclib/quantization/quantize.h

@@ -83,6 +83,13 @@ struct CropQuantArg {
   int output_activation_max_;
 };
 
+struct ArithSelfQuantArg {
+  QuantArg in_args_;
+  QuantArg out_args_;
+  int output_activation_min_;
+  int output_activation_max_;
+};
+
 void QuantizeMultiplier(double double_multiplier, int32_t *quantized_multiplier, int *shift);
 
 inline void QuantizeMultiplierSmallerThanOne(double double_multiplier, int32_t *quantized_multiplier,
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/arithmetic_self_int8_tests.cc (new file, mode 100644)
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include "utils/log_adapter.h"
#include "common/common_test.h"
#include "mindspore/lite/src/runtime/kernel/arm/opclib/arithmetic_self_parameter.h"
#include "mindspore/lite/src/kernel_registry.h"
#include "mindspore/lite/src/lite_kernel.h"
#include "mindspore/lite/src/ir/tensor.h"
namespace mindspore {

class TestArithmeticSelfInt8 : public mindspore::Common {
 public:
  TestArithmeticSelfInt8() {}
};

TEST_F(TestArithmeticSelfInt8, floor_quant0_thread2) {
  std::vector<int8_t> input1 = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
  std::vector<int> shape1 = {2, 3, 2};
  std::vector<int8_t *> input(1, nullptr);
  input[0] = input1.data();

  const int output_size = 12;
  int8_t output[12];
  std::vector<int> output_shape = {2, 3, 2};

  lite::tensor::QuantArg input_quant_arg;
  input_quant_arg.scale = 1.0;
  input_quant_arg.zeroPoint = 0;
  lite::tensor::QuantArg output_quant_arg;
  output_quant_arg.scale = 1.0;
  output_quant_arg.zeroPoint = 0;

  TypeId tid_int8 = kNumberTypeInt8;
  lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor;
  input_tensor1->SetData(input1.data());
  input_tensor1->set_shape(shape1);
  input_tensor1->AddQuantParam(input_quant_arg);
  input_tensor1->set_data_type(tid_int8);
  std::vector<lite::tensor::Tensor *> inputs_tensor(1);
  inputs_tensor[0] = input_tensor1;

  lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor;
  output0_tensor->SetData(output);
  output0_tensor->set_shape(output_shape);
  output0_tensor->AddQuantParam(output_quant_arg);
  output0_tensor->set_data_type(tid_int8);
  std::vector<lite::tensor::Tensor *> outputs_tensor(1);
  outputs_tensor[0] = output0_tensor;

  ArithmeticSelfParameter op_param;
  op_param.op_parameter_.type_ = schema::PrimitiveType_Floor;
  lite::Context *ctx = new lite::Context;
  ctx->threadNum = 2;
  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Floor};
  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
  ASSERT_NE(creator, nullptr);
  kernel::LiteKernel *kernel =
    creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc);
  ASSERT_NE(kernel, nullptr);
  auto output_tensor_shape = output0_tensor->shape();
  ASSERT_EQ(output_tensor_shape, output_shape);
  kernel->Run();

  std::vector<int8_t> except_result = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
  PrintData("output data", output, output_size);
  PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
  CompareOutputData(output, except_result.data(), output_size, 0.000001);

  input_tensor1->SetData(nullptr);
  output0_tensor->SetData(nullptr);
  delete input_tensor1;
  delete output0_tensor;
  delete ctx;
}
TEST_F(TestArithmeticSelfInt8, floor_quant1_thread2) {
  std::vector<int8_t> input1 = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
  std::vector<int> shape1 = {2, 3, 2};
  std::vector<int8_t *> input(1, nullptr);
  input[0] = input1.data();

  const int output_size = 12;
  int8_t output[12];
  std::vector<int> output_shape = {2, 3, 2};

  lite::tensor::QuantArg input_quant_arg;
  input_quant_arg.scale = 0.8;
  input_quant_arg.zeroPoint = 0;
  lite::tensor::QuantArg output_quant_arg;
  output_quant_arg.scale = 1.5;
  output_quant_arg.zeroPoint = 0;

  TypeId tid_int8 = kNumberTypeInt8;
  lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor;
  input_tensor1->SetData(input1.data());
  input_tensor1->set_shape(shape1);
  input_tensor1->AddQuantParam(input_quant_arg);
  input_tensor1->set_data_type(tid_int8);
  std::vector<lite::tensor::Tensor *> inputs_tensor(1);
  inputs_tensor[0] = input_tensor1;

  lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor;
  output0_tensor->SetData(output);
  output0_tensor->set_shape(output_shape);
  output0_tensor->AddQuantParam(output_quant_arg);
  output0_tensor->set_data_type(tid_int8);
  std::vector<lite::tensor::Tensor *> outputs_tensor(1);
  outputs_tensor[0] = output0_tensor;

  ArithmeticSelfParameter op_param;
  op_param.op_parameter_.type_ = schema::PrimitiveType_Floor;
  lite::Context *ctx = new lite::Context;
  ctx->threadNum = 2;
  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Floor};
  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
  ASSERT_NE(creator, nullptr);
  kernel::LiteKernel *kernel =
    creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc);
  ASSERT_NE(kernel, nullptr);
  auto output_tensor_shape = output0_tensor->shape();
  ASSERT_EQ(output_tensor_shape, output_shape);
  kernel->Run();

  std::vector<int8_t> except_result = {0, 1, 1, 2, 3, 3, 3, 4, 5, 5, 5, 6};
  PrintData("output data", output, output_size);
  PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
  CompareOutputData(output, except_result.data(), output_size, 0.000001);

  input_tensor1->SetData(nullptr);
  output0_tensor->SetData(nullptr);
  delete input_tensor1;
  delete output0_tensor;
  delete ctx;
}
TEST_F(TestArithmeticSelfInt8, round_quant0_thread2) {
  std::vector<int8_t> input1 = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
  std::vector<int> shape1 = {2, 3, 2};
  std::vector<int8_t *> input(1, nullptr);
  input[0] = input1.data();

  const int output_size = 12;
  int8_t output[12];
  std::vector<int> output_shape = {2, 3, 2};

  lite::tensor::QuantArg input_quant_arg;
  input_quant_arg.scale = 1.0;
  input_quant_arg.zeroPoint = 0;
  lite::tensor::QuantArg output_quant_arg;
  output_quant_arg.scale = 1.0;
  output_quant_arg.zeroPoint = 0;

  TypeId tid_int8 = kNumberTypeInt8;
  lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor;
  input_tensor1->SetData(input1.data());
  input_tensor1->set_shape(shape1);
  input_tensor1->AddQuantParam(input_quant_arg);
  input_tensor1->set_data_type(tid_int8);
  std::vector<lite::tensor::Tensor *> inputs_tensor(1);
  inputs_tensor[0] = input_tensor1;

  lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor;
  output0_tensor->SetData(output);
  output0_tensor->set_shape(output_shape);
  output0_tensor->AddQuantParam(output_quant_arg);
  output0_tensor->set_data_type(tid_int8);
  std::vector<lite::tensor::Tensor *> outputs_tensor(1);
  outputs_tensor[0] = output0_tensor;

  ArithmeticSelfParameter op_param;
  op_param.op_parameter_.type_ = schema::PrimitiveType_Round;
  lite::Context *ctx = new lite::Context;
  ctx->threadNum = 2;
  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Floor};
  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
  ASSERT_NE(creator, nullptr);
  kernel::LiteKernel *kernel =
    creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc);
  ASSERT_NE(kernel, nullptr);
  auto output_tensor_shape = output0_tensor->shape();
  ASSERT_EQ(output_tensor_shape, output_shape);
  kernel->Run();

  std::vector<int8_t> except_result = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
  PrintData("output data", output, output_size);
  PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
  CompareOutputData(output, except_result.data(), output_size, 0.000001);

  input_tensor1->SetData(nullptr);
  output0_tensor->SetData(nullptr);
  delete input_tensor1;
  delete output0_tensor;
  delete ctx;
}
TEST_F(TestArithmeticSelfInt8, round_quant1_thread2) {
  std::vector<int8_t> input1 = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
  std::vector<int> shape1 = {2, 3, 2};
  std::vector<int8_t *> input(1, nullptr);
  input[0] = input1.data();

  const int output_size = 12;
  int8_t output[12];
  std::vector<int> output_shape = {2, 3, 2};

  lite::tensor::QuantArg input_quant_arg;
  input_quant_arg.scale = 0.8;
  input_quant_arg.zeroPoint = 0;
  lite::tensor::QuantArg output_quant_arg;
  output_quant_arg.scale = 1.5;
  output_quant_arg.zeroPoint = 0;

  TypeId tid_int8 = kNumberTypeInt8;
  lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor;
  input_tensor1->SetData(input1.data());
  input_tensor1->set_shape(shape1);
  input_tensor1->AddQuantParam(input_quant_arg);
  input_tensor1->set_data_type(tid_int8);
  std::vector<lite::tensor::Tensor *> inputs_tensor(1);
  inputs_tensor[0] = input_tensor1;

  lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor;
  output0_tensor->SetData(output);
  output0_tensor->set_shape(output_shape);
  output0_tensor->AddQuantParam(output_quant_arg);
  output0_tensor->set_data_type(tid_int8);
  std::vector<lite::tensor::Tensor *> outputs_tensor(1);
  outputs_tensor[0] = output0_tensor;

  ArithmeticSelfParameter op_param;
  op_param.op_parameter_.type_ = schema::PrimitiveType_Round;
  lite::Context *ctx = new lite::Context;
  ctx->threadNum = 2;
  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Floor};
  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
  ASSERT_NE(creator, nullptr);
  kernel::LiteKernel *kernel =
    creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc);
  ASSERT_NE(kernel, nullptr);
  auto output_tensor_shape = output0_tensor->shape();
  ASSERT_EQ(output_tensor_shape, output_shape);
  kernel->Run();

  std::vector<int8_t> except_result = {1, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 7};
  PrintData("output data", output, output_size);
  PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
  CompareOutputData(output, except_result.data(), output_size, 0.000001);

  input_tensor1->SetData(nullptr);
  output0_tensor->SetData(nullptr);
  delete input_tensor1;
  delete output0_tensor;
  delete ctx;
}
TEST_F(TestArithmeticSelfInt8, ceil_quant0_thread2) {
  std::vector<int8_t> input1 = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
  std::vector<int> shape1 = {2, 3, 2};
  std::vector<int8_t *> input(1, nullptr);
  input[0] = input1.data();

  const int output_size = 12;
  int8_t output[12];
  std::vector<int> output_shape = {2, 3, 2};

  lite::tensor::QuantArg input_quant_arg;
  input_quant_arg.scale = 1.0;
  input_quant_arg.zeroPoint = 0;
  lite::tensor::QuantArg output_quant_arg;
  output_quant_arg.scale = 1.0;
  output_quant_arg.zeroPoint = 0;

  TypeId tid_int8 = kNumberTypeInt8;
  lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor;
  input_tensor1->SetData(input1.data());
  input_tensor1->set_shape(shape1);
  input_tensor1->AddQuantParam(input_quant_arg);
  input_tensor1->set_data_type(tid_int8);
  std::vector<lite::tensor::Tensor *> inputs_tensor(1);
  inputs_tensor[0] = input_tensor1;

  lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor;
  output0_tensor->SetData(output);
  output0_tensor->set_shape(output_shape);
  output0_tensor->AddQuantParam(output_quant_arg);
  output0_tensor->set_data_type(tid_int8);
  std::vector<lite::tensor::Tensor *> outputs_tensor(1);
  outputs_tensor[0] = output0_tensor;

  ArithmeticSelfParameter op_param;
  op_param.op_parameter_.type_ = schema::PrimitiveType_Ceil;
  lite::Context *ctx = new lite::Context;
  ctx->threadNum = 2;
  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Floor};
  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
  ASSERT_NE(creator, nullptr);
  kernel::LiteKernel *kernel =
    creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc);
  ASSERT_NE(kernel, nullptr);
  auto output_tensor_shape = output0_tensor->shape();
  ASSERT_EQ(output_tensor_shape, output_shape);
  kernel->Run();

  std::vector<int8_t> except_result = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
  PrintData("output data", output, output_size);
  PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
  CompareOutputData(output, except_result.data(), output_size, 0.000001);

  input_tensor1->SetData(nullptr);
  output0_tensor->SetData(nullptr);
  delete input_tensor1;
  delete output0_tensor;
  delete ctx;
}
TEST_F(TestArithmeticSelfInt8, ceil_quant1_thread2) {
  std::vector<int8_t> input1 = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
  std::vector<int> shape1 = {2, 3, 2};
  std::vector<int8_t *> input(1, nullptr);
  input[0] = input1.data();

  const int output_size = 12;
  int8_t output[12];
  std::vector<int> output_shape = {2, 3, 2};

  lite::tensor::QuantArg input_quant_arg;
  input_quant_arg.scale = 0.8;
  input_quant_arg.zeroPoint = 0;
  lite::tensor::QuantArg output_quant_arg;
  output_quant_arg.scale = 1.5;
  output_quant_arg.zeroPoint = 0;

  TypeId tid_int8 = kNumberTypeInt8;
  lite::tensor::Tensor *input_tensor1 = new lite::tensor::Tensor;
  input_tensor1->SetData(input1.data());
  input_tensor1->set_shape(shape1);
  input_tensor1->AddQuantParam(input_quant_arg);
  input_tensor1->set_data_type(tid_int8);
  std::vector<lite::tensor::Tensor *> inputs_tensor(1);
  inputs_tensor[0] = input_tensor1;

  lite::tensor::Tensor *output0_tensor = new lite::tensor::Tensor;
  output0_tensor->SetData(output);
  output0_tensor->set_shape(output_shape);
  output0_tensor->AddQuantParam(output_quant_arg);
  output0_tensor->set_data_type(tid_int8);
  std::vector<lite::tensor::Tensor *> outputs_tensor(1);
  outputs_tensor[0] = output0_tensor;

  ArithmeticSelfParameter op_param;
  op_param.op_parameter_.type_ = schema::PrimitiveType_Ceil;
  lite::Context *ctx = new lite::Context;
  ctx->threadNum = 2;
  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Floor};
  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
  ASSERT_NE(creator, nullptr);
  kernel::LiteKernel *kernel =
    creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc);
  ASSERT_NE(kernel, nullptr);
  auto output_tensor_shape = output0_tensor->shape();
  ASSERT_EQ(output_tensor_shape, output_shape);
  kernel->Run();

  std::vector<int8_t> except_result = {1, 1, 2, 3, 3, 3, 4, 5, 5, 5, 6, 7};
  PrintData("output data", output, output_size);
  PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
  CompareOutputData(output, except_result.data(), output_size, 0.000001);

  input_tensor1->SetData(nullptr);
  output0_tensor->SetData(nullptr);
  delete input_tensor1;
  delete output0_tensor;
  delete ctx;
}
}  // namespace mindspore