Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
magicwindyyd
mindspore
提交
f6dc9287
M
mindspore
项目概览
magicwindyyd
/
mindspore
与 Fork 源项目一致
Fork自
MindSpore / mindspore
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
M
mindspore
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
f6dc9287
编写于
8月 03, 2020
作者:
Z
zhongligeng
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
fix quantdtypecast
上级
fa96dfd1
变更
20
隐藏空白更改
内联
并排
Showing
20 changed file
with
315 addition
and
416 deletion
+315
-416
mindspore/lite/src/ops/ops.cc
mindspore/lite/src/ops/ops.cc
+2
-4
mindspore/lite/src/ops/ops.h
mindspore/lite/src/ops/ops.h
+3
-10
mindspore/lite/src/ops/quant_dtype_cast.cc
mindspore/lite/src/ops/quant_dtype_cast.cc
+4
-3
mindspore/lite/src/ops/quantize.cc
mindspore/lite/src/ops/quantize.cc
+0
-34
mindspore/lite/src/populate_parameter.cc
mindspore/lite/src/populate_parameter.cc
+11
-20
mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.cc
...pore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.cc
+148
-0
mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.h
...spore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.h
+10
-9
mindspore/lite/src/runtime/kernel/arm/fp32/quantize.h
mindspore/lite/src/runtime/kernel/arm/fp32/quantize.h
+0
-46
mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.cc
mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.cc
+40
-13
mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.h
mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.h
+11
-2
mindspore/lite/src/runtime/kernel/arm/int8/dequantize.cc
mindspore/lite/src/runtime/kernel/arm/int8/dequantize.cc
+0
-118
mindspore/lite/src/runtime/kernel/arm/opclib/fp32/quantize.h
mindspore/lite/src/runtime/kernel/arm/opclib/fp32/quantize.h
+0
-28
mindspore/lite/src/runtime/kernel/arm/opclib/fp32/space_to_depth.cc
...lite/src/runtime/kernel/arm/opclib/fp32/space_to_depth.cc
+5
-2
mindspore/lite/src/runtime/kernel/arm/opclib/fp32/space_to_depth.h
.../lite/src/runtime/kernel/arm/opclib/fp32/space_to_depth.h
+1
-1
mindspore/lite/src/runtime/kernel/arm/opclib/int8/dequantize.cc
...ore/lite/src/runtime/kernel/arm/opclib/int8/dequantize.cc
+0
-29
mindspore/lite/src/runtime/kernel/arm/opclib/int8/quant_dtype_cast.cc
...te/src/runtime/kernel/arm/opclib/int8/quant_dtype_cast.cc
+12
-1
mindspore/lite/src/runtime/kernel/arm/opclib/int8/quant_dtype_cast.h
...ite/src/runtime/kernel/arm/opclib/int8/quant_dtype_cast.h
+7
-4
mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/quantize_fp32_tests.cc
...est/ut/src/runtime/kernel/arm/fp32/quantize_fp32_tests.cc
+0
-77
mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc
.../src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc
+5
-2
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/quant_dtype_cast_tests.cc
.../ut/src/runtime/kernel/arm/int8/quant_dtype_cast_tests.cc
+56
-13
未找到文件。
mindspore/lite/src/ops/ops.cc
浏览文件 @
f6dc9287
...
...
@@ -137,10 +137,8 @@ Primitive *Primitive::CreatePrimitive(schema::Primitive *primitive) {
return
new
lite
::
SpaceToDepth
(
const_cast
<
schema
::
Primitive
*>
(
primitive
));
case
schema
::
PrimitiveType_SpaceToBatch
:
return
new
lite
::
SpaceToBatch
(
const_cast
<
schema
::
Primitive
*>
(
primitive
));
case
schema
::
PrimitiveType_OnnxInt8Dequantize
:
return
new
lite
::
Dequantize
(
const_cast
<
schema
::
Primitive
*>
(
primitive
));
case
schema
::
PrimitiveType_OnnxInt8Quantize
:
return
new
lite
::
Quantize
(
const_cast
<
schema
::
Primitive
*>
(
primitive
));
case
schema
::
PrimitiveType_QuantDTypeCast
:
return
new
lite
::
QuantDTypeCast
(
const_cast
<
schema
::
Primitive
*>
(
primitive
));
default:
break
;
}
...
...
mindspore/lite/src/ops/ops.h
浏览文件 @
f6dc9287
...
...
@@ -691,17 +691,10 @@ class SpaceToDepth : public Primitive {
int
InferShape
(
std
::
vector
<
tensor
::
Tensor
*>
inputs
,
std
::
vector
<
tensor
::
Tensor
*>
outputs
)
override
;
};
class
Dequantize
:
public
Primitive
{
class
QuantDTypeCast
:
public
Primitive
{
public:
explicit
Dequantize
(
schema
::
Primitive
*
primitive
)
:
Primitive
(
primitive
)
{}
const
schema
::
OnnxInt8Dequantize
*
GetAttribute
()
const
{
return
this
->
primitive
->
value_as_OnnxInt8Dequantize
();
}
int
InferShape
(
std
::
vector
<
tensor
::
Tensor
*>
inputs
,
std
::
vector
<
tensor
::
Tensor
*>
outputs
)
override
;
};
class
Quantize
:
public
Primitive
{
public:
explicit
Quantize
(
schema
::
Primitive
*
primitive
)
:
Primitive
(
primitive
)
{}
const
schema
::
OnnxInt8Quantize
*
GetAttribute
()
const
{
return
this
->
primitive
->
value_as_OnnxInt8Quantize
();
}
explicit
QuantDTypeCast
(
schema
::
Primitive
*
primitive
)
:
Primitive
(
primitive
)
{}
const
schema
::
QuantDTypeCast
*
GetAttribute
()
const
{
return
this
->
primitive
->
value_as_QuantDTypeCast
();
}
int
InferShape
(
std
::
vector
<
tensor
::
Tensor
*>
inputs
,
std
::
vector
<
tensor
::
Tensor
*>
outputs
)
override
;
};
}
// namespace lite
...
...
mindspore/lite/src/ops/
dequantize
.cc
→
mindspore/lite/src/ops/
quant_dtype_cast
.cc
浏览文件 @
f6dc9287
...
...
@@ -20,15 +20,16 @@
#include "src/ir/tensor.h"
namespace
mindspore
::
lite
{
int
Dequantize
::
InferShape
(
std
::
vector
<
tensor
::
Tensor
*>
inputs_
,
std
::
vector
<
tensor
::
Tensor
*>
outputs_
)
{
int
QuantDTypeCast
::
InferShape
(
std
::
vector
<
tensor
::
Tensor
*>
inputs_
,
std
::
vector
<
tensor
::
Tensor
*>
outputs_
)
{
MS_ASSERT
(
this
->
primitive
!=
nullptr
);
auto
input
=
inputs_
.
front
();
MS_ASSERT
(
input
!=
nullptr
);
auto
output
=
outputs_
.
front
();
MS_ASSERT
(
output
!=
nullptr
);
output
->
set_shape
(
input
->
shape
());
output
->
set_data_type
(
kNumberTypeFloat32
);
auto
param
=
primitive
->
value_as_QuantDTypeCast
();
MS_ASSERT
(
input
->
data_type
()
==
param
->
srcT
);
output
->
set_data_type
(
static_cast
<
TypeId
>
(
param
->
dstT
()));
return
RET_OK
;
}
}
// namespace mindspore::lite
mindspore/lite/src/ops/quantize.cc
已删除
100644 → 0
浏览文件 @
fa96dfd1
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/ops/ops.h"
#include "include/errorcode.h"
#include "utils/log_adapter.h"
#include "src/ir/tensor.h"
namespace
mindspore
::
lite
{
int
Quantize
::
InferShape
(
std
::
vector
<
tensor
::
Tensor
*>
inputs_
,
std
::
vector
<
tensor
::
Tensor
*>
outputs_
)
{
MS_ASSERT
(
this
->
primitive
!=
nullptr
);
auto
input
=
inputs_
.
front
();
MS_ASSERT
(
input
!=
nullptr
);
auto
output
=
outputs_
.
front
();
MS_ASSERT
(
output
!=
nullptr
);
output
->
set_shape
(
input
->
shape
());
output
->
set_data_type
(
kNumberTypeInt8
);
return
RET_OK
;
}
}
// namespace mindspore::lite
mindspore/lite/src/populate_parameter.cc
浏览文件 @
f6dc9287
...
...
@@ -65,8 +65,7 @@
#include "src/runtime/kernel/arm/base/prior_box.h"
#include "src/runtime/kernel/arm/opclib/fp32/space_to_depth.h"
#include "src/runtime/kernel/arm/opclib/fp32/space_to_batch.h"
#include "src/runtime/kernel/arm/opclib/int8/dequantize.h"
#include "src/runtime/kernel/arm/opclib/fp32/quantize.h"
#include "src/runtime/kernel/arm/opclib/int8/quant_dtype_cast.h"
namespace
mindspore
::
kernel
{
OpParameter
*
PopulateFillParameter
(
const
lite
::
Primitive
*
primitive
)
{
...
...
@@ -1032,24 +1031,17 @@ OpParameter *PopulateFlattenParameter(const lite::Primitive *primitive) {
return
reinterpret_cast
<
OpParameter
*>
(
flatten_param
);
}
OpParameter
*
PopulateDequantizeParameter
(
const
lite
::
Primitive
*
primitive
)
{
DequantizeParameter
*
dequantize_parameter
=
new
(
std
::
nothrow
)
DequantizeParameter
();
if
(
dequantize_parameter
==
nullptr
)
{
MS_LOG
(
ERROR
)
<<
"new DequantizeParameter fail!"
;
return
nullptr
;
}
dequantize_parameter
->
op_parameter_
.
type_
=
primitive
->
Type
();
return
reinterpret_cast
<
OpParameter
*>
(
dequantize_parameter
);
}
OpParameter
*
PopulateQuantizeParameter
(
const
lite
::
Primitive
*
primitive
)
{
QuantizeParameter
*
quantize_parameter
=
new
(
std
::
nothrow
)
QuantizeParameter
();
if
(
quantize_parameter
==
nullptr
)
{
MS_LOG
(
ERROR
)
<<
"new QuantizeParameter fail!"
;
OpParameter
*
PopulateQuantDTypeCastParameter
(
const
lite
::
Primitive
*
primitive
)
{
QuantDTypeCastParameter
*
parameter
=
new
(
std
::
nothrow
)
QuantDTypeCastParameter
();
if
(
parameter
==
nullptr
)
{
MS_LOG
(
ERROR
)
<<
"new QuantDTypeCastParameter fail!"
;
return
nullptr
;
}
quantize_parameter
->
op_parameter_
.
type_
=
primitive
->
Type
();
return
reinterpret_cast
<
OpParameter
*>
(
quantize_parameter
);
parameter
->
op_parameter_
.
type_
=
primitive
->
Type
();
auto
quant_dtype_cast_param
=
primitive
->
Value
()
->
value_as_QuantDTypeCast
();
parameter
->
srcT
=
quant_dtype_cast_param
->
srcT
();
parameter
->
dstT
=
quant_dtype_cast_param
->
dstT
();
return
reinterpret_cast
<
OpParameter
*>
(
parameter
);
}
OpParameter
*
PopulateStridedSliceParameter
(
const
lite
::
Primitive
*
primitive
)
{
...
...
@@ -1209,8 +1201,7 @@ PopulateParameterRegistry::PopulateParameterRegistry() {
populate_parameter_funcs_
[
schema
::
PrimitiveType_Square
]
=
PopulateSqueezeParameter
;
populate_parameter_funcs_
[
schema
::
PrimitiveType_Split
]
=
PopulateSplitParameter
;
populate_parameter_funcs_
[
schema
::
PrimitiveType_PriorBox
]
=
PopulatePriorBoxParameter
;
populate_parameter_funcs_
[
schema
::
PrimitiveType_OnnxInt8Dequantize
]
=
PopulateDequantizeParameter
;
populate_parameter_funcs_
[
schema
::
PrimitiveType_OnnxInt8Quantize
]
=
PopulateQuantizeParameter
;
populate_parameter_funcs_
[
schema
::
PrimitiveType_QuantDTypeCast
]
=
PopulateQuantDTypeCastParameter
;
}
PopulateParameterRegistry
*
PopulateParameterRegistry
::
GetInstance
()
{
...
...
mindspore/lite/src/runtime/kernel/arm/
fp32/quantize
.cc
→
mindspore/lite/src/runtime/kernel/arm/
base/quant_dtype_cast
.cc
浏览文件 @
f6dc9287
...
...
@@ -13,11 +13,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/
fp32/quantize
.h"
#include "src/runtime/kernel/arm/
base/quant_dtype_cast
.h"
#include <vector>
#include "src/runtime/kernel/arm/opclib/fp32/quantize.h"
#include "src/kernel_registry.h"
#include "src/runtime/kernel/arm/opclib/int8/quant_dtype_cast.h"
#include "src/runtime/runtime_api.h"
#include "src/kernel_registry.h"
#include "schema/model_generated.h"
#include "include/errorcode.h"
...
...
@@ -25,15 +25,15 @@ using mindspore::kernel::KERNEL_ARCH::kCPU;
using
mindspore
::
lite
::
KernelRegistrar
;
using
mindspore
::
lite
::
RET_ERROR
;
using
mindspore
::
lite
::
RET_OK
;
using
mindspore
::
schema
::
PrimitiveType_
OnnxInt8Quantize
;
using
mindspore
::
schema
::
PrimitiveType_
QuantDTypeCast
;
namespace
mindspore
::
kernel
{
namespace
{
constexpr
int
kQuant
ize
InputNum
=
1
;
constexpr
int
kQuant
ize
OutputNum
=
1
;
constexpr
int
kQuant
DTypeCast
InputNum
=
1
;
constexpr
int
kQuant
DTypeCast
OutputNum
=
1
;
}
// namespace
int
Quant
ize
CPUKernel
::
Init
()
{
int
Quant
DTypeCast
CPUKernel
::
Init
()
{
if
(
inputs_
.
size
()
!=
1
)
{
MS_LOG
(
ERROR
)
<<
"inputs number should be 1, but "
<<
inputs_
.
size
()
<<
" is given."
;
return
RET_ERROR
;
...
...
@@ -43,6 +43,25 @@ int QuantizeCPUKernel::Init() {
return
RET_ERROR
;
}
auto
in_tensor
=
inputs_
.
front
();
auto
out_tensor
=
outputs_
.
front
();
auto
param
=
reinterpret_cast
<
QuantDTypeCastParameter
*>
(
opParameter
);
if
(
param
->
srcT
==
kNumberTypeFloat32
&&
param
->
dstT
==
kNumberTypeInt8
)
{
if
(
in_tensor
->
data_type
()
!=
kNumberTypeFloat32
||
out_tensor
->
data_type
()
!=
kNumberTypeInt8
)
{
MS_LOG
(
ERROR
)
<<
"param data type and tensor data type do not match."
;
return
RET_ERROR
;
}
inverse_
=
false
;
}
else
if
(
param
->
srcT
==
kNumberTypeInt8
&&
param
->
dstT
==
kNumberTypeFloat32
)
{
if
(
in_tensor
->
data_type
()
!=
kNumberTypeInt8
||
out_tensor
->
data_type
()
!=
kNumberTypeFloat32
)
{
MS_LOG
(
ERROR
)
<<
"param data type and tensor data type do not match."
;
return
RET_ERROR
;
}
inverse_
=
true
;
}
else
{
MS_LOG
(
ERROR
)
<<
"param data type not supported."
;
return
RET_ERROR
;
}
num_unit_
=
static_cast
<
int
>
(
in_tensor
->
DataSize
());
thread_n_num_
=
MSMIN
(
thread_num_
,
num_unit_
);
thread_n_stride_
=
UP_DIV
(
num_unit_
,
thread_n_num_
);
...
...
@@ -50,39 +69,50 @@ int QuantizeCPUKernel::Init() {
return
RET_OK
;
}
int
Quant
ize
CPUKernel
::
ReSize
()
{
return
RET_OK
;
}
int
Quant
DTypeCast
CPUKernel
::
ReSize
()
{
return
RET_OK
;
}
int
Quant
izeCPUKernel
::
Quantize
(
int
task_id
)
{
int
Quant
DTypeCastCPUKernel
::
QuantDTypeCast
(
int
task_id
)
{
int
num_unit_thread
=
MSMIN
(
thread_n_stride_
,
num_unit_
-
task_id
*
thread_n_stride_
);
if
(
num_unit_thread
<=
0
)
{
return
RET_OK
;
}
int
thread_offset
=
task_id
*
thread_n_stride_
;
auto
quant_arg
=
inputs_
.
front
()
->
GetQuantParams
().
front
();
int
ret
=
QuantizeToInt8
(
input_ptr_
+
thread_offset
,
output_ptr_
+
thread_offset
,
quant_arg
.
scale
,
quant_arg
.
zeroPoint
,
num_unit_thread
);
int
ret
;
if
(
inverse_
)
{
ret
=
DequantizeInt8
(
int8_ptr_
+
thread_offset
,
float32_ptr_
+
thread_offset
,
quant_arg
.
scale
,
quant_arg
.
zeroPoint
,
num_unit_thread
);
}
else
{
ret
=
QuantizeToInt8
(
float32_ptr_
+
thread_offset
,
int8_ptr_
+
thread_offset
,
quant_arg
.
scale
,
quant_arg
.
zeroPoint
,
num_unit_thread
);
}
if
(
ret
!=
RET_OK
)
{
MS_LOG
(
ERROR
)
<<
"Quant
ize
error task_id["
<<
task_id
<<
"] error_code["
<<
ret
<<
"]"
;
MS_LOG
(
ERROR
)
<<
"Quant
DTypeCast
error task_id["
<<
task_id
<<
"] error_code["
<<
ret
<<
"]"
;
return
RET_ERROR
;
}
return
RET_OK
;
}
int
Quant
ize
Run
(
int
task_id
,
LiteParallelGroupEnv
*
penv
,
void
*
cdata
)
{
auto
g_kernel
=
reinterpret_cast
<
Quant
ize
CPUKernel
*>
(
cdata
);
auto
ret
=
g_kernel
->
Quant
ize
(
task_id
);
int
Quant
DTypeCast
Run
(
int
task_id
,
LiteParallelGroupEnv
*
penv
,
void
*
cdata
)
{
auto
g_kernel
=
reinterpret_cast
<
Quant
DTypeCast
CPUKernel
*>
(
cdata
);
auto
ret
=
g_kernel
->
Quant
DTypeCast
(
task_id
);
if
(
ret
!=
RET_OK
)
{
MS_LOG
(
ERROR
)
<<
"Quant
ize
Run error task_id["
<<
task_id
<<
"] error_code["
<<
ret
<<
"]"
;
MS_LOG
(
ERROR
)
<<
"Quant
DTypeCast
Run error task_id["
<<
task_id
<<
"] error_code["
<<
ret
<<
"]"
;
return
RET_ERROR
;
}
return
RET_OK
;
}
int
QuantizeCPUKernel
::
Run
()
{
input_ptr_
=
reinterpret_cast
<
float
*>
(
inputs_
[
0
]
->
Data
());
output_ptr_
=
reinterpret_cast
<
int8_t
*>
(
outputs_
[
0
]
->
Data
());
int
ret
=
LiteBackendParallelLaunch
(
QuantizeRun
,
this
,
thread_n_num_
);
int
QuantDTypeCastCPUKernel
::
Run
()
{
if
(
inverse_
)
{
int8_ptr_
=
reinterpret_cast
<
int8_t
*>
(
inputs_
[
0
]
->
Data
());
float32_ptr_
=
reinterpret_cast
<
float
*>
(
outputs_
[
0
]
->
Data
());
}
else
{
float32_ptr_
=
reinterpret_cast
<
float
*>
(
inputs_
[
0
]
->
Data
());
int8_ptr_
=
reinterpret_cast
<
int8_t
*>
(
outputs_
[
0
]
->
Data
());
}
int
ret
=
LiteBackendParallelLaunch
(
QuantDTypeCastRun
,
this
,
thread_n_num_
);
if
(
ret
!=
RET_OK
)
{
MS_LOG
(
ERROR
)
<<
"Scale error error_code["
<<
ret
<<
"]"
;
return
RET_ERROR
;
...
...
@@ -91,17 +121,17 @@ int QuantizeCPUKernel::Run() {
return
RET_OK
;
}
kernel
::
LiteKernel
*
CpuQuant
ize
Fp32KernelCreator
(
const
std
::
vector
<
lite
::
tensor
::
Tensor
*>
&
inputs
,
const
std
::
vector
<
lite
::
tensor
::
Tensor
*>
&
outputs
,
OpParameter
*
opParameter
,
const
lite
::
Context
*
ctx
,
const
kernel
::
KernelKey
&
desc
)
{
kernel
::
LiteKernel
*
CpuQuant
DTypeCast
Fp32KernelCreator
(
const
std
::
vector
<
lite
::
tensor
::
Tensor
*>
&
inputs
,
const
std
::
vector
<
lite
::
tensor
::
Tensor
*>
&
outputs
,
OpParameter
*
opParameter
,
const
lite
::
Context
*
ctx
,
const
kernel
::
KernelKey
&
desc
)
{
if
(
opParameter
==
nullptr
)
{
MS_LOG
(
ERROR
)
<<
"Input opParameter is nullptr!"
;
return
nullptr
;
}
auto
*
kernel
=
new
(
std
::
nothrow
)
Quant
ize
CPUKernel
(
opParameter
,
inputs
,
outputs
,
ctx
);
auto
*
kernel
=
new
(
std
::
nothrow
)
Quant
DTypeCast
CPUKernel
(
opParameter
,
inputs
,
outputs
,
ctx
);
if
(
kernel
==
nullptr
)
{
MS_LOG
(
ERROR
)
<<
"new Quant
ize
CPUKernel fail!"
;
MS_LOG
(
ERROR
)
<<
"new Quant
DTypeCast
CPUKernel fail!"
;
return
nullptr
;
}
auto
ret
=
kernel
->
Init
();
...
...
@@ -114,5 +144,5 @@ kernel::LiteKernel *CpuQuantizeFp32KernelCreator(const std::vector<lite::tensor:
return
kernel
;
}
REG_KERNEL
(
kCPU
,
kNumberType
Float32
,
PrimitiveType_OnnxInt8Quantize
,
CpuQuantize
Fp32KernelCreator
)
REG_KERNEL
(
kCPU
,
kNumberType
Int8
,
PrimitiveType_QuantDTypeCast
,
CpuQuantDTypeCast
Fp32KernelCreator
)
}
// namespace mindspore::kernel
mindspore/lite/src/runtime/kernel/arm/
int8/dequantize
.h
→
mindspore/lite/src/runtime/kernel/arm/
base/quant_dtype_cast
.h
浏览文件 @
f6dc9287
...
...
@@ -14,33 +14,34 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_
INT8_DEQUANTIZE
_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_
INT8_DEQUANTIZE
_H_
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_
BASE_QUANTDTYPECAST
_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_
BASE_QUANTDTYPECAST
_H_
#include <vector>
#include "src/lite_kernel.h"
namespace
mindspore
::
kernel
{
class
Dequantize
CPUKernel
:
public
LiteKernel
{
class
QuantDTypeCast
CPUKernel
:
public
LiteKernel
{
public:
Dequantize
CPUKernel
(
OpParameter
*
parameter
,
const
std
::
vector
<
lite
::
tensor
::
Tensor
*>
&
inputs
,
QuantDTypeCast
CPUKernel
(
OpParameter
*
parameter
,
const
std
::
vector
<
lite
::
tensor
::
Tensor
*>
&
inputs
,
const
std
::
vector
<
lite
::
tensor
::
Tensor
*>
&
outputs
,
const
lite
::
Context
*
ctx
)
:
LiteKernel
(
parameter
,
inputs
,
outputs
),
thread_num_
(
ctx
->
threadNum
)
{}
~
Dequantize
CPUKernel
()
=
default
;
~
QuantDTypeCast
CPUKernel
()
=
default
;
int
Init
()
override
;
int
ReSize
()
override
;
int
Run
()
override
;
int
Dequantize
(
int
task_id
);
int
QuantDTypeCast
(
int
task_id
);
private:
int
thread_num_
;
int
thread_n_num_
;
int
thread_n_stride_
;
int
num_unit_
;
int8_t
*
input_ptr_
;
float
*
output_ptr_
;
int8_t
*
int8_ptr_
;
float
*
float32_ptr_
;
bool
inverse_
;
};
}
// namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_
INT8_DEQUANTIZE
_H_
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_
BASE_QUANTDTYPECAST
_H_
mindspore/lite/src/runtime/kernel/arm/fp32/quantize.h
已删除
100644 → 0
浏览文件 @
fa96dfd1
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_QUANTIZE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_QUANTIZE_H_
#include <vector>
#include "src/lite_kernel.h"
namespace
mindspore
::
kernel
{
class
QuantizeCPUKernel
:
public
LiteKernel
{
public:
QuantizeCPUKernel
(
OpParameter
*
parameter
,
const
std
::
vector
<
lite
::
tensor
::
Tensor
*>
&
inputs
,
const
std
::
vector
<
lite
::
tensor
::
Tensor
*>
&
outputs
,
const
lite
::
Context
*
ctx
)
:
LiteKernel
(
parameter
,
inputs
,
outputs
),
thread_num_
(
ctx
->
threadNum
)
{}
~
QuantizeCPUKernel
()
=
default
;
int
Init
()
override
;
int
ReSize
()
override
;
int
Run
()
override
;
int
Quantize
(
int
task_id
);
private:
int
thread_num_
;
int
thread_n_num_
;
int
thread_n_stride_
;
int
num_unit_
;
float
*
input_ptr_
;
int8_t
*
output_ptr_
;
};
}
// namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_QUANTIZE_H_
mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.cc
浏览文件 @
f6dc9287
...
...
@@ -16,11 +16,11 @@
#include "src/runtime/kernel/arm/fp32/space_to_depth.h"
#include <vector>
#include "schema/ops_generated.h"
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "src/runtime/kernel/arm/opclib/fp32/space_to_depth.h"
#include "include/errorcode.h"
#include "src/runtime/runtime_api.h"
using
mindspore
::
lite
::
KernelRegistrar
;
using
mindspore
::
lite
::
RET_ERROR
;
...
...
@@ -41,21 +41,48 @@ int SpaceToDepthCPUKernel::Init() {
MS_LOG
(
ERROR
)
<<
"Input block_size should > 0!"
;
return
RET_PARAM_INVALID
;
}
num_unit_
=
static_cast
<
int
>
(
inputs_
[
0
]
->
shape
().
at
(
kNHWC_H
));
thread_h_num_
=
MSMIN
(
thread_num_
,
num_unit_
);
thread_h_stride_
=
UP_DIV
(
num_unit_
,
thread_h_num_
);
return
RET_OK
;
}
int
SpaceToDepthCPUKernel
::
Run
()
{
auto
input
=
inputs_
[
0
];
auto
output
=
outputs_
[
0
];
const
float
*
input_data
=
static_cast
<
const
float
*>
(
input
->
Data
());
float
*
output_data
=
static_cast
<
float
*>
(
output
->
Data
());
auto
in_shape
=
input
->
shape
();
auto
out_shape
=
output
->
shape
();
int
SpaceToDepthCPUKernel
::
SpaceToDepth
(
int
task_id
)
{
int
num_unit_thread
=
MSMIN
(
thread_h_stride_
,
num_unit_
-
task_id
*
thread_h_stride_
);
if
(
num_unit_thread
<=
0
)
{
return
RET_OK
;
}
int
thread_offset
=
task_id
*
thread_h_stride_
;
auto
in_shape
=
inputs_
[
0
]
->
shape
();
auto
out_shape
=
outputs_
[
0
]
->
shape
();
SpaceToDepthParameter
*
param
=
reinterpret_cast
<
SpaceToDepthParameter
*>
(
opParameter
);
if
(
input
->
GetFormat
()
==
schema
::
Format_NHWC
)
{
auto
ret
=
SpaceToDepthForNHWC
(
input_data
,
output_data
,
in_shape
.
data
(),
out_shape
.
data
(),
in_shape
.
size
(),
param
->
block_size_
);
return
ret
;
auto
ret
=
SpaceToDepthForNHWC
(
input_ptr_
,
output_ptr_
,
in_shape
.
data
(),
out_shape
.
data
(),
in_shape
.
size
(),
param
->
block_size_
,
thread_offset
,
thread_offset
+
num_unit_thread
);
if
(
ret
!=
RET_OK
)
{
MS_LOG
(
ERROR
)
<<
"SpaceToDepth error task_id["
<<
task_id
<<
"] error_code["
<<
ret
<<
"]"
;
return
RET_ERROR
;
}
return
RET_OK
;
}
int
SpaceToDepthRun
(
int
task_id
,
LiteParallelGroupEnv
*
penv
,
void
*
cdata
)
{
auto
g_kernel
=
reinterpret_cast
<
SpaceToDepthCPUKernel
*>
(
cdata
);
auto
ret
=
g_kernel
->
SpaceToDepth
(
task_id
);
if
(
ret
!=
RET_OK
)
{
MS_LOG
(
ERROR
)
<<
"SpaceToDepthRun error task_id["
<<
task_id
<<
"] error_code["
<<
ret
<<
"]"
;
return
RET_ERROR
;
}
return
RET_OK
;
}
int
SpaceToDepthCPUKernel
::
Run
()
{
if
(
inputs_
[
0
]
->
GetFormat
()
==
schema
::
Format_NHWC
)
{
int
ret
=
LiteBackendParallelLaunch
(
SpaceToDepthRun
,
this
,
thread_h_num_
);
if
(
ret
!=
RET_OK
)
{
MS_LOG
(
ERROR
)
<<
"SpaceToDepth error error_code["
<<
ret
<<
"]"
;
return
ret
;
}
}
else
{
MS_LOG
(
ERROR
)
<<
"Only support NHWC now!"
;
return
RET_ERROR
;
...
...
@@ -69,7 +96,7 @@ kernel::LiteKernel *CpuSpaceToDepthFp32KernelCreator(const std::vector<lite::ten
MS_LOG
(
ERROR
)
<<
"Input opParameter is nullptr!"
;
return
nullptr
;
}
auto
*
kernel
=
new
(
std
::
nothrow
)
SpaceToDepthCPUKernel
(
opParameter
,
inputs
,
outputs
);
auto
*
kernel
=
new
(
std
::
nothrow
)
SpaceToDepthCPUKernel
(
opParameter
,
inputs
,
outputs
,
ctx
);
if
(
kernel
==
nullptr
)
{
MS_LOG
(
ERROR
)
<<
"new SpaceToDepthCPUKernel fail!"
;
return
nullptr
;
...
...
mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.h
浏览文件 @
f6dc9287
...
...
@@ -24,13 +24,22 @@ namespace mindspore::kernel {
class
SpaceToDepthCPUKernel
:
public
LiteKernel
{
public:
SpaceToDepthCPUKernel
(
OpParameter
*
parameter
,
const
std
::
vector
<
lite
::
tensor
::
Tensor
*>
&
inputs
,
const
std
::
vector
<
lite
::
tensor
::
Tensor
*>
&
outputs
)
:
LiteKernel
(
parameter
,
inputs
,
outputs
)
{}
const
std
::
vector
<
lite
::
tensor
::
Tensor
*>
&
outputs
,
const
lite
::
Context
*
ctx
)
:
LiteKernel
(
parameter
,
inputs
,
outputs
)
,
thread_num_
(
ctx
->
threadNum
)
{}
~
SpaceToDepthCPUKernel
()
=
default
;
int
SpaceToDepth
(
int
task_id
);
int
Init
()
override
;
int
ReSize
()
override
{
return
0
;
};
int
Run
()
override
;
private:
int
thread_num_
;
int
thread_h_stride_
;
int
thread_h_num_
;
int
num_unit_
;
float
*
input_ptr_
;
float
*
output_ptr_
;
};
}
// namespace mindspore::kernel
...
...
mindspore/lite/src/runtime/kernel/arm/int8/dequantize.cc
已删除
100644 → 0
浏览文件 @
fa96dfd1
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/int8/dequantize.h"
#include <vector>
#include "src/runtime/kernel/arm/opclib/int8/dequantize.h"
#include "src/runtime/runtime_api.h"
#include "src/kernel_registry.h"
#include "schema/model_generated.h"
#include "include/errorcode.h"
using
mindspore
::
kernel
::
KERNEL_ARCH
::
kCPU
;
using
mindspore
::
lite
::
KernelRegistrar
;
using
mindspore
::
lite
::
RET_ERROR
;
using
mindspore
::
lite
::
RET_OK
;
using
mindspore
::
schema
::
PrimitiveType_OnnxInt8Dequantize
;
namespace
mindspore
::
kernel
{
namespace
{
constexpr
int
kDequantizeInputNum
=
1
;
constexpr
int
kDequantizeOutputNum
=
1
;
}
// namespace
int
DequantizeCPUKernel
::
Init
()
{
if
(
inputs_
.
size
()
!=
1
)
{
MS_LOG
(
ERROR
)
<<
"inputs number should be 1, but "
<<
inputs_
.
size
()
<<
" is given."
;
return
RET_ERROR
;
}
if
(
outputs_
.
size
()
!=
1
)
{
MS_LOG
(
ERROR
)
<<
"outputs number should be 1, but "
<<
inputs_
.
size
()
<<
" is given."
;
return
RET_ERROR
;
}
auto
in_tensor
=
inputs_
.
front
();
num_unit_
=
static_cast
<
int
>
(
in_tensor
->
DataSize
());
thread_n_num_
=
MSMIN
(
thread_num_
,
num_unit_
);
thread_n_stride_
=
UP_DIV
(
num_unit_
,
thread_n_num_
);
return
RET_OK
;
}
int
DequantizeCPUKernel
::
ReSize
()
{
return
RET_OK
;
}
int
DequantizeCPUKernel
::
Dequantize
(
int
task_id
)
{
int
num_unit_thread
=
MSMIN
(
thread_n_stride_
,
num_unit_
-
task_id
*
thread_n_stride_
);
if
(
num_unit_thread
<=
0
)
{
return
RET_OK
;
}
int
thread_offset
=
task_id
*
thread_n_stride_
;
auto
quant_arg
=
inputs_
.
front
()
->
GetQuantParams
().
front
();
int
ret
=
DequantizeInt8
(
input_ptr_
+
thread_offset
,
output_ptr_
+
thread_offset
,
quant_arg
.
scale
,
quant_arg
.
zeroPoint
,
num_unit_thread
);
if
(
ret
!=
RET_OK
)
{
MS_LOG
(
ERROR
)
<<
"Dequantize error task_id["
<<
task_id
<<
"] error_code["
<<
ret
<<
"]"
;
return
RET_ERROR
;
}
return
RET_OK
;
}
int
DequantizeRun
(
int
task_id
,
LiteParallelGroupEnv
*
penv
,
void
*
cdata
)
{
auto
g_kernel
=
reinterpret_cast
<
DequantizeCPUKernel
*>
(
cdata
);
auto
ret
=
g_kernel
->
Dequantize
(
task_id
);
if
(
ret
!=
RET_OK
)
{
MS_LOG
(
ERROR
)
<<
"DequantizeRun error task_id["
<<
task_id
<<
"] error_code["
<<
ret
<<
"]"
;
return
RET_ERROR
;
}
return
RET_OK
;
}
int
DequantizeCPUKernel
::
Run
()
{
input_ptr_
=
reinterpret_cast
<
int8_t
*>
(
inputs_
[
0
]
->
Data
());
output_ptr_
=
reinterpret_cast
<
float
*>
(
outputs_
[
0
]
->
Data
());
int
ret
=
LiteBackendParallelLaunch
(
DequantizeRun
,
this
,
thread_n_num_
);
if
(
ret
!=
RET_OK
)
{
MS_LOG
(
ERROR
)
<<
"Scale error error_code["
<<
ret
<<
"]"
;
return
RET_ERROR
;
}
return
RET_OK
;
}
kernel
::
LiteKernel
*
CpuDequantizeFp32KernelCreator
(
const
std
::
vector
<
lite
::
tensor
::
Tensor
*>
&
inputs
,
const
std
::
vector
<
lite
::
tensor
::
Tensor
*>
&
outputs
,
OpParameter
*
opParameter
,
const
lite
::
Context
*
ctx
,
const
kernel
::
KernelKey
&
desc
)
{
if
(
opParameter
==
nullptr
)
{
MS_LOG
(
ERROR
)
<<
"Input opParameter is nullptr!"
;
return
nullptr
;
}
auto
*
kernel
=
new
(
std
::
nothrow
)
DequantizeCPUKernel
(
opParameter
,
inputs
,
outputs
,
ctx
);
if
(
kernel
==
nullptr
)
{
MS_LOG
(
ERROR
)
<<
"new DequantizeCPUKernel fail!"
;
return
nullptr
;
}
auto
ret
=
kernel
->
Init
();
if
(
ret
!=
RET_OK
)
{
MS_LOG
(
ERROR
)
<<
"Init kernel failed! name: "
<<
opParameter
->
name_
<<
", type: "
<<
schema
::
EnumNamePrimitiveType
(
static_cast
<
schema
::
PrimitiveType
>
(
opParameter
->
type_
));
delete
kernel
;
return
nullptr
;
}
return
kernel
;
}
REG_KERNEL
(
kCPU
,
kNumberTypeInt8
,
PrimitiveType_OnnxInt8Dequantize
,
CpuDequantizeFp32KernelCreator
)
}
// namespace mindspore::kernel
mindspore/lite/src/runtime/kernel/arm/opclib/fp32/quantize.h
已删除
100644 → 0
浏览文件 @
fa96dfd1
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_FP32_QUANTIZE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_FP32_QUANTIZE_H_
#include "src/runtime/kernel/arm/opclib/op_base.h"
struct
QuantizeParameter
{
OpParameter
op_parameter_
;
};
int
QuantizeToInt8
(
float
*
real_values
,
int8_t
*
quant_values
,
float
scale
,
int32_t
zp
,
int
size
);
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_FP32_QUANTIZE_H_
mindspore/lite/src/runtime/kernel/arm/opclib/fp32/space_to_depth.cc
浏览文件 @
f6dc9287
...
...
@@ -19,13 +19,16 @@
#include "src/runtime/kernel/arm/opclib/op_base.h"
int
SpaceToDepthForNHWC
(
const
float
*
input
,
float
*
output
,
int
*
in_shape
,
int
*
out_shape
,
int
shape_size
,
int
block_size
)
{
int
block_size
,
int
h_start
,
int
h_end
)
{
if
(
input
==
nullptr
||
output
==
nullptr
)
{
return
OPCLIB_NULL_PTR
;
}
if
(
shape_size
!=
C4NUM
)
{
return
OPCLIB_PARAM_INVALID
;
}
if
(
h_start
<
0
||
h_start
>=
h_end
||
h_end
>
out_shape
[
1
])
{
return
OPCLIB_PARAM_INVALID
;
}
int
in_strides
[
C4NUM
];
ComputeStrides
(
in_shape
,
in_strides
,
shape_size
);
int
out_strides
[
C4NUM
];
...
...
@@ -33,7 +36,7 @@ int SpaceToDepthForNHWC(const float *input, float *output, int *in_shape, int *o
for
(
int
i
=
0
;
i
<
out_shape
[
0
];
++
i
)
{
size_t
in_offset_n
=
i
*
in_strides
[
0
];
size_t
out_offset_n
=
i
*
out_strides
[
0
];
for
(
int
j
=
0
;
j
<
out_shape
[
1
]
;
++
j
)
{
for
(
int
j
=
h_start
;
j
<
h_end
;
++
j
)
{
size_t
in_offset_h
=
in_offset_n
+
j
*
block_size
*
in_strides
[
1
];
size_t
out_offset_h
=
out_offset_n
+
j
*
out_strides
[
1
];
for
(
int
k
=
0
;
k
<
out_shape
[
2
];
++
k
)
{
...
...
mindspore/lite/src/runtime/kernel/arm/opclib/fp32/space_to_depth.h
浏览文件 @
f6dc9287
...
...
@@ -23,5 +23,5 @@ struct SpaceToDepthParameter {
};
int
SpaceToDepthForNHWC
(
const
float
*
input
,
float
*
output
,
int
*
in_shape
,
int
*
out_shape
,
int
shape_size
,
int
block_size
);
int
block_size
,
int
h_start
,
int
h_end
);
#endif // MINDSPORE_LITE_SRC_BACKEND_ARM_OPCLIB_FP32_SPACE_TO_DEPTH_H_
mindspore/lite/src/runtime/kernel/arm/opclib/int8/dequantize.cc
已删除
100644 → 0
浏览文件 @
fa96dfd1
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/opclib/int8/dequantize.h"
#include "src/runtime/kernel/arm/opclib/errorcode.h"
/// Dequantizes a buffer of int8 values back to float32 using the affine
/// quantization scheme.
///
/// @param quant_values input buffer of quantized int8 values (must not be null)
/// @param real_values  output buffer receiving the dequantized floats (must not be null)
/// @param scale        quantization scale factor
/// @param zp           quantization zero point
/// @param size         number of elements to convert
/// @return OPCLIB_OK on success, OPCLIB_PARAM_INVALID on a null buffer
int DequantizeInt8(int8_t *quant_values, float *real_values, float scale, int32_t zp, int size) {
  if (quant_values == nullptr || real_values == nullptr) {
    return OPCLIB_PARAM_INVALID;
  }
  for (int i = 0; i < size; ++i) {
    // Affine dequantization: real = (q - zero_point) * scale.
    // The original code added zp, which does not invert the quantize side
    // (q = round(x / scale) + zp, as exercised by the QuantDTypeCast tests).
    real_values[i] = (quant_values[i] - zp) * scale;
  }
  return OPCLIB_OK;
}
mindspore/lite/src/runtime/kernel/arm/opclib/
fp32/quantize
.cc
→
mindspore/lite/src/runtime/kernel/arm/opclib/
int8/quant_dtype_cast
.cc
浏览文件 @
f6dc9287
...
...
@@ -14,9 +14,20 @@
* limitations under the License.
*/
#include "src/runtime/kernel/arm/opclib/
fp32/quantize
.h"
#include "src/runtime/kernel/arm/opclib/
int8/quant_dtype_cast
.h"
#include "src/runtime/kernel/arm/opclib/errorcode.h"
/// Dequantizes a buffer of int8 values back to float32 using the affine
/// quantization scheme.
///
/// @param quant_values input buffer of quantized int8 values (must not be null)
/// @param real_values  output buffer receiving the dequantized floats (must not be null)
/// @param scale        quantization scale factor
/// @param zp           quantization zero point
/// @param size         number of elements to convert
/// @return OPCLIB_OK on success, OPCLIB_PARAM_INVALID on a null buffer
int DequantizeInt8(int8_t *quant_values, float *real_values, float scale, int32_t zp, int size) {
  if (quant_values == nullptr || real_values == nullptr) {
    return OPCLIB_PARAM_INVALID;
  }
  for (int i = 0; i < size; ++i) {
    // Affine dequantization: real = (q - zero_point) * scale.
    // Adding zp (as before) is not the inverse of QuantizeToInt8, whose
    // expected mapping q = round(x / scale) + zp is pinned by
    // QuantDTypeCastTest2 (scale 0.3515625, zp -57, input 1 -> -54).
    real_values[i] = (quant_values[i] - zp) * scale;
  }
  return OPCLIB_OK;
}
int
QuantizeToInt8
(
float
*
real_values
,
int8_t
*
quant_values
,
float
scale
,
int32_t
zp
,
int
size
)
{
if
(
quant_values
==
nullptr
||
real_values
==
nullptr
)
{
return
OPCLIB_PARAM_INVALID
;
...
...
mindspore/lite/src/runtime/kernel/arm/opclib/int8/
dequantize
.h
→
mindspore/lite/src/runtime/kernel/arm/opclib/int8/
quant_dtype_cast
.h
浏览文件 @
f6dc9287
...
...
@@ -14,15 +14,18 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_
DEQUANTIZE
_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_
DEQUANTIZE
_H_
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_
QUANTDTYPECAST
_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_
QUANTDTYPECAST
_H_
#include "src/runtime/kernel/arm/opclib/op_base.h"
struct
Dequantize
Parameter
{
// Parameter block for the QuantDTypeCast kernel, which converts a tensor
// between its quantized (int8) and real (float32) representations.
struct QuantDTypeCastParameter {
  OpParameter op_parameter_;  // common op header (carries the primitive type tag)
  int32_t srcT;               // source data type id, e.g. kNumberTypeInt8 — see the unit tests
  int32_t dstT;               // destination data type id, e.g. kNumberTypeFloat32
};
int
DequantizeInt8
(
int8_t
*
quant_values
,
float
*
real_values
,
float
scale
,
int32_t
zp
,
int
size
);
int
QuantizeToInt8
(
float
*
real_values
,
int8_t
*
quant_values
,
float
scale
,
int32_t
zp
,
int
size
);
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_
DEQUANTIZE
_H_
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_
QUANTDTYPECAST
_H_
mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/quantize_fp32_tests.cc
已删除
100644 → 0
浏览文件 @
fa96dfd1
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <memory>
#include "utils/log_adapter.h"
#include "common/common_test.h"
#include "mindspore/lite/src/runtime/kernel/arm/fp32/quantize.h"
#include "mindspore/lite/src/runtime/kernel/arm/opclib/fp32/quantize.h"
#include "mindspore/lite/src/kernel_registry.h"
#include "mindspore/lite/src/lite_kernel.h"
namespace
mindspore
{
// gtest fixture for the fp32 Quantize kernel unit tests.
class QuantizeTestFp32 : public mindspore::Common {
 public:
  QuantizeTestFp32() {}
};
// Quantizes a 1x4x4x1 float tensor to int8 through the registered CPU kernel
// and checks the result against values precomputed with
// q = round(x / scale) + zero_point for scale = 0.3515625, zp = -57.
TEST_F(QuantizeTestFp32, QuantizeTest1) {
  const lite::tensor::QuantArg quant_arg = {0.3515625, -57};
  QuantizeParameter param;
  param.op_parameter_.type_ = schema::PrimitiveType_OnnxInt8Quantize;
  std::vector<float> input = {1, 2, 5, 6, 10, -20, 3, 8, 18, 10, 3, 4, 11, 16, 15, 25};
  std::vector<int> in_shape = {1, 4, 4, 1};
  lite::tensor::Tensor input_tensor;
  input_tensor.SetData(input.data());
  input_tensor.set_shape(in_shape);
  input_tensor.SetFormat(schema::Format_NHWC);
  input_tensor.set_data_type(kNumberTypeFloat32);
  input_tensor.AddQuantParam(quant_arg);
  std::vector<lite::tensor::Tensor *> inputs_tensor;
  inputs_tensor.emplace_back(&input_tensor);

  const int out_size = 16;
  int8_t expect_out[16] = {-54, -51, -43, -40, -29, -114, -48, -34, -6, -29, -48, -46, -26, -11, -14, 14};
  std::vector<int8_t> output(16);
  std::vector<int> out_shape = {1, 4, 4, 1};
  lite::tensor::Tensor output_tensor;
  output_tensor.SetData(output.data());
  output_tensor.set_shape(out_shape);
  output_tensor.SetFormat(schema::Format_NHWC);
  output_tensor.set_data_type(kNumberTypeInt8);
  std::vector<lite::tensor::Tensor *> outputs_tensor;
  outputs_tensor.emplace_back(&output_tensor);

  lite::Context ctx;
  ctx.threadNum = 3;
  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_OnnxInt8Quantize};
  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
  ASSERT_NE(creator, nullptr);
  kernel::LiteKernel *kernel =
      creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&param), &ctx, desc);
  ASSERT_NE(kernel, nullptr);
  kernel->Run();
  for (int i = 0; i < out_size; ++i) {
    // int8_t streams as a character; cast so the value prints as a number.
    std::cout << static_cast<int32_t>(output[i]) << " ";
  }
  std::cout << "\n";
  CompareOutputData(output.data(), expect_out, out_size, 0.000001);
}
}
// namespace mindspore
mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc
浏览文件 @
f6dc9287
...
...
@@ -37,7 +37,9 @@ TEST_F(SpaceToDepthTestFp32, SpaceToDepthTest1) {
float
output
[
16
];
int
in_shape
[
4
]
=
{
1
,
4
,
4
,
1
};
int
out_shape
[
4
]
=
{
1
,
2
,
2
,
4
};
SpaceToDepthForNHWC
((
const
float
*
)
input
,
output
,
in_shape
,
out_shape
,
4
,
2
);
int
h_start
=
0
;
int
h_end
=
2
;
SpaceToDepthForNHWC
((
const
float
*
)
input
,
output
,
in_shape
,
out_shape
,
4
,
2
,
h_start
,
h_end
);
for
(
int
i
=
0
;
i
<
out_size
;
++
i
)
{
std
::
cout
<<
output
[
i
]
<<
" "
;
}
...
...
@@ -69,10 +71,11 @@ TEST_F(SpaceToDepthTestFp32, SpaceToDepthTest2) {
outputs_tensor
.
emplace_back
(
&
output_tensor
);
SpaceToDepthParameter
op_param
;
op_param
.
op_parameter_
.
type_
=
schema
::
PrimitiveType_SpaceTo
Batc
h
;
op_param
.
op_parameter_
.
type_
=
schema
::
PrimitiveType_SpaceTo
Dept
h
;
op_param
.
block_size_
=
2
;
lite
::
Context
ctx
;
ctx
.
threadNum
=
3
;
kernel
::
KernelKey
desc
=
{
kernel
::
KERNEL_ARCH
::
kCPU
,
kNumberTypeFloat32
,
schema
::
PrimitiveType_SpaceToDepth
};
auto
creator
=
lite
::
KernelRegistry
::
GetInstance
()
->
GetCreator
(
desc
);
ASSERT_NE
(
creator
,
nullptr
);
...
...
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/
dequantize_int8
_tests.cc
→
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/
quant_dtype_cast
_tests.cc
浏览文件 @
f6dc9287
...
...
@@ -17,28 +17,26 @@
#include <memory>
#include "utils/log_adapter.h"
#include "common/common_test.h"
#include "mindspore/lite/src/runtime/kernel/arm/
int8/dequantize
.h"
#include "mindspore/lite/src/runtime/kernel/arm/opclib/int8/
dequantize
.h"
#include "mindspore/lite/src/runtime/kernel/arm/
base/quant_dtype_cast
.h"
#include "mindspore/lite/src/runtime/kernel/arm/opclib/int8/
quant_dtype_cast
.h"
#include "mindspore/lite/src/kernel_registry.h"
#include "mindspore/lite/src/lite_kernel.h"
namespace
mindspore
{
class
Dequantize
TestFp32
:
public
mindspore
::
Common
{
class
QuantDTypeCast
TestFp32
:
public
mindspore
::
Common
{
public:
Dequantize
TestFp32
()
{}
QuantDTypeCast
TestFp32
()
{}
};
TEST_F
(
DequantizeTestFp32
,
Dequantize
Test1
)
{
TEST_F
(
QuantDTypeCastTestFp32
,
QuantDTypeCast
Test1
)
{
const
lite
::
tensor
::
QuantArg
quant_arg
{
0.21176
,
5
};
// quant_arg.scale = 100.0
;
// quant_arg.zeroPoint = 20
;
DequantizeParameter
param
;
param
.
op_parameter_
.
type_
=
schema
::
PrimitiveType_
OnnxInt8Dequantize
;
QuantDTypeCastParameter
param
;
param
.
srcT
=
kNumberTypeInt8
;
param
.
dstT
=
kNumberTypeFloat32
;
param
.
op_parameter_
.
type_
=
schema
::
PrimitiveType_
QuantDTypeCast
;
std
::
vector
<
int8_t
>
input
=
{
10
,
14
,
29
,
33
,
52
,
99
,
19
,
43
,
90
,
52
,
19
,
24
,
57
,
127
,
76
,
123
};
// int8_t input0[] = {1, 2, 10};
// int32_t a = input0[0] + 2;
std
::
vector
<
int
>
in_shape
=
{
1
,
4
,
4
,
1
};
lite
::
tensor
::
Tensor
input_tensor
;
input_tensor
.
SetData
(
input
.
data
());
...
...
@@ -59,13 +57,13 @@ TEST_F(DequantizeTestFp32, DequantizeTest1) {
output_tensor
.
SetData
(
output
.
data
());
output_tensor
.
set_shape
(
out_shape
);
output_tensor
.
set_data_type
(
kNumberTypeFloat32
);
output_tensor
.
SetFormat
(
schema
::
Format_NHWC
);
//
output_tensor.SetFormat(schema::Format_NHWC);
std
::
vector
<
lite
::
tensor
::
Tensor
*>
outputs_tensor
;
outputs_tensor
.
emplace_back
(
&
output_tensor
);
lite
::
Context
ctx
;
ctx
.
threadNum
=
3
;
kernel
::
KernelKey
desc
=
{
kernel
::
KERNEL_ARCH
::
kCPU
,
kNumberTypeInt8
,
schema
::
PrimitiveType_
OnnxInt8Dequantize
};
kernel
::
KernelKey
desc
=
{
kernel
::
KERNEL_ARCH
::
kCPU
,
kNumberTypeInt8
,
schema
::
PrimitiveType_
QuantDTypeCast
};
auto
creator
=
lite
::
KernelRegistry
::
GetInstance
()
->
GetCreator
(
desc
);
ASSERT_NE
(
creator
,
nullptr
);
kernel
::
LiteKernel
*
kernel
=
...
...
@@ -80,4 +78,49 @@ TEST_F(DequantizeTestFp32, DequantizeTest1) {
CompareOutputData
(
output
.
data
(),
expect_out
,
out_size
,
0.000001
);
}
// Casts a 1x4x4x1 float32 tensor to int8 via the QuantDTypeCast CPU kernel
// (srcT = float32, dstT = int8) and checks against values precomputed with
// q = round(x / scale) + zero_point for scale = 0.3515625, zp = -57.
TEST_F(QuantDTypeCastTestFp32, QuantDTypeCastTest2) {
  const lite::tensor::QuantArg quant_arg = {0.3515625, -57};
  QuantDTypeCastParameter param;
  param.op_parameter_.type_ = schema::PrimitiveType_QuantDTypeCast;
  param.dstT = kNumberTypeInt8;
  param.srcT = kNumberTypeFloat32;
  std::vector<float> input = {1, 2, 5, 6, 10, -20, 3, 8, 18, 10, 3, 4, 11, 16, 15, 25};
  std::vector<int> in_shape = {1, 4, 4, 1};
  lite::tensor::Tensor input_tensor;
  input_tensor.SetData(input.data());
  input_tensor.set_shape(in_shape);
  // input_tensor.SetFormat(schema::Format_NHWC);
  input_tensor.set_data_type(kNumberTypeFloat32);
  input_tensor.AddQuantParam(quant_arg);
  std::vector<lite::tensor::Tensor *> inputs_tensor;
  inputs_tensor.emplace_back(&input_tensor);

  const int out_size = 16;
  int8_t expect_out[16] = {-54, -51, -43, -40, -29, -114, -48, -34, -6, -29, -48, -46, -26, -11, -14, 14};
  std::vector<int8_t> output(16);
  std::vector<int> out_shape = {1, 4, 4, 1};
  lite::tensor::Tensor output_tensor;
  output_tensor.SetData(output.data());
  output_tensor.set_shape(out_shape);
  output_tensor.SetFormat(schema::Format_NHWC);
  output_tensor.set_data_type(kNumberTypeInt8);
  std::vector<lite::tensor::Tensor *> outputs_tensor;
  outputs_tensor.emplace_back(&output_tensor);

  lite::Context ctx;
  ctx.threadNum = 3;
  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_QuantDTypeCast};
  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
  ASSERT_NE(creator, nullptr);
  kernel::LiteKernel *kernel =
      creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&param), &ctx, desc);
  ASSERT_NE(kernel, nullptr);
  kernel->Run();
  for (int i = 0; i < out_size; ++i) {
    // int8_t streams as a character; cast so the value prints as a number.
    std::cout << static_cast<int32_t>(output[i]) << " ";
  }
  std::cout << "\n";
  CompareOutputData(output.data(), expect_out, out_size, 0.000001);
}
}
// namespace mindspore
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录