Commit 4b6b1ddc
Authored July 30, 2020 by zhongligeng

spacetodepth and spacetobatch fp32
dequantize and quantize

Parent: 3d7732a7
Showing 27 changed files with 1730 additions and 0 deletions (+1730 -0)
mindspore/lite/src/ops/dequantize.cc  +34  -0
mindspore/lite/src/ops/ops.cc  +8  -0
mindspore/lite/src/ops/ops.h  +36  -0
mindspore/lite/src/ops/quantize.cc  +34  -0
mindspore/lite/src/ops/space_to_batch.cc  +91  -0
mindspore/lite/src/ops/space_to_depth.cc  +62  -0
mindspore/lite/src/populate_parameter.cc  +60  -0
mindspore/lite/src/runtime/kernel/arm/fp32/quantize.cc  +118  -0
mindspore/lite/src/runtime/kernel/arm/fp32/quantize.h  +46  -0
mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch.cc  +106  -0
mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch.h  +41  -0
mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.cc  +89  -0
mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.h  +37  -0
mindspore/lite/src/runtime/kernel/arm/int8/dequantize.cc  +118  -0
mindspore/lite/src/runtime/kernel/arm/int8/dequantize.h  +46  -0
mindspore/lite/src/runtime/kernel/arm/opclib/fp32/quantize.cc  +29  -0
mindspore/lite/src/runtime/kernel/arm/opclib/fp32/quantize.h  +28  -0
mindspore/lite/src/runtime/kernel/arm/opclib/fp32/space_to_batch.cc  +157  -0
mindspore/lite/src/runtime/kernel/arm/opclib/fp32/space_to_batch.h  +42  -0
mindspore/lite/src/runtime/kernel/arm/opclib/fp32/space_to_depth.cc  +50  -0
mindspore/lite/src/runtime/kernel/arm/opclib/fp32/space_to_depth.h  +27  -0
mindspore/lite/src/runtime/kernel/arm/opclib/int8/dequantize.cc  +29  -0
mindspore/lite/src/runtime/kernel/arm/opclib/int8/dequantize.h  +28  -0
mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/quantize_fp32_tests.cc  +77  -0
mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_batch_fp32_tests.cc  +163  -0
mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc  +91  -0
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/dequantize_int8_tests.cc  +83  -0

mindspore/lite/src/ops/dequantize.cc (new file, mode 100644)
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/ops/ops.h"
#include "include/errorcode.h"
#include "utils/log_adapter.h"
#include "src/ir/tensor.h"
namespace mindspore::lite {
int Dequantize::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
  MS_ASSERT(this->primitive != nullptr);
  auto input = inputs_.front();
  MS_ASSERT(input != nullptr);
  auto output = outputs_.front();
  MS_ASSERT(output != nullptr);
  output->set_shape(input->shape());
  output->set_data_type(kNumberTypeFloat32);
  return RET_OK;
}
}  // namespace mindspore::lite

mindspore/lite/src/ops/ops.cc

@@ -133,6 +133,14 @@ Primitive *Primitive::CreatePrimitive(schema::Primitive *primitive) {
       return new lite::OneHot(const_cast<schema::Primitive *>(primitive));
     case schema::PrimitiveType_PriorBox:
       return new lite::PriorBox(const_cast<schema::Primitive *>(primitive));
+    case schema::PrimitiveType_SpaceToDepth:
+      return new lite::SpaceToDepth(const_cast<schema::Primitive *>(primitive));
+    case schema::PrimitiveType_SpaceToBatch:
+      return new lite::SpaceToBatch(const_cast<schema::Primitive *>(primitive));
+    case schema::PrimitiveType_OnnxInt8Dequantize:
+      return new lite::Dequantize(const_cast<schema::Primitive *>(primitive));
+    case schema::PrimitiveType_OnnxInt8Quantize:
+      return new lite::Quantize(const_cast<schema::Primitive *>(primitive));
     default:
       break;
   }

mindspore/lite/src/ops/ops.h

@@ -596,6 +596,22 @@ class BatchToSpace : public Primitive {
   int InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) override;
 };
 
+class SpaceToBatch : public Primitive {
+ public:
+  explicit SpaceToBatch(schema::Primitive *primitive) : Primitive(primitive) {}
+  const schema::SpaceToBatch *GetAttribute() const { return this->primitive->value_as_SpaceToBatch(); }
+  int InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) override;
+  std::vector<int> BlockSizes() { return block_sizes_; }
+  std::vector<int> Paddings() { return paddings_; }
+  std::vector<int> InShape() { return in_shape_; }
+  std::vector<int> PaddedInShape() { return padded_in_shape_; }
+
+ private:
+  std::vector<int> block_sizes_;
+  std::vector<int> paddings_;
+  std::vector<int> in_shape_;
+  std::vector<int> padded_in_shape_;
+};
+
 class Crop : public Primitive {
  public:
  explicit Crop(schema::Primitive *primitive) : Primitive(primitive) {}

@@ -668,6 +684,26 @@ class PriorBox : public Primitive {
   int InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) override;
 };
 
+class SpaceToDepth : public Primitive {
+ public:
+  explicit SpaceToDepth(schema::Primitive *primitive) : Primitive(primitive) {}
+  const schema::SpaceToDepth *GetAttribute() const { return this->primitive->value_as_SpaceToDepth(); }
+  int InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) override;
+};
+
+class Dequantize : public Primitive {
+ public:
+  explicit Dequantize(schema::Primitive *primitive) : Primitive(primitive) {}
+  const schema::OnnxInt8Dequantize *GetAttribute() const { return this->primitive->value_as_OnnxInt8Dequantize(); }
+  int InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) override;
+};
+
+class Quantize : public Primitive {
+ public:
+  explicit Quantize(schema::Primitive *primitive) : Primitive(primitive) {}
+  const schema::OnnxInt8Quantize *GetAttribute() const { return this->primitive->value_as_OnnxInt8Quantize(); }
+  int InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::Tensor *> outputs) override;
+};
 }  // namespace lite
 }  // namespace mindspore
 #endif  // MINDSPORE_LITE_SRC_OPS_OPS_H_

mindspore/lite/src/ops/quantize.cc (new file, mode 100644)
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/ops/ops.h"
#include "include/errorcode.h"
#include "utils/log_adapter.h"
#include "src/ir/tensor.h"
namespace mindspore::lite {
int Quantize::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::Tensor *> outputs_) {
  MS_ASSERT(this->primitive != nullptr);
  auto input = inputs_.front();
  MS_ASSERT(input != nullptr);
  auto output = outputs_.front();
  MS_ASSERT(output != nullptr);
  output->set_shape(input->shape());
  output->set_data_type(kNumberTypeInt8);
  return RET_OK;
}
}  // namespace mindspore::lite

mindspore/lite/src/ops/space_to_batch.cc (new file, mode 100644)
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include "src/ops/ops.h"
#include "include/errorcode.h"
#include "utils/log_adapter.h"
#include "src/ir/tensor.h"
namespace mindspore::lite {
namespace {
constexpr int kSpaceToBatchNDOutputNum = 1;
constexpr int kSpaceToBatchNDInputNum = 1;
constexpr int kBlockSizesSize = 2;
constexpr int kPaddingsSize = 4;
}  // namespace

int SpaceToBatch::InferShape(std::vector<lite::tensor::Tensor *> inputs, std::vector<lite::tensor::Tensor *> outputs) {
  MS_ASSERT(this->primitive != nullptr);
  if (outputs.size() != kSpaceToBatchNDOutputNum || inputs.size() != kSpaceToBatchNDInputNum) {
    MS_LOG(ERROR) << "Invalid output/input size! output size: " << outputs.size() << ",input size: " << inputs.size();
    return RET_PARAM_INVALID;
  }

  auto input = inputs.at(0);
  if (input->GetFormat() != schema::Format_NHWC) {
    MS_LOG(ERROR) << "space_to_batch only support NHWC now!";
    return RET_FORMAT_ERR;
  }
  auto input_shape = input->shape();
  if (input_shape.size() != kDimension_4d) {
    MS_LOG(ERROR) << "input shape dimension size should == " << kDimension_4d;
    return RET_PARAM_INVALID;
  }

  auto prim = this->primitive->value_as_SpaceToBatch();
  if (prim->blockShape()->size() != kBlockSizesSize) {
    MS_LOG(ERROR) << "Block shape size should be " << kBlockSizesSize;
    return RET_PARAM_INVALID;
  }
  if (prim->paddings()->size() != kPaddingsSize) {
    MS_LOG(ERROR) << "Crops size should be " << kPaddingsSize;
    return RET_PARAM_INVALID;
  }

  for (auto iter = prim->blockShape()->begin(); iter != prim->blockShape()->end(); ++iter) {
    block_sizes_.emplace_back(*iter);
  }

  in_shape_.clear();
  padded_in_shape_.clear();
  paddings_.clear();
  in_shape_.emplace_back(input_shape.at(kNHWC_n_index));
  padded_in_shape_.emplace_back(input_shape.at(kNHWC_n_index));
  for (int i = 0; i < kBlockSizesSize; i++) {
    in_shape_.emplace_back(input_shape.at(i + 1));
    padded_in_shape_.emplace_back(input_shape.at(i + 1) + (paddings_.at(2 * i) + paddings_.at(2 * i + 1)));
    paddings_.emplace_back(paddings_.at(2 * i));
    paddings_.emplace_back(paddings_.at(2 * i + 1));
    if (paddings_.back() % block_sizes_.at(i)) {
      MS_LOG(ERROR) << "Padded shape does not divide block size " << block_sizes_.at(i);
      return RET_PARAM_INVALID;
    }
  }
  in_shape_.emplace_back(input_shape.at(kNHWC_c_index));
  padded_in_shape_.emplace_back(input_shape.at(kNHWC_c_index));

  std::vector<int32_t> output_shape(input_shape.size());
  output_shape[kNHWC_n_index] = input_shape[kNHWC_n_index] * (block_sizes_[kNHWC_n_index] * block_sizes_[kNHWC_h_index]);
  output_shape[kNHWC_h_index] = input_shape[kNHWC_h_index] / block_sizes_[kNHWC_n_index];
  output_shape[kNHWC_w_index] = input_shape[kNHWC_w_index] / block_sizes_[kNHWC_h_index];
  output_shape[kNHWC_c_index] = input_shape[kNHWC_c_index];
  outputs[0]->set_shape(output_shape);
  outputs[0]->set_data_type(input->data_type());
  return RET_OK;
}
}  // namespace mindspore::lite
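
For reference, the output-shape formula above follows the usual SpaceToBatch relation; a worked example with illustrative values (not taken from the commit, and assuming zero paddings):

// Illustrative only: expected SpaceToBatch output shape for an NHWC input.
// input [N, H, W, C] = [1, 4, 4, 3], blockShape = [2, 2], paddings = [0, 0, 0, 0]
// padded H' = H + p0 + p1 = 4, padded W' = W + p2 + p3 = 4
// output [N * b0 * b1, H' / b0, W' / b1, C] = [4, 2, 2, 3]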

mindspore/lite/src/ops/space_to_depth.cc (new file, mode 100644)
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include "src/ops/ops.h"
#include "include/errorcode.h"
#include "utils/log_adapter.h"
#include "src/ir/tensor.h"
namespace mindspore::lite {
namespace {
constexpr int kSpaceToDepthOutputNum = 1;
constexpr int kSpaceToDepthInputNum = 1;
}  // namespace

int SpaceToDepth::InferShape(std::vector<lite::tensor::Tensor *> inputs, std::vector<lite::tensor::Tensor *> outputs) {
  MS_ASSERT(this->primitive != nullptr);
  if (outputs.size() != kSpaceToDepthOutputNum || inputs.size() != kSpaceToDepthInputNum) {
    MS_LOG(ERROR) << "Invalid output/input size! output size: " << outputs.size() << ",input size: " << inputs.size();
    return RET_PARAM_INVALID;
  }

  auto input = inputs.at(0);
  if (input->GetFormat() != schema::Format_NHWC) {
    MS_LOG(ERROR) << "space_to_depth only support NHWC now!";
    return RET_FORMAT_ERR;
  }
  auto input_shape = input->shape();
  if (input_shape.size() != kDimension_4d) {
    MS_LOG(ERROR) << "input shape dimension size should == " << kDimension_4d;
    return RET_PARAM_INVALID;
  }

  auto prim = this->primitive->value_as_SpaceToDepth();
  int32_t block_size = prim->blockSize();
  if (input_shape[kNHWC_c_index] % (block_size * block_size) != 0 || input_shape[kNHWC_c_index] == 0) {
    MS_LOG(ERROR) << "input dimension c size " << input_shape[kNHWC_c_index] << " should be a multiple of block_size("
                  << block_size << ") * block_size!";
    return RET_PARAM_INVALID;
  }

  std::vector<int32_t> output_shape(input_shape.size());
  output_shape[kNHWC_n_index] = input_shape[kNHWC_n_index];
  output_shape[kNHWC_h_index] = input_shape[kNHWC_h_index] / block_size;
  output_shape[kNHWC_w_index] = input_shape[kNHWC_w_index] / block_size;
  output_shape[kNHWC_c_index] = input_shape[kNHWC_c_index] * (block_size * block_size);
  outputs[0]->set_shape(output_shape);
  outputs[0]->set_data_type(input->data_type());
  return RET_OK;
}
}  // namespace mindspore::lite
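
A quick numeric check of the shape relation above (illustrative values only):

// Illustrative only: SpaceToDepth output shape for an NHWC input.
// input  [N, H, W, C] = [1, 4, 6, 4], block_size = 2
// output [N, H / block, W / block, C * block * block] = [1, 2, 3, 16]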

mindspore/lite/src/populate_parameter.cc

@@ -63,6 +63,10 @@
 #include "src/runtime/kernel/arm/opclib/fp32/one_hot.h"
 #include "src/runtime/kernel/arm/opclib/fp32/strided_slice.h"
 #include "src/runtime/kernel/arm/base/prior_box.h"
+#include "src/runtime/kernel/arm/opclib/fp32/space_to_depth.h"
+#include "src/runtime/kernel/arm/opclib/fp32/space_to_batch.h"
+#include "src/runtime/kernel/arm/opclib/int8/dequantize.h"
+#include "src/runtime/kernel/arm/opclib/fp32/quantize.h"
 
 namespace mindspore::kernel {
 FillParameter *PopulateFillParam(const lite::Primitive *primitive) {

@@ -873,6 +877,22 @@ DepthToSpaceParameter *PopulateDepthToSpaceParam(const lite::Primitive *primitiv
   return parameter;
 }
 
+SpaceToDepthParameter *PopulateSpaceToDepthParam(const lite::Primitive *primitive) {
+  SpaceToDepthParameter *parameter = new (std::nothrow) SpaceToDepthParameter();
+  if (parameter == nullptr) {
+    MS_LOG(ERROR) << "new SpaceToDepthParameter failed.";
+    return nullptr;
+  }
+  auto param = primitive->Value()->value_as_SpaceToDepth();
+  parameter->op_parameter_.type_ = primitive->Type();
+  parameter->block_size_ = param->blockSize();
+  if (param->format() != schema::Format_NHWC) {
+    MS_LOG(ERROR) << "Currently only NHWC format is supported.";
+    return nullptr;
+  }
+  return parameter;
+}
+
 ResizeParameter *PopulateResizeParameter(const lite::Primitive *primitive) {
   ResizeParameter *parameter = new (std::nothrow) ResizeParameter();
   if (parameter == nullptr) {

@@ -961,6 +981,24 @@ FlattenParameter *PopulateFlattenParameter(const lite::Primitive *primitive) {
   return parameter;
 }
 
+DequantizeParameter *PopulateDequantizeParameter(const lite::Primitive *primitive) {
+  DequantizeParameter *parameter = new (std::nothrow) DequantizeParameter();
+  if (parameter == nullptr) {
+    MS_LOG(ERROR) << "new DequantizeParameter fail!";
+    return nullptr;
+  }
+  return parameter;
+}
+
+QuantizeParameter *PopulateQuantizeParameter(const lite::Primitive *primitive) {
+  QuantizeParameter *parameter = new (std::nothrow) QuantizeParameter();
+  if (parameter == nullptr) {
+    MS_LOG(ERROR) << "new QuantizeParameter fail!";
+    return nullptr;
+  }
+  return parameter;
+}
+
 StridedSliceParameter *PopulateStridedSliceParam(const lite::Primitive *primitive) {
   StridedSliceParameter *parameter = new (std::nothrow) StridedSliceParameter();
   if (parameter == nullptr) {

@@ -1045,6 +1083,24 @@ PriorBoxParameter *PopulatePriorBoxParameter(const lite::Primitive *primitive) {
   return param;
 }
 
+SpaceToBatchParameter *PopulateSpaceToBatchParam(const lite::Primitive *primitive) {
+  SpaceToBatchParameter *parameter = new (std::nothrow) SpaceToBatchParameter();
+  if (parameter == nullptr) {
+    MS_LOG(ERROR) << "new SpaceToBatchParameter failed.";
+    return nullptr;
+  }
+  parameter->op_parameter_.type_ = primitive->Type();
+  auto block_sizes = ((lite::SpaceToBatch *)primitive)->BlockSizes();
+  (void)memcpy(parameter->block_sizes_, (block_sizes.data()), block_sizes.size() * sizeof(int));
+  auto paddings = ((lite::SpaceToBatch *)primitive)->Paddings();
+  (void)memcpy(parameter->paddings_, (paddings.data()), paddings.size() * sizeof(int));
+  auto in_shape = ((lite::SpaceToBatch *)primitive)->InShape();
+  (void)memcpy(parameter->in_shape_, (in_shape.data()), in_shape.size() * sizeof(int));
+  auto padded_in_shape = ((lite::SpaceToBatch *)primitive)->PaddedInShape();
+  (void)memcpy(parameter->padded_in_shape_, (padded_in_shape.data()), padded_in_shape.size() * sizeof(int));
+  return parameter;
+}
+
 OpParameter *PopulateParameter(const lite::Primitive *primitive) {
   MS_EXCEPTION_IF_NULL(primitive);
   auto op_type = primitive->Type();

@@ -1166,6 +1222,10 @@ OpParameter *PopulateParameter(const lite::Primitive *primitive) {
       return reinterpret_cast<OpParameter *>(PopulateAddNParam(primitive));
     case schema::PrimitiveType_PriorBox:
       return reinterpret_cast<OpParameter *>(PopulatePriorBoxParameter(primitive));
+    case schema::PrimitiveType_OnnxInt8Dequantize:
+      return reinterpret_cast<OpParameter *>(PopulateDequantizeParameter(primitive));
+    case schema::PrimitiveType_OnnxInt8Quantize:
+      return reinterpret_cast<OpParameter *>(PopulateQuantizeParameter(primitive));
     default:
       break;
   }

mindspore/lite/src/runtime/kernel/arm/fp32/quantize.cc (new file, mode 100644)
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/fp32/quantize.h"
#include <vector>
#include "src/runtime/kernel/arm/opclib/fp32/quantize.h"
#include "src/kernel_registry.h"
#include "src/runtime/runtime_api.h"
#include "schema/model_generated.h"
#include "include/errorcode.h"
using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_OnnxInt8Quantize;

namespace mindspore::kernel {
namespace {
constexpr int kQuantizeInputNum = 1;
constexpr int kQuantizeOutputNum = 1;
}  // namespace

int QuantizeCPUKernel::Init() {
  if (inputs_.size() != 1) {
    MS_LOG(ERROR) << "inputs number should be 1, but " << inputs_.size() << " is given.";
    return RET_ERROR;
  }
  if (outputs_.size() != 1) {
    MS_LOG(ERROR) << "outputs number should be 1, but " << outputs_.size() << " is given.";
    return RET_ERROR;
  }
  auto in_tensor = inputs_.front();
  num_unit_ = static_cast<int>(in_tensor->DataSize());
  thread_n_num_ = MSMIN(thread_num_, num_unit_);
  thread_n_stride_ = UP_DIV(num_unit_, thread_n_num_);
  return RET_OK;
}

int QuantizeCPUKernel::ReSize() { return RET_OK; }

int QuantizeCPUKernel::Quantize(int task_id) {
  int num_unit_thread = MSMIN(thread_n_stride_, num_unit_ - task_id * thread_n_stride_);
  if (num_unit_thread <= 0) {
    return RET_OK;
  }
  int thread_offset = task_id * thread_n_stride_;
  auto quant_arg = inputs_.front()->GetQuantParams().front();
  int ret = QuantizeToInt8(input_ptr_ + thread_offset, output_ptr_ + thread_offset, quant_arg.scale,
                           quant_arg.zeroPoint, num_unit_thread);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Quantize error task_id[" << task_id << "] error_code[" << ret << "]";
    return RET_ERROR;
  }
  return RET_OK;
}

int QuantizeRun(int task_id, LiteParallelGroupEnv *penv, void *cdata) {
  auto g_kernel = reinterpret_cast<QuantizeCPUKernel *>(cdata);
  auto ret = g_kernel->Quantize(task_id);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "QuantizeRun error task_id[" << task_id << "] error_code[" << ret << "]";
    return RET_ERROR;
  }
  return RET_OK;
}

int QuantizeCPUKernel::Run() {
  input_ptr_ = reinterpret_cast<float *>(inputs_[0]->Data());
  output_ptr_ = reinterpret_cast<int8_t *>(outputs_[0]->Data());
  int ret = LiteBackendParallelLaunch(QuantizeRun, this, thread_n_num_);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Quantize error error_code[" << ret << "]";
    return RET_ERROR;
  }
  return RET_OK;
}

kernel::LiteKernel *CpuQuantizeFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
                                                 const std::vector<lite::tensor::Tensor *> &outputs,
                                                 OpParameter *opParameter, const lite::Context *ctx,
                                                 const kernel::KernelKey &desc) {
  if (opParameter == nullptr) {
    MS_LOG(ERROR) << "Input opParameter is nullptr!";
    return nullptr;
  }
  auto *kernel = new (std::nothrow) QuantizeCPUKernel(opParameter, inputs, outputs, ctx);
  if (kernel == nullptr) {
    MS_LOG(ERROR) << "new QuantizeCPUKernel fail!";
    return nullptr;
  }
  auto ret = kernel->Init();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Init kernel failed! name: " << opParameter->name_ << ", type: "
                  << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
    delete kernel;
    return nullptr;
  }
  return kernel;
}

REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_OnnxInt8Quantize, CpuQuantizeFp32KernelCreator)
}  // namespace mindspore::kernel
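
The kernel splits the flat element count evenly across threads with MSMIN/UP_DIV; a small worked example of the partition arithmetic used by Init() and Quantize() (illustrative numbers, not from the commit):

// Illustrative only: how the per-task offset and count are derived.
// num_unit_ = 10 elements, thread_num_ = 4
// thread_n_num_    = MSMIN(4, 10)  = 4   (tasks actually launched)
// thread_n_stride_ = UP_DIV(10, 4) = 3   (elements per task)
// task 0 -> offset 0, count MSMIN(3, 10 - 0) = 3
// task 1 -> offset 3, count 3;  task 2 -> offset 6, count 3
// task 3 -> offset 9, count MSMIN(3, 10 - 9) = 1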

mindspore/lite/src/runtime/kernel/arm/fp32/quantize.h (new file, mode 100644)
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_QUANTIZE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_QUANTIZE_H_
#include <vector>
#include "src/lite_kernel.h"
namespace mindspore::kernel {
class QuantizeCPUKernel : public LiteKernel {
 public:
  QuantizeCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
                    const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
      : LiteKernel(parameter, inputs, outputs), thread_num_(ctx->threadNum) {}
  ~QuantizeCPUKernel() = default;

  int Init() override;
  int ReSize() override;
  int Run() override;
  int Quantize(int task_id);

 private:
  int thread_num_;
  int thread_n_num_;
  int thread_n_stride_;
  int num_unit_;
  float *input_ptr_;
  int8_t *output_ptr_;
};
}  // namespace mindspore::kernel

#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_QUANTIZE_H_

mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch.cc (new file, mode 100644)
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/fp32/space_to_batch.h"
#include <vector>
#include "schema/ops_generated.h"
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "src/runtime/kernel/arm/opclib/fp32/space_to_batch.h"
#include "src/runtime/kernel/arm/opclib/errorcode.h"
#include "include/errorcode.h"
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_FORMAT_ERR;
using mindspore::lite::RET_OK;
using mindspore::lite::RET_OP_EXECUTE_FAILURE;
using mindspore::schema::PrimitiveType_SpaceToBatch;

namespace mindspore::kernel {
int SpaceToBatchCPUKernel::Init() {
  if (inputs_[0]->GetFormat() != schema::Format_NHWC) {
    MS_LOG(ERROR) << "space_to_batch only support NHWC now!";
    return RET_FORMAT_ERR;
  }
  SpaceToBatchParameter *param = reinterpret_cast<SpaceToBatchParameter *>(this->opParameter);
  for (int i = 0; i < SPACE_TO_BATCH_PADDINGS_SIZE; ++i) {
    if (param->paddings_[i] != 0) {
      param->need_paddings_ = true;
      break;
    }
  }
  param->n_dims_ = DIMENSION_4D;
  param->n_space_dims_ = SPACE_TO_BATCH_BLOCK_SIZES_SIZE;
  param->num_elements_ = EnumElement(param->in_shape_, param->n_dims_);
  param->num_elements_padded_ = EnumElement(param->padded_in_shape_, param->n_dims_);
  return RET_OK;
}

int SpaceToBatchCPUKernel::Run() {
  auto input = inputs_[0];
  auto output = outputs_[0];
  input_ptr_ = reinterpret_cast<const float *>(input->Data());
  output_ptr_ = reinterpret_cast<float *>(output->Data());
  SpaceToBatchParameter *param = reinterpret_cast<SpaceToBatchParameter *>(this->opParameter);

  int ret;
  float *tmp_space[3] = {nullptr, nullptr, nullptr};
  if (param->need_paddings_) {
    tmp_space[0] = reinterpret_cast<float *>(malloc(param->num_elements_padded_ * sizeof(float)));
    (void)memset(tmp_space[0], 0, param->num_elements_padded_);
    tmp_space[1] = reinterpret_cast<float *>(malloc(param->num_elements_padded_ * sizeof(float)));
    (void)memset(tmp_space[1], 0, param->num_elements_padded_);
    tmp_space[2] = reinterpret_cast<float *>(malloc(param->num_elements_padded_ * sizeof(float)));
    (void)memset(tmp_space[2], 0, param->num_elements_padded_);
    ret = SpaceToBatch(input_ptr_, output_ptr_, *param, tmp_space);
  } else {
    ret = SpaceToBatch(input_ptr_, output_ptr_, *param, tmp_space);
  }
  if (ret != OPCLIB_OK) {
    MS_LOG(ERROR) << "Do space to batch fails!";
    return RET_OP_EXECUTE_FAILURE;
  }
  return RET_OK;
}

kernel::LiteKernel *CpuSpaceToBatchFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
                                                     const std::vector<lite::tensor::Tensor *> &outputs,
                                                     OpParameter *opParameter, const lite::Context *ctx,
                                                     const kernel::KernelKey &desc) {
  if (opParameter == nullptr) {
    MS_LOG(ERROR) << "Input opParameter is nullptr!";
    return nullptr;
  }
  auto *kernel = new (std::nothrow) SpaceToBatchCPUKernel(opParameter, inputs, outputs);
  if (kernel == nullptr) {
    MS_LOG(ERROR) << "new SpaceToBatchCPUKernel fail!";
    return nullptr;
  }
  auto ret = kernel->Init();
  if (ret != RET_OK) {
    delete kernel;
    MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                  << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
    return nullptr;
  }
  return kernel;
}

REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_SpaceToBatch, CpuSpaceToBatchFp32KernelCreator)
}  // namespace mindspore::kernel

mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch.h (new file, mode 100644)
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_SPACE_TO_BATCH_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_SPACE_TO_BATCH_H_
#include <vector>
#include "src/lite_kernel.h"
namespace mindspore::kernel {
class SpaceToBatchCPUKernel : public LiteKernel {
 public:
  SpaceToBatchCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
                        const std::vector<lite::tensor::Tensor *> &outputs)
      : LiteKernel(parameter, inputs, outputs) {}
  ~SpaceToBatchCPUKernel() = default;

  int Init() override;
  int ReSize() override { return 0; }
  int Run() override;

 private:
  const float *input_ptr_;
  float *output_ptr_;
};
}  // namespace mindspore::kernel

#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_SPACE_TO_BATCH_H_

mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.cc (new file, mode 100644)
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/fp32/space_to_depth.h"
#include <vector>
#include "schema/ops_generated.h"
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "src/runtime/kernel/arm/opclib/fp32/space_to_depth.h"
#include "include/errorcode.h"
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_FORMAT_ERR;
using mindspore::lite::RET_OK;
using mindspore::lite::RET_PARAM_INVALID;
using mindspore::schema::PrimitiveType_SpaceToDepth;

namespace mindspore::kernel {
int SpaceToDepthCPUKernel::Init() {
  if (inputs_[0]->GetFormat() != schema::Format_NHWC) {
    MS_LOG(ERROR) << "space_to_depth only support NHWC now!";
    return RET_FORMAT_ERR;
  }
  SpaceToDepthParameter *param = reinterpret_cast<SpaceToDepthParameter *>(opParameter);
  if (param->block_size_ <= 0) {
    MS_LOG(ERROR) << "Input block_size should > 0!";
    return RET_PARAM_INVALID;
  }
  return RET_OK;
}

int SpaceToDepthCPUKernel::Run() {
  auto input = inputs_[0];
  auto output = outputs_[0];
  const float *input_data = static_cast<const float *>(input->Data());
  float *output_data = static_cast<float *>(output->Data());
  auto in_shape = input->shape();
  auto out_shape = output->shape();
  SpaceToDepthParameter *param = reinterpret_cast<SpaceToDepthParameter *>(opParameter);
  if (input->GetFormat() == schema::Format_NHWC) {
    auto ret = SpaceToDepthForNHWC(input_data, output_data, in_shape.data(), out_shape.data(), in_shape.size(),
                                   param->block_size_);
    return ret;
  } else {
    MS_LOG(ERROR) << "Only support NHWC now!";
    return RET_ERROR;
  }
}

kernel::LiteKernel *CpuSpaceToDepthFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
                                                     const std::vector<lite::tensor::Tensor *> &outputs,
                                                     OpParameter *opParameter, const lite::Context *ctx,
                                                     const kernel::KernelKey &desc) {
  if (opParameter == nullptr) {
    MS_LOG(ERROR) << "Input opParameter is nullptr!";
    return nullptr;
  }
  auto *kernel = new (std::nothrow) SpaceToDepthCPUKernel(opParameter, inputs, outputs);
  if (kernel == nullptr) {
    MS_LOG(ERROR) << "new SpaceToDepthCPUKernel fail!";
    return nullptr;
  }
  auto ret = kernel->Init();
  if (ret != RET_OK) {
    delete kernel;
    MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
                  << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
    return nullptr;
  }
  return kernel;
}

REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_SpaceToDepth, CpuSpaceToDepthFp32KernelCreator)
}  // namespace mindspore::kernel

mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth.h (new file, mode 100644)
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_BACKEND_ARM_FP32_SPACE_TO_DEPTH_H_
#define MINDSPORE_LITE_SRC_BACKEND_ARM_FP32_SPACE_TO_DEPTH_H_
#include <vector>
#include "src/lite_kernel.h"
#include "ir/anf.h"
namespace mindspore::kernel {
class SpaceToDepthCPUKernel : public LiteKernel {
 public:
  SpaceToDepthCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
                        const std::vector<lite::tensor::Tensor *> &outputs)
      : LiteKernel(parameter, inputs, outputs) {}
  ~SpaceToDepthCPUKernel() = default;

  int Init() override;
  int ReSize() override { return 0; };
  int Run() override;
};
}  // namespace mindspore::kernel

#endif  // MINDSPORE_LITE_SRC_BACKEND_ARM_FP32_SPACE_TO_DEPTH_H_

mindspore/lite/src/runtime/kernel/arm/int8/dequantize.cc (new file, mode 100644)
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/int8/dequantize.h"
#include <vector>
#include "src/runtime/kernel/arm/opclib/int8/dequantize.h"
#include "src/runtime/runtime_api.h"
#include "src/kernel_registry.h"
#include "schema/model_generated.h"
#include "include/errorcode.h"
using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_OnnxInt8Dequantize;

namespace mindspore::kernel {
namespace {
constexpr int kDequantizeInputNum = 1;
constexpr int kDequantizeOutputNum = 1;
}  // namespace

int DequantizeCPUKernel::Init() {
  if (inputs_.size() != 1) {
    MS_LOG(ERROR) << "inputs number should be 1, but " << inputs_.size() << " is given.";
    return RET_ERROR;
  }
  if (outputs_.size() != 1) {
    MS_LOG(ERROR) << "outputs number should be 1, but " << outputs_.size() << " is given.";
    return RET_ERROR;
  }
  auto in_tensor = inputs_.front();
  num_unit_ = static_cast<int>(in_tensor->DataSize());
  thread_n_num_ = MSMIN(thread_num_, num_unit_);
  thread_n_stride_ = UP_DIV(num_unit_, thread_n_num_);
  return RET_OK;
}

int DequantizeCPUKernel::ReSize() { return RET_OK; }

int DequantizeCPUKernel::Dequantize(int task_id) {
  int num_unit_thread = MSMIN(thread_n_stride_, num_unit_ - task_id * thread_n_stride_);
  if (num_unit_thread <= 0) {
    return RET_OK;
  }
  int thread_offset = task_id * thread_n_stride_;
  auto quant_arg = inputs_.front()->GetQuantParams().front();
  int ret = DequantizeInt8(input_ptr_ + thread_offset, output_ptr_ + thread_offset, quant_arg.scale,
                           quant_arg.zeroPoint, num_unit_thread);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Dequantize error task_id[" << task_id << "] error_code[" << ret << "]";
    return RET_ERROR;
  }
  return RET_OK;
}

int DequantizeRun(int task_id, LiteParallelGroupEnv *penv, void *cdata) {
  auto g_kernel = reinterpret_cast<DequantizeCPUKernel *>(cdata);
  auto ret = g_kernel->Dequantize(task_id);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "DequantizeRun error task_id[" << task_id << "] error_code[" << ret << "]";
    return RET_ERROR;
  }
  return RET_OK;
}

int DequantizeCPUKernel::Run() {
  input_ptr_ = reinterpret_cast<int8_t *>(inputs_[0]->Data());
  output_ptr_ = reinterpret_cast<float *>(outputs_[0]->Data());
  int ret = LiteBackendParallelLaunch(DequantizeRun, this, thread_n_num_);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Dequantize error error_code[" << ret << "]";
    return RET_ERROR;
  }
  return RET_OK;
}

kernel::LiteKernel *CpuDequantizeFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
                                                   const std::vector<lite::tensor::Tensor *> &outputs,
                                                   OpParameter *opParameter, const lite::Context *ctx,
                                                   const kernel::KernelKey &desc) {
  if (opParameter == nullptr) {
    MS_LOG(ERROR) << "Input opParameter is nullptr!";
    return nullptr;
  }
  auto *kernel = new (std::nothrow) DequantizeCPUKernel(opParameter, inputs, outputs, ctx);
  if (kernel == nullptr) {
    MS_LOG(ERROR) << "new DequantizeCPUKernel fail!";
    return nullptr;
  }
  auto ret = kernel->Init();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Init kernel failed! name: " << opParameter->name_ << ", type: "
                  << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
    delete kernel;
    return nullptr;
  }
  return kernel;
}

REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_OnnxInt8Dequantize, CpuDequantizeFp32KernelCreator)
}  // namespace mindspore::kernel

mindspore/lite/src/runtime/kernel/arm/int8/dequantize.h (new file, mode 100644)
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_DEQUANTIZE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_DEQUANTIZE_H_
#include <vector>
#include "src/lite_kernel.h"
namespace mindspore::kernel {
class DequantizeCPUKernel : public LiteKernel {
 public:
  DequantizeCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
                      const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
      : LiteKernel(parameter, inputs, outputs), thread_num_(ctx->threadNum) {}
  ~DequantizeCPUKernel() = default;

  int Init() override;
  int ReSize() override;
  int Run() override;
  int Dequantize(int task_id);

 private:
  int thread_num_;
  int thread_n_num_;
  int thread_n_stride_;
  int num_unit_;
  int8_t *input_ptr_;
  float *output_ptr_;
};
}  // namespace mindspore::kernel

#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_DEQUANTIZE_H_

mindspore/lite/src/runtime/kernel/arm/opclib/fp32/quantize.cc (new file, mode 100644)
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/opclib/fp32/quantize.h"
#include "src/runtime/kernel/arm/opclib/errorcode.h"
int QuantizeToInt8(float *real_values, int8_t *quant_values, float scale, int32_t zp, int size) {
  if (quant_values == nullptr || real_values == nullptr) {
    return OPCLIB_PARAM_INVALID;
  }

  for (int i = 0; i < size; ++i) {
    quant_values[i] = (int8_t)round(real_values[i] / scale + zp);
  }
  return OPCLIB_OK;
}
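
A minimal usage sketch of QuantizeToInt8 (illustrative values; the mapping implemented above is q = round(r / scale + zp)):

// Illustrative only: quantize four floats with scale 0.5 and zero point 1.
//   r = {-1.0, 0.0, 0.5, 2.0}  ->  q = {-1, 1, 2, 5}
float real[4] = {-1.0f, 0.0f, 0.5f, 2.0f};
int8_t quant[4];
int ret = QuantizeToInt8(real, quant, 0.5f, 1, 4);  // returns OPCLIB_OK on success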

mindspore/lite/src/runtime/kernel/arm/opclib/fp32/quantize.h (new file, mode 100644)
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_FP32_QUANTIZE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_FP32_QUANTIZE_H_
#include "src/runtime/kernel/arm/opclib/op_base.h"
struct QuantizeParameter {
  OpParameter op_parameter_;
};

int QuantizeToInt8(float *real_values, int8_t *quant_values, float scale, int32_t zp, int size);

#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_FP32_QUANTIZE_H_

mindspore/lite/src/runtime/kernel/arm/opclib/fp32/space_to_batch.cc (new file, mode 100644)
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/opclib/fp32/space_to_batch.h"
#include "src/runtime/kernel/arm/opclib/arithmetic_common.h"
#include "src/runtime/kernel/arm/opclib/errorcode.h"
#include "src/runtime/kernel/arm/opclib/fp32/concat.h"
#include "src/runtime/kernel/arm/opclib/op_base.h"
int EnumElement(int *shape, int n_dims) {
  int total = 1;
  for (int i = 0; i < n_dims; i++) {
    total *= shape[i];
  }
  return total;
}

void TransposeForNHWC(const float *in_data, float *out_data, int *strides, int *out_strides, int *perm,
                      int *output_shape) {
  const int stride0 = strides[perm[0]];
  const int stride1 = strides[perm[1]];
  const int stride2 = strides[perm[2]];
  const int stride3 = strides[perm[3]];
  const int stride4 = strides[perm[4]];
  const int out_stride0 = out_strides[0];
  const int out_stride1 = out_strides[1];
  const int out_stride2 = out_strides[2];
  const int out_stride3 = out_strides[3];
  const int out_stride4 = out_strides[4];
  const int output0 = output_shape[0];
  const int output1 = output_shape[1];
  const int output2 = output_shape[2];
  const int output3 = output_shape[3];
  const int output4 = output_shape[4];
  for (int i = 0; i < output0; ++i) {
    int out_stride0_i = i * out_stride0;
    int stride0_i = i * stride0;
    for (int j = 0; j < output1; ++j) {
      int out_stride1_j = j * out_stride1;
      int stride1_j = j * stride1;
      for (int k = 0; k < output2; ++k) {
        int out_stride2_k = k * out_stride2;
        int stride2_k = k * stride2;
        for (int m = 0; m < output3; ++m) {
          int out_stride3_m = m * out_stride3;
          int stride3_m = m * stride3;
          for (int n = 0; n < output4; ++n) {
            int out_stride4_n = n * out_stride4;
            int stride4_n = n * stride4;
            memcpy(out_data + out_stride0_i + out_stride1_j + out_stride2_k + out_stride3_m + out_stride4_n,
                   in_data + stride0_i + stride1_j + stride2_k + stride3_m + stride4_n, stride4 * sizeof(float));
          }
        }
      }
    }
  }
}

int SpaceToBatchForNHWC(const float *input, float *output, int *in_shape, int shape_size, int *block_sizes) {
  int trans_in_shape[6] = {in_shape[0],    in_shape[1] / block_sizes[0], block_sizes[0],
                           in_shape[2] / block_sizes[1], block_sizes[1], in_shape[3]};
  int trans_out_shape[6] = {in_shape[0],   block_sizes[0], block_sizes[1],
                            in_shape[1] / block_sizes[0], in_shape[2] / block_sizes[1], in_shape[3]};
  int in_strides[C4NUM + 2];
  ComputeStrides(trans_in_shape, in_strides, shape_size + 2);
  int out_strides[C4NUM + 2];
  ComputeStrides(trans_out_shape, out_strides, shape_size + 2);

  int perm[6] = {0, 2, 4, 1, 3, 5};
  TransposeForNHWC(input, output, in_strides, out_strides, perm, trans_out_shape);
  return OPCLIB_OK;
}

void DoPadding(const float *input, float *padded_input, SpaceToBatchParameter param, float *tmp_space[]) {
  float *tmp = padded_input;
  (void)memcpy(tmp, input, param.num_elements_ * sizeof(float));
  float *target = tmp_space[0];
  float *tmp_zeros = tmp_space[1];
  float *tmp2 = nullptr;
  int cur_shape[param.n_dims_], cur_start_shape[param.n_dims_], cur_end_shape[param.n_dims_],
    cur_target_shape[param.n_dims_];
  float *concat_inputs[3];
  int *concat_shapes[4];

  for (int i = 0; i < param.n_dims_; i++) {
    cur_shape[i] = param.in_shape_[i];
    cur_start_shape[i] = param.in_shape_[i];
    cur_end_shape[i] = param.in_shape_[i];
    cur_target_shape[i] = param.in_shape_[i];
  }
  for (int i = 0; i < param.n_space_dims_; ++i) {
    if (param.padded_in_shape_[i + 1] > param.in_shape_[i + 1]) {
      int concat_idx = 0;
      cur_target_shape[i + 1] = 0;
      if (param.paddings_[2 * i] != 0) {
        cur_start_shape[i + 1] = param.paddings_[2 * i];
        concat_inputs[concat_idx] = tmp_zeros;
        concat_shapes[concat_idx++] = cur_start_shape;
        cur_target_shape[i + 1] += cur_start_shape[i + 1];
      }
      concat_inputs[concat_idx] = tmp;
      concat_shapes[concat_idx++] = cur_shape;
      cur_target_shape[i + 1] += cur_shape[i + 1];
      if (param.paddings_[2 * i + 1] != 0) {
        cur_end_shape[i + 1] = param.paddings_[2 * i + 1];
        concat_inputs[concat_idx] = tmp_zeros;
        concat_shapes[concat_idx++] = cur_end_shape;
        cur_target_shape[i + 1] += cur_end_shape[i + 1];
      }
      concat_shapes[concat_idx] = cur_target_shape;
      Concat((void **)concat_inputs, concat_idx, i + 1, concat_shapes, param.n_dims_, target);

      tmp2 = tmp;
      tmp = target;
      target = tmp2;
      cur_start_shape[i + 1] = cur_end_shape[i + 1] = cur_shape[i + 1] = concat_shapes[concat_idx][i + 1];
    }
  }
  if (padded_input != tmp) {
    memcpy(padded_input, tmp, param.num_elements_padded_ * sizeof(float));
  }
}

int SpaceToBatch(const float *input, float *output, SpaceToBatchParameter param, float *tmp_space[3]) {
  float *padded_input;
  int ret;
  if (param.need_paddings_) {
    if (tmp_space[0] == nullptr || tmp_space[1] == nullptr || tmp_space[2] == nullptr) {
      return OPCLIB_NULL_PTR;
    }
    padded_input = tmp_space[0];
    DoPadding(input, padded_input, param, tmp_space + 1);
  }

  if (param.need_paddings_) {
    ret = SpaceToBatchForNHWC(padded_input, output, param.padded_in_shape_, param.n_dims_, param.block_sizes_);
  } else {
    ret = SpaceToBatchForNHWC(input, output, param.padded_in_shape_, param.n_dims_, param.block_sizes_);
  }
  return ret;
}
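
For orientation, SpaceToBatchForNHWC realizes space-to-batch as a reshape into six dimensions followed by a fixed permutation; a sketch of a call on an already-padded buffer (illustrative shapes, not taken from the commit):

// Illustrative only: 2x2 space-to-batch on a padded NHWC buffer.
// in_shape = {1, 4, 4, 3}, block_sizes = {2, 2}
// trans_in_shape  = {1, 2, 2, 2, 2, 3}   (N, H/b0, b0, W/b1, b1, C)
// trans_out_shape = {1, 2, 2, 2, 2, 3}   (N, b0, b1, H/b0, W/b1, C)
// perm {0, 2, 4, 1, 3, 5} moves the two block dimensions next to N, so the
// flat output reads as shape {1*2*2, 2, 2, 3} = {4, 2, 2, 3} in NHWC.
int in_shape[4] = {1, 4, 4, 3};
int block[2] = {2, 2};
float in[48] = {0}, out[48] = {0};
int ret = SpaceToBatchForNHWC(in, out, in_shape, 4, block);  // OPCLIB_OK on success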

mindspore/lite/src/runtime/kernel/arm/opclib/fp32/space_to_batch.h (new file, mode 100644)
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_BACKEND_ARM_OPCLIB_FP32_SPACE_TO_BATCH_H_
#define MINDSPORE_LITE_SRC_BACKEND_ARM_OPCLIB_FP32_SPACE_TO_BATCH_H_
#include "src/runtime/kernel/arm/opclib/op_base.h"
#define SPACE_TO_BATCH_BLOCK_SIZES_SIZE 2
#define SPACE_TO_BATCH_PADDINGS_SIZE 4
struct SpaceToBatchParameter {
  OpParameter op_parameter_;
  int block_sizes_[8];
  int paddings_[8];
  int n_dims_;
  int num_elements_;
  int num_elements_padded_;
  int n_space_dims_;
  int in_shape_[8];
  int padded_in_shape_[8];
  bool need_paddings_ = false;
};

int SpaceToBatch(const float *input, float *output, SpaceToBatchParameter param, float *tmp_space[3]);
int SpaceToBatchForNHWC(const float *input, float *output, int *in_shape, int shape_size, int *block_size);
void TransposeForNHWC(const float *in_data, float *out_data, int *strides, int *out_strides, int *perm,
                      int *output_shape);
void DoPadding(const float *input, float *padded_input, SpaceToBatchParameter param);
int EnumElement(int *shape, int n_dims);

#endif  // MINDSPORE_LITE_SRC_BACKEND_ARM_OPCLIB_FP32_SPACE_TO_BATCH_H_

mindspore/lite/src/runtime/kernel/arm/opclib/fp32/space_to_depth.cc (new file, mode 100644)
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/opclib/fp32/space_to_depth.h"
#include "src/runtime/kernel/arm/opclib/arithmetic_common.h"
#include "src/runtime/kernel/arm/opclib/errorcode.h"
#include "src/runtime/kernel/arm/opclib/op_base.h"
int SpaceToDepthForNHWC(const float *input, float *output, int *in_shape, int *out_shape, int shape_size,
                        int block_size) {
  if (input == nullptr || output == nullptr) {
    return OPCLIB_NULL_PTR;
  }
  if (shape_size != C4NUM) {
    return OPCLIB_PARAM_INVALID;
  }
  int in_strides[C4NUM];
  ComputeStrides(in_shape, in_strides, shape_size);
  int out_strides[C4NUM];
  ComputeStrides(out_shape, out_strides, shape_size);
  for (int i = 0; i < out_shape[0]; ++i) {
    size_t in_offset_n = i * in_strides[0];
    size_t out_offset_n = i * out_strides[0];
    for (int j = 0; j < out_shape[1]; ++j) {
      size_t in_offset_h = in_offset_n + j * block_size * in_strides[1];
      size_t out_offset_h = out_offset_n + j * out_strides[1];
      for (int k = 0; k < out_shape[2]; ++k) {
        size_t in_offset_w = in_offset_h + k * block_size * in_strides[2];
        size_t out_offset_w = out_offset_h + k * out_strides[2];
        for (int l = 0; l < block_size; ++l) {
          memcpy(output + out_offset_w + l * block_size * in_strides[2],
                 input + in_offset_w + l * in_strides[1], block_size * in_strides[2] * sizeof(float));
        }
      }
    }
  }
  return OPCLIB_OK;
}
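The memcpy loop above copies one block row at a time and amounts to the NHWC mapping output[n][h][w][(l*block_size + m)*C + c] = input[n][h*block_size + l][w*block_size + m][c]. A plain element-wise version of the same mapping can be useful for checking it; the sketch below is for verification only and is not part of the kernel:

// Element-wise reference for the NHWC space-to-depth mapping (sketch for verification).
int SpaceToDepthNaiveNHWC(const float *input, float *output, const int *in_shape, int block_size) {
  const int n = in_shape[0], h = in_shape[1], w = in_shape[2], c = in_shape[3];
  const int out_h = h / block_size, out_w = w / block_size, out_c = c * block_size * block_size;
  for (int ni = 0; ni < n; ++ni) {
    for (int hi = 0; hi < out_h; ++hi) {
      for (int wi = 0; wi < out_w; ++wi) {
        for (int l = 0; l < block_size; ++l) {
          for (int m = 0; m < block_size; ++m) {
            for (int ci = 0; ci < c; ++ci) {
              int in_idx = ((ni * h + hi * block_size + l) * w + wi * block_size + m) * c + ci;
              int out_idx = ((ni * out_h + hi) * out_w + wi) * out_c + (l * block_size + m) * c + ci;
              output[out_idx] = input[in_idx];
            }
          }
        }
      }
    }
  }
  return 0;
}

On the 1x4x4x1 input of the unit test with block_size 2, both versions produce {1, 2, 10, 20, 5, 6, 3, 8, ...}.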
mindspore/lite/src/runtime/kernel/arm/opclib/fp32/space_to_depth.h
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_BACKEND_ARM_OPCLIB_FP32_SPACE_TO_DEPTH_H_
#define MINDSPORE_LITE_SRC_BACKEND_ARM_OPCLIB_FP32_SPACE_TO_DEPTH_H_
#include "src/runtime/kernel/arm/opclib/op_base.h"
struct SpaceToDepthParameter {
  OpParameter op_parameter_;
  int32_t block_size_;
};

int SpaceToDepthForNHWC(const float *input, float *output, int *in_shape, int *out_shape, int shape_size,
                        int block_size);
#endif // MINDSPORE_LITE_SRC_BACKEND_ARM_OPCLIB_FP32_SPACE_TO_DEPTH_H_
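For an NHWC input of shape {N, H, W, C} and block size b, the output shape is {N, H/b, W/b, C*b*b}; the unit test below uses {1, 4, 4, 1} with b = 2, giving {1, 2, 2, 4}. A small illustrative shape-inference helper (not part of this header):

// Illustrative shape inference for SpaceToDepthForNHWC.
void InferSpaceToDepthOutShape(const int in_shape[4], int block_size, int out_shape[4]) {
  out_shape[0] = in_shape[0];                            // N
  out_shape[1] = in_shape[1] / block_size;               // H / b
  out_shape[2] = in_shape[2] / block_size;               // W / b
  out_shape[3] = in_shape[3] * block_size * block_size;  // C * b * b
}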
mindspore/lite/src/runtime/kernel/arm/opclib/int8/dequantize.cc
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/opclib/int8/dequantize.h"
#include "src/runtime/kernel/arm/opclib/errorcode.h"
int DequantizeInt8(int8_t *quant_values, float *real_values, float scale, int32_t zp, int size) {
  if (quant_values == nullptr || real_values == nullptr) {
    return OPCLIB_PARAM_INVALID;
  }
  for (int i = 0; i < size; ++i) {
    real_values[i] = (quant_values[i] + zp) * scale;
  }
  return OPCLIB_OK;
}
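As written, this kernel converts each value as real = (quant + zp) * scale; with the quant parameters used in the unit test below (scale 0.21176, zero point 5), quant_values[0] = 10 maps to (10 + 5) * 0.21176 = 3.1764. A minimal standalone check of the first few test values (sketch only):

// Standalone check of DequantizeInt8 against the values from the unit test below.
#include "src/runtime/kernel/arm/opclib/int8/dequantize.h"
#include <cstdint>
#include <cstdio>

int main() {
  int8_t quant_values[4] = {10, 14, 29, 33};
  float real_values[4] = {0};
  DequantizeInt8(quant_values, real_values, 0.21176f, 5, 4);
  for (int i = 0; i < 4; ++i) {
    printf("%f ", real_values[i]);  // expected: 3.1764 4.02344 7.19984 8.04688
  }
  return 0;
}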
mindspore/lite/src/runtime/kernel/arm/opclib/int8/dequantize.h
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_DEQUANTIZE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_DEQUANTIZE_H_
#include "src/runtime/kernel/arm/opclib/op_base.h"
struct DequantizeParameter {
  OpParameter op_parameter_;
};

int DequantizeInt8(int8_t *quant_values, float *real_values, float scale, int32_t zp, int size);
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_OPCLIB_INT8_DEQUANTIZE_H_
mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/quantize_fp32_tests.cc
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <memory>
#include "utils/log_adapter.h"
#include "common/common_test.h"
#include "mindspore/lite/src/runtime/kernel/arm/fp32/quantize.h"
#include "mindspore/lite/src/runtime/kernel/arm/opclib/fp32/quantize.h"
#include "mindspore/lite/src/kernel_registry.h"
#include "mindspore/lite/src/lite_kernel.h"
namespace mindspore {
class QuantizeTestFp32 : public mindspore::Common {
 public:
  QuantizeTestFp32() {}
};

TEST_F(QuantizeTestFp32, QuantizeTest1) {
  const lite::tensor::QuantArg quant_arg = {0.3515625, -57};
  QuantizeParameter param;
  param.op_parameter_.type_ = schema::PrimitiveType_OnnxInt8Quantize;

  std::vector<float> input = {1, 2, 5, 6, 10, -20, 3, 8, 18, 10, 3, 4, 11, 16, 15, 25};
  std::vector<int> in_shape = {1, 4, 4, 1};
  lite::tensor::Tensor input_tensor;
  input_tensor.SetData(input.data());
  input_tensor.set_shape(in_shape);
  input_tensor.SetFormat(schema::Format_NHWC);
  input_tensor.set_data_type(kNumberTypeFloat32);
  input_tensor.AddQuantParam(quant_arg);
  std::vector<lite::tensor::Tensor *> inputs_tensor;
  inputs_tensor.emplace_back(&input_tensor);

  const int out_size = 16;
  int8_t expect_out[16] = {-54, -51, -43, -40, -29, -114, -48, -34, -6, -29, -48, -46, -26, -11, -14, 14};
  std::vector<int8_t> output(16);
  std::vector<int> out_shape = {1, 4, 4, 1};
  lite::tensor::Tensor output_tensor;
  output_tensor.SetData(output.data());
  output_tensor.set_shape(out_shape);
  output_tensor.SetFormat(schema::Format_NHWC);
  output_tensor.set_data_type(kNumberTypeInt8);
  std::vector<lite::tensor::Tensor *> outputs_tensor;
  outputs_tensor.emplace_back(&output_tensor);

  lite::Context ctx;
  ctx.threadNum = 3;
  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_OnnxInt8Quantize};
  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
  ASSERT_NE(creator, nullptr);
  kernel::LiteKernel *kernel =
    creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&param), &ctx, desc);
  ASSERT_NE(kernel, nullptr);
  kernel->Run();

  for (int i = 0; i < out_size; ++i) {
    std::cout << output[i] << " ";
  }
  std::cout << "\n";
  CompareOutputData(output.data(), expect_out, out_size, 0.000001);
}
}  // namespace mindspore
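The expected values in this test are consistent with the usual affine quantization rule q = round(x / scale) + zero_point; for example round(1 / 0.3515625) + (-57) = 3 - 57 = -54. The quantize kernel itself is not shown in this excerpt, so the sketch below only reproduces the expected numbers under that assumed rule and is not tied to the kernel's internal function names:

// Sketch: reproduce the expected outputs of QuantizeTest1 with the assumed affine rule
// q = round(x / scale) + zero_point, clamped to the int8 range.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

int8_t QuantizeOne(float x, float scale, int32_t zp) {
  int32_t q = static_cast<int32_t>(std::round(x / scale)) + zp;
  q = std::min(127, std::max(-128, q));
  return static_cast<int8_t>(q);
}

int main() {
  const float scale = 0.3515625f;
  const int32_t zp = -57;
  const float input[4] = {1, 2, 5, 6};
  for (float x : input) {
    printf("%d ", QuantizeOne(x, scale, zp));  // expected: -54 -51 -43 -40
  }
  return 0;
}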
mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_batch_fp32_tests.cc
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <memory>
#include "utils/log_adapter.h"
#include "common/common_test.h"
#include "mindspore/lite/src/runtime/kernel/arm/opclib/fp32/space_to_batch.h"
#include "mindspore/lite/src/kernel_registry.h"
#include "mindspore/lite/src/lite_kernel.h"
namespace mindspore {
class SpaceToBatchTestFp32 : public mindspore::Common {
 public:
  SpaceToBatchTestFp32() {}
};
void InitSpaceToBatchParameter(SpaceToBatchParameter *param) {
  param->n_dims_ = 4;
  param->n_space_dims_ = 2;
  param->block_sizes_[0] = 2;
  param->block_sizes_[1] = 2;
  param->paddings_[0] = 2;
  param->paddings_[1] = 0;
  param->paddings_[2] = 2;
  param->paddings_[3] = 2;
  param->in_shape_[0] = 1;
  param->in_shape_[1] = 4;
  param->in_shape_[2] = 4;
  param->in_shape_[3] = 1;
  param->padded_in_shape_[0] = 1;
  param->padded_in_shape_[1] = 6;
  param->padded_in_shape_[2] = 8;
  param->padded_in_shape_[3] = 1;
  param->num_elements_ = 16;
  param->num_elements_padded_ = 48;
  param->need_paddings_ = true;
}
void InitSpaceToBatchParameter2(SpaceToBatchParameter *param) {
  param->block_sizes_[0] = 2;
  param->block_sizes_[1] = 2;
  param->paddings_[0] = 2;
  param->paddings_[1] = 0;
  param->paddings_[2] = 2;
  param->paddings_[3] = 2;
  param->in_shape_[0] = 1;
  param->in_shape_[1] = 4;
  param->in_shape_[2] = 4;
  param->in_shape_[3] = 1;
  param->padded_in_shape_[0] = 1;
  param->padded_in_shape_[1] = 6;
  param->padded_in_shape_[2] = 8;
  param->padded_in_shape_[3] = 1;
}
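The padded_in_shape_ values above follow directly from in_shape_ and paddings_: H grows from 4 to 4 + 2 + 0 = 6, W from 4 to 4 + 2 + 2 = 8, and num_elements_padded_ = 1 * 6 * 8 * 1 = 48. A small illustrative helper that derives them, assuming the 4-D NHWC layout and the {H_top, H_bottom, W_left, W_right} padding order used in these tests (it is not part of the kernel code):

// Illustrative: fill padded_in_shape_ and the element counts from in_shape_ and paddings_.
void FillPaddedShape(SpaceToBatchParameter *param) {
  param->padded_in_shape_[0] = param->in_shape_[0];
  param->padded_in_shape_[1] = param->in_shape_[1] + param->paddings_[0] + param->paddings_[1];
  param->padded_in_shape_[2] = param->in_shape_[2] + param->paddings_[2] + param->paddings_[3];
  param->padded_in_shape_[3] = param->in_shape_[3];
  param->num_elements_ = 1;
  param->num_elements_padded_ = 1;
  for (int i = 0; i < 4; ++i) {
    param->num_elements_ *= param->in_shape_[i];
    param->num_elements_padded_ *= param->padded_in_shape_[i];
  }
}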
TEST_F(SpaceToBatchTestFp32, SpaceToBatchTest1) {
  float input[16] = {1, 2, 5, 6, 10, 20, 3, 8, 18, 10, 3, 4, 11, 55, 15, 25};
  const int out_size = 16;
  float expect_out[16] = {1, 5, 18, 3, 2, 6, 10, 4, 10, 3, 11, 15, 20, 8, 55, 25};
  float output[16];
  int in_shape[4] = {1, 4, 4, 1};
  int out_shape[4] = {4, 2, 2, 1};
  int block_sizes[2] = {2, 2};
  SpaceToBatchForNHWC((const float *)input, output, in_shape, 4, block_sizes);
  for (int i = 0; i < out_size; ++i) {
    std::cout << output[i] << " ";
  }
  std::cout << "\n";
  CompareOutputData(output, expect_out, out_size, 0.000001);
}
TEST_F(SpaceToBatchTestFp32, SpaceToBatchTest2) {
  SpaceToBatchParameter param;
  InitSpaceToBatchParameter(&param);
  float input[16] = {1, 2, 5, 6, 10, 20, 3, 8, 18, 10, 3, 4, 11, 55, 15, 25};
  const int out_size = 48;
  float expect_out[48] = {0, 0, 0, 0, 0, 1,  5, 0, 0, 18, 3,  0, 0, 0, 0, 0, 0, 2,  6, 0, 0, 10, 4,  0,
                          0, 0, 0, 0, 0, 10, 3, 0, 0, 11, 15, 0, 0, 0, 0, 0, 0, 20, 8, 0, 0, 55, 25, 0};
  float output[48];
  int in_shape[4] = {1, 4, 4, 1};
  int out_shape[4] = {4, 3, 4, 1};
  int block_sizes[2] = {2, 2};

  float padded_input[48]{}, tmp[48]{}, tmp_zero[48]{};
  float *tmp_space[3] = {padded_input, tmp, tmp_zero};
  auto ret = SpaceToBatch((const float *)input, output, param, tmp_space);
  std::cout << "return " << ret << std::endl;
  for (int i = 0; i < out_size; ++i) {
    std::cout << output[i] << " ";
  }
  std::cout << "\n";
  CompareOutputData(output, expect_out, out_size, 0.000001);
}
TEST_F(SpaceToBatchTestFp32, SpaceToBatchTest3) {
  SpaceToBatchParameter param;
  InitSpaceToBatchParameter2(&param);
  param.op_parameter_.type_ = schema::PrimitiveType_SpaceToBatch;

  std::vector<float> input = {1, 2, 5, 6, 10, 20, 3, 8, 18, 10, 3, 4, 11, 55, 15, 25};
  std::vector<int> in_shape = {1, 4, 4, 1};
  lite::tensor::Tensor input_tensor;
  input_tensor.SetData(input.data());
  input_tensor.set_shape(in_shape);
  input_tensor.SetFormat(schema::Format_NHWC);
  input_tensor.set_data_type(kNumberTypeFloat32);
  std::vector<lite::tensor::Tensor *> inputs_tensor;
  inputs_tensor.emplace_back(&input_tensor);

  const int out_size = 48;
  float expect_out[48] = {0, 0, 0, 0, 0, 1,  5, 0, 0, 18, 3,  0, 0, 0, 0, 0, 0, 2,  6, 0, 0, 10, 4,  0,
                          0, 0, 0, 0, 0, 10, 3, 0, 0, 11, 15, 0, 0, 0, 0, 0, 0, 20, 8, 0, 0, 55, 25, 0};
  std::vector<float> output(48);
  std::vector<int> out_shape = {4, 3, 4, 1};
  lite::tensor::Tensor output_tensor;
  output_tensor.SetData(output.data());
  output_tensor.set_shape(out_shape);
  output_tensor.SetFormat(schema::Format_NHWC);
  output_tensor.set_data_type(kNumberTypeFloat32);
  std::vector<lite::tensor::Tensor *> outputs_tensor;
  outputs_tensor.emplace_back(&output_tensor);

  lite::Context ctx;
  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_SpaceToBatch};
  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
  ASSERT_NE(creator, nullptr);
  kernel::LiteKernel *kernel =
    creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&param), &ctx, desc);
  ASSERT_NE(kernel, nullptr);
  kernel->Run();

  for (int i = 0; i < out_size; ++i) {
    std::cout << output[i] << " ";
  }
  std::cout << "\n";
  CompareOutputData(output.data(), expect_out, out_size, 0.000001);
}
}  // namespace mindspore
mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <iostream>
#include <memory>
#include "utils/log_adapter.h"
#include "common/common_test.h"
#include "mindspore/lite/src/runtime/kernel/arm/opclib/fp32/space_to_depth.h"
#include "mindspore/lite/src/kernel_registry.h"
#include "mindspore/lite/src/lite_kernel.h"
namespace mindspore {
class SpaceToDepthTestFp32 : public mindspore::Common {
 public:
  SpaceToDepthTestFp32() {}
};
TEST_F(SpaceToDepthTestFp32, SpaceToDepthTest1) {
  float input[16] = {1, 2, 5, 6, 10, 20, 3, 8, 18, 10, 3, 4, 11, 55, 15, 25};
  const int out_size = 16;
  float expect_out[16] = {1, 2, 10, 20, 5, 6, 3, 8, 18, 10, 11, 55, 3, 4, 15, 25};
  float output[16];
  int in_shape[4] = {1, 4, 4, 1};
  int out_shape[4] = {1, 2, 2, 4};
  SpaceToDepthForNHWC((const float *)input, output, in_shape, out_shape, 4, 2);
  for (int i = 0; i < out_size; ++i) {
    std::cout << output[i] << " ";
  }
  std::cout << "\n";
  CompareOutputData(output, expect_out, out_size, 0.000001);
}
TEST_F(SpaceToDepthTestFp32, SpaceToDepthTest2) {
  std::vector<float> input = {1, 2, 5, 6, 10, 20, 3, 8, 18, 10, 3, 4, 11, 55, 15, 25};
  std::vector<int> in_shape = {1, 4, 4, 1};
  lite::tensor::Tensor input_tensor;
  input_tensor.SetData(input.data());
  input_tensor.set_shape(in_shape);
  input_tensor.SetFormat(schema::Format_NHWC);
  input_tensor.set_data_type(kNumberTypeFloat32);
  std::vector<lite::tensor::Tensor *> inputs_tensor;
  inputs_tensor.emplace_back(&input_tensor);

  const int out_size = 16;
  float expect_out[16] = {1, 2, 10, 20, 5, 6, 3, 8, 18, 10, 11, 55, 3, 4, 15, 25};
  std::vector<float> output(16);
  std::vector<int> out_shape = {1, 2, 2, 4};
  lite::tensor::Tensor output_tensor;
  output_tensor.SetData(output.data());
  output_tensor.set_shape(out_shape);
  output_tensor.SetFormat(schema::Format_NHWC);
  output_tensor.set_data_type(kNumberTypeFloat32);
  std::vector<lite::tensor::Tensor *> outputs_tensor;
  outputs_tensor.emplace_back(&output_tensor);
  SpaceToDepthParameter op_param;
  op_param.op_parameter_.type_ = schema::PrimitiveType_SpaceToDepth;
  op_param.block_size_ = 2;
  lite::Context ctx;
  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_SpaceToDepth};
  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
  ASSERT_NE(creator, nullptr);
  kernel::LiteKernel *kernel =
    creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), &ctx, desc);
  ASSERT_NE(kernel, nullptr);
  kernel->Run();

  for (int i = 0; i < out_size; ++i) {
    std::cout << output[i] << " ";
  }
  std::cout << "\n";
  CompareOutputData(output.data(), expect_out, out_size, 0.000001);
}
}  // namespace mindspore
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/dequantize_int8_tests.cc
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <memory>
#include "utils/log_adapter.h"
#include "common/common_test.h"
#include "mindspore/lite/src/runtime/kernel/arm/int8/dequantize.h"
#include "mindspore/lite/src/runtime/kernel/arm/opclib/int8/dequantize.h"
#include "mindspore/lite/src/kernel_registry.h"
#include "mindspore/lite/src/lite_kernel.h"
namespace mindspore {
class DequantizeTestFp32 : public mindspore::Common {
 public:
  DequantizeTestFp32() {}
};

TEST_F(DequantizeTestFp32, DequantizeTest1) {
  const lite::tensor::QuantArg quant_arg{0.21176, 5};
  // quant_arg.scale = 100.0;
  // quant_arg.zeroPoint = 20;
  DequantizeParameter param;
  param.op_parameter_.type_ = schema::PrimitiveType_OnnxInt8Dequantize;

  std::vector<int8_t> input = {10, 14, 29, 33, 52, 99, 19, 43, 90, 52, 19, 24, 57, 127, 76, 123};
  std::vector<int> in_shape = {1, 4, 4, 1};
  lite::tensor::Tensor input_tensor;
  input_tensor.SetData(input.data());
  input_tensor.set_shape(in_shape);
  input_tensor.set_data_type(kNumberTypeInt8);
  input_tensor.SetFormat(schema::Format_NHWC);
  input_tensor.AddQuantParam(quant_arg);
  std::vector<lite::tensor::Tensor *> inputs_tensor;
  inputs_tensor.emplace_back(&input_tensor);

  const int out_size = 16;
  float expect_out[16] = {3.1764,  4.02344,  7.19984,  8.04688, 12.07032, 22.02304, 5.08224,  10.16448,
                          20.1172, 12.07032, 5.082240, 6.14104, 13.12912, 27.95232, 17.15256, 27.10528};
  std::vector<float> output(16);
  std::vector<int> out_shape = {1, 4, 4, 1};
  lite::tensor::Tensor output_tensor;
  output_tensor.SetData(output.data());
  output_tensor.set_shape(out_shape);
  output_tensor.set_data_type(kNumberTypeFloat32);
  output_tensor.SetFormat(schema::Format_NHWC);
  std::vector<lite::tensor::Tensor *> outputs_tensor;
  outputs_tensor.emplace_back(&output_tensor);

  lite::Context ctx;
  ctx.threadNum = 3;
  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_OnnxInt8Dequantize};
  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
  ASSERT_NE(creator, nullptr);
  kernel::LiteKernel *kernel =
    creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&param), &ctx, desc);
  ASSERT_NE(kernel, nullptr);
  kernel->Run();

  for (int i = 0; i < out_size; ++i) {
    std::cout << output[i] << " ";
  }
  std::cout << "\n";
  CompareOutputData(output.data(), expect_out, out_size, 0.000001);
}
}  // namespace mindspore