Commit 46df711b

Authored on Aug 20, 2020 by mindspore-ci-bot; committed by Gitee on Aug 20, 2020.
!4832 remove all todos

Merge pull request !4832 from wangzhe/master

Parents: 4ec4205e, 0287c6f9
Showing 26 changed files with 20 additions and 46 deletions (+20 −46).
mindspore/lite/src/populate_parameter.cc (+0 −2)
mindspore/lite/src/runtime/kernel/arm/fp32/pad.cc (+0 −1)
mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd.cc (+0 −1)
mindspore/lite/src/runtime/kernel/arm/nnacl/fp16/conv_fp16.c (+0 −1)
mindspore/lite/src/runtime/kernel/arm/nnacl/fp32/arithmetic.c (+0 −1)
mindspore/lite/src/runtime/kernel/arm/nnacl/fp32/conv_depthwise.c (+0 −1)
mindspore/lite/src/runtime/kernel/arm/nnacl/int8/deconv.c (+5 −5)
mindspore/lite/src/runtime/kernel/arm/nnacl/int8/reduce_int8.c (+1 −1)
mindspore/lite/src/runtime/opencl/opencl_executor.cc (+0 −2)
mindspore/lite/test/ut/src/runtime/kernel/arm/common/pack_tests.cc (+0 −1)
mindspore/lite/test/ut/src/runtime/kernel/arm/fp16/convolution_fp16_tests.cc (+0 −2)
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_bilinear_int8_tests.cc (+1 −1)
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_nearest_neighbor_int8_tests.cc (+1 −1)
mindspore/lite/tools/converter/legacy_optimizer/fusion/batchnorm_fold_fusion_pass.cc (+1 −1)
mindspore/lite/tools/converter/legacy_optimizer/graph/weight_format_hardcode_pass.cc (+1 −1)
mindspore/lite/tools/converter/parser/caffe/caffe_model_parser.cc (+0 −1)
mindspore/lite/tools/converter/parser/caffe/caffe_scale_parser.cc (+0 −1)
mindspore/lite/tools/converter/parser/onnx/onnx_arithmetic_operation_parser.cc (+1 −2)
mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.cc (+0 −4)
mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.cc (+0 −3)
mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.cc (+1 −1)
mindspore/lite/tools/converter/parser/onnx/onnx_reshape_parser.cc (+0 −1)
mindspore/lite/tools/converter/parser/tflite/schema.fbs (+1 −1)
mindspore/lite/tools/converter/quantizer/calc_quant_param.cc (+0 −1)
mindspore/lite/tools/converter/quantizer/post_training_quantizer.cc (+0 −2)
mindspore/lite/tools/optimizer/common/node_pass_extends.cc (+7 −7)
mindspore/lite/src/populate_parameter.cc

@@ -263,7 +263,6 @@ OpParameter *PopulatePoolingParameter(const mindspore::lite::PrimitiveC *primiti
   pooling_param->global_ = pooling_primitive->GetGlobal();
   pooling_param->window_w_ = pooling_primitive->GetWindowW();
   pooling_param->window_h_ = pooling_primitive->GetWindowH();
-  // todo format
   auto pooling_lite_primitive = (lite::Pooling *)primitive;
   MS_ASSERT(nullptr != pooling_lite_primitive);
   pooling_param->pad_u_ = pooling_lite_primitive->PadUp();

@@ -402,7 +401,6 @@ OpParameter *PopulateConvDwParameter(const mindspore::lite::PrimitiveC *primitiv
   auto conv_primitive = dynamic_cast<const mindspore::lite::DepthwiseConv2D *>(primitive);
   conv_param->kernel_h_ = conv_primitive->GetKernelH();
   conv_param->kernel_w_ = conv_primitive->GetKernelW();
-  // todo format, group
   conv_param->stride_h_ = conv_primitive->GetStrideH();
   conv_param->stride_w_ = conv_primitive->GetStrideW();
mindspore/lite/src/runtime/kernel/arm/fp32/pad.cc

@@ -101,7 +101,6 @@ int PadCPUKernel::Run() {
   int output_size = output->DataSize();
   auto output_data = reinterpret_cast<float *>(output->Data());
-  // todo parallel memset to save time
   memset(output_data, 0, output_size * sizeof(float));
   int error_code = LiteBackendParallelLaunch(PadImpl, this, context_->thread_num_);
mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd.cc

@@ -87,7 +87,6 @@ int ScatterNDCPUKernel::ReSize() {
       return RET_ERROR;
     }
   }
-  // todo check indeices out of range
   // for (size_t i = 0; i < static_cast<size_t>(indice_unit_rank); i++) {}
   // calculate unit_size_
mindspore/lite/src/runtime/kernel/arm/nnacl/fp16/conv_fp16.c

@@ -332,7 +332,6 @@ void ConvFp16(float16_t *input_data, float16_t *packed_input, float16_t *packed_
   int out_channel = conv_param->output_channel_;
   bool relu = conv_param->is_relu_;
   bool relu6 = conv_param->is_relu6_;
-  // todo
   int thread_count = conv_param->thread_num_;
   const int tile_n = 16;
   int output_count = out_h * out_w;
mindspore/lite/src/runtime/kernel/arm/nnacl/fp32/arithmetic.c

@@ -379,7 +379,6 @@ int BroadcastSub(float *input0, float *input1, float *tile_input0, float *tile_i
   return ElementSub(tile_input0, tile_input1, output, element_size);
 }

-// todo c=a/b,if(b==0)
 int ElementDiv(float *input0, float *input1, float *output, int element_size) {
   for (int i = 0; i < element_size; i++) {
     if (input1[i] == 0) {
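Note: the removed comment ("c=a/b, if(b==0)") described behaviour the function already implements, since the loop checks each divisor before dividing. A rough, stand-alone illustration of that pattern (not the NNACL source; the return codes below are made up):

#include <stddef.h>

enum { DIV_OK = 0, DIV_BY_ZERO = 1 }; /* hypothetical return codes */

/* Element-wise c[i] = a[i] / b[i], rejecting zero divisors. */
int ElementDivSketch(const float *a, const float *b, float *c, size_t n) {
  for (size_t i = 0; i < n; ++i) {
    if (b[i] == 0.0f) {
      return DIV_BY_ZERO; /* same guard as the `if (input1[i] == 0)` check above */
    }
    c[i] = a[i] / b[i];
  }
  return DIV_OK;
}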
mindspore/lite/src/runtime/kernel/arm/nnacl/fp32/conv_depthwise.c

@@ -423,7 +423,6 @@ void ConvDw3x3Fp32InputTrans(const float *input_data, float *trans_input, float
   }
 }

-// todo yangruoqi: implement assembly
 void ConvDw3x3Fp32Winograd(float *trans_buffer, const float *weight, int out_h_block, int out_w_block) {
   const int unit = 4;
   for (int oh = 0; oh < out_h_block; oh++) {
mindspore/lite/src/runtime/kernel/arm/nnacl/int8/deconv.c

@@ -152,7 +152,7 @@ void DeConvWeightTransInt8(int8_t *src, int8_t *dst, int input_channel, int outp
       }
     }
   } else {
-    /* todo normal int8 deconv */
+    /* normal int8 deconv */
   }
   return;
 }

@@ -171,7 +171,7 @@ void DeConvPackWeightSum(int8_t *weight, int32_t *weight_sum, int32_t input_zp,
       weight_sum[c] = filter_zp * input_zp * deep16 - value * input_zp;
     }
   } else {
-    /* todo normal int8 deconv */
+    /* normal int8 deconv */
   }
   return;
 }

@@ -188,7 +188,7 @@ void DeConvPackInputSum(const int8_t *src, int32_t *dst, int32_t filter_zp, int
       dst[r] = tmp_value * filter_zp;
     }
   } else {
-    /* todo normal int8 deconv */
+    /* normal int8 deconv */
   }
   return;
 }

@@ -199,7 +199,7 @@ int DeConvInt8(const int8_t *input, const int8_t *weight, int32_t *output, int32
   if (matmul_func != NULL) {
     matmul_func(input, weight, output, act_row, act_col, act_deep, input_sum, weight_sum);
   } else {
-    /* todo normal int8 deconv */
+    /* normal int8 deconv */
   }
   return NNACL_OK;
 }

@@ -210,7 +210,7 @@ int DeConvPostInt8(const int32_t *src, const int32_t *bias, int32_t *tmp, int8_t
   if (support_optimize) {
    error_code = DeConvPostInt8C4(src, bias, tmp, out, output_channel, conv_param);
   } else {
-    /* todo normal int8 deconv post */
+    /* normal int8 deconv post */
   }
   return error_code;
 }
mindspore/lite/src/runtime/kernel/arm/nnacl/int8/reduce_int8.c

@@ -377,7 +377,7 @@ int ReduceProdInt8(const int outer_size, const int inner_size, const int axis_si
       if (isAddOverflow(prod, quant->in_zp_)) {
        return NNACL_ERRCODE_ADD_OVERFLOW;
       }
-      *inner_dst = prod + quant->in_zp_;  // todo overflow
+      *inner_dst = prod + quant->in_zp_;
     }
   }
   return NNACL_OK;
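Note: the trailing "// todo overflow" was redundant because isAddOverflow already guards the addition two lines earlier. A minimal sketch of such a signed 32-bit addition overflow check (illustrative only, not the NNACL helper):

#include <stdbool.h>
#include <stdint.h>

/* True if a + b would overflow a signed 32-bit integer. */
static bool IsAddOverflowInt32(int32_t a, int32_t b) {
  if (b > 0 && a > INT32_MAX - b) return true; /* would exceed INT32_MAX */
  if (b < 0 && a < INT32_MIN - b) return true; /* would fall below INT32_MIN */
  return false;
}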
mindspore/lite/src/runtime/opencl/opencl_executor.cc

@@ -130,7 +130,6 @@ int OpenCLExecutor::TransformTensorLayoutToBuffer(tensor::Tensor *tensor, schema
     tensor->SetFormat(dst_format);
     return RET_OK;
   } else if (dst_format == schema::Format_NHWC) {
-    // TODO(wandongdong): add support !!
     return RET_OK;
   } else {
     MS_LOG(ERROR) << "Unsupported layout transform: " << schema::EnumNameFormat(tensor->GetFormat()) << " to "

@@ -200,7 +199,6 @@ int OpenCLExecutor::TransformTensorLayoutUint8(tensor::Tensor *tensor, schema::F
   MS_ASSERT(nullptr != tensor);
   MS_ASSERT(4 == tensor->shape().size());
-  // auto src_format = tensor->GetFormat();  // todo
   MS_LOG(ERROR) << "Unsupported layout transform: " << schema::EnumNameFormat(tensor->GetFormat()) << " to "
                 << schema::EnumNameFormat(dst_format) << " in uint8";
   return RET_ERROR;
mindspore/lite/test/ut/src/runtime/kernel/arm/common/pack_tests.cc

@@ -138,7 +138,6 @@ TEST_F(TestPack, PackWeightFp32) {
 #ifdef ENABLE_FP16
 TEST_F(TestPack, PackInputFp16) {
-  // todo
   size_t input_size;
   std::string input_path = "./test_data/conv/convfp32_input_1_28_28_3.bin";
   auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
mindspore/lite/test/ut/src/runtime/kernel/arm/fp16/convolution_fp16_tests.cc

@@ -328,7 +328,6 @@ TEST_F(TestConvolutionFp16, ConvTest2) {
 TEST_F(TestConvolutionFp16, Conv3x3Test1) {
   auto conv_param = new ConvParameter();
   InitConvParamGroup1Fp16(conv_param);
-  // todo
   int thread_count = 1;
   int tile_num = 16;
   int output_batch = conv_param->output_batch_;

@@ -474,7 +473,6 @@ TEST_F(TestConvolutionFp16, Conv3x3Test1) {
 TEST_F(TestConvolutionFp16, Conv3x3Test2) {
   auto conv_param = new ConvParameter();
   InitConvParamGroup2Fp16(conv_param);
-  // todo
   int thread_count = 1;
   int tile_num = 16;
   int output_batch = conv_param->output_batch_;
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_bilinear_int8_tests.cc

@@ -90,7 +90,7 @@ TEST_F(TestResizeBilinearInt8, Bilinear0) {
   int8_t expect[16] = {4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 5, 5, 6, 6};

   Prepare(in_shape, out_shape, input_data, output_data, quant_in, quant_out, align_corners, thread_num);
-  kernel_->Init();  // todo delete
+  kernel_->Init();
   kernel_->Run();

   CompareOutputInt8(output_data, expect, 16, err_percent_);
mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_nearest_neighbor_int8_tests.cc

@@ -92,7 +92,7 @@ TEST_F(TestResizeNearestNeighborInt8, NearestNeighbor0) {
   err_percent_ = 0.25f;

   Prepare(in_shape, out_shape, input_data, output_data, quant_in, quant_out, false, thread_num);
-  kernel_->Init();  // todo delete
+  kernel_->Init();
   kernel_->Run();

   CompareOutputInt8(output_data, expect, 16, err_percent_);
mindspore/lite/tools/converter/legacy_optimizer/fusion/batchnorm_fold_fusion_pass.cc

@@ -348,7 +348,7 @@ STATUS BatchNormFoldFusionPass::GenNewBiasTensor() {  // bias has no quant
     MS_LOG(ERROR) << "new BiasTensor failed";
     return RET_ERROR;
   }
-  newBiasTensor->dataType = 0;  // todo is float
+  newBiasTensor->dataType = 0;
   newBiasTensor->format = Format_NUM_OF_FORMAT;
   newBiasTensor->refCount = schema::NodeType_ValueNode;
   newBiasTensor->dims = biasShape;
mindspore/lite/tools/converter/legacy_optimizer/graph/weight_format_hardcode_pass.cc

@@ -29,7 +29,7 @@ void WeightFormatHardCodePass::SetFmkType(converter::FmkType fmkType) { this->fm
 // pre set tensor format
 // non quant, filterFormat:
 //          conv      deconv    depth     dedepth
-// caffe   K(C/g)HW  C(K/g)HW     /         /        // todo with deconvOp
+// caffe   K(C/g)HW  C(K/g)HW     /         /
 // tf        HWCK      HWKC     HWCK      HWKC
 // onnx    K(C/g)HW  C(K/g)HW     /         /
mindspore/lite/tools/converter/parser/caffe/caffe_model_parser.cc

@@ -174,7 +174,6 @@ STATUS CaffeModelParser::ParseLayer(const caffe::NetParameter &proto, const caff
         break;
       }
     }
-    // todo y00520784 : layer.input_param().shape(0)
     if (layer.type() == "Input") {
       std::unique_ptr<schema::TensorT> msTensor = std::make_unique<schema::TensorT>();
       for (int j = 0; j < layer.input_param().shape(0).dim_size(); j++) {
mindspore/lite/tools/converter/parser/caffe/caffe_scale_parser.cc

@@ -43,7 +43,6 @@ STATUS CaffeScaleParser::Parse(const caffe::LayerParameter &proto, const caffe::
   attr->axis = axis;

   // parse scale
-  // todo expect only weight as scale not bias
   if (weight.blobs().size() == 1) {
     auto scale = ConvertWeight(weight.blobs(0));
     if (scale == nullptr) {
mindspore/lite/tools/converter/parser/onnx/onnx_arithmetic_operation_parser.cc

@@ -66,8 +66,7 @@ STATUS OnnxDivParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Node
 STATUS OnnxPowParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
   MS_LOG(DEBUG) << "onnx PowParser";
   if (op != nullptr) {
-    // TODO(wangzhe) attr power need populate
-    std::unique_ptr<schema::PowerT> attr = std::make_unique<schema::PowerT>();
+    std::unique_ptr<schema::PowerT> attr(new schema::PowerT());
     op->primitive = std::make_unique<schema::PrimitiveT>();
     op->primitive->value.type = schema::PrimitiveType_Power;
     op->primitive->value.value = attr.release();
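Note: the context lines show the pattern these ONNX parsers share — allocate the attribute object, attach a fresh PrimitiveT to the node, and transfer ownership of the attribute with release(). A simplified stand-alone sketch of that ownership hand-off (illustrative only; the struct names below are made-up stand-ins, not MindSpore schema types):

#include <memory>

struct PowerAttr { float power = 1.0f; };          // stand-in for schema::PowerT
struct Primitive { PowerAttr *value = nullptr; };  // stand-in for the primitive's value field

int main() {
  std::unique_ptr<PowerAttr> attr(new PowerAttr());  // allocation style used by the added line
  auto primitive = std::make_unique<Primitive>();
  primitive->value = attr.release();  // ownership moves into the primitive, as in the parser
  delete primitive->value;            // in the real code the owning table frees this
  return 0;
}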
mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.cc

@@ -65,7 +65,6 @@ STATUS OnnxConvParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod
         MS_LOG(ERROR) << "dilations size " << onnx_node_attr.ints().size() << " is not 2";
         return RET_ERROR;
       }
-      // TODO(wangzhe) verify the change
       attr->dilateH = static_cast<int32_t>(onnx_node_attr.ints(0));
       attr->dilateW = static_cast<int32_t>(onnx_node_attr.ints(1));
     } else if (onnx_node_attr.name() == "kernels") {

@@ -80,7 +79,6 @@ STATUS OnnxConvParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod
         MS_LOG(ERROR) << "kernel_shape size " << onnx_node_attr.ints().size() << " is not 2";
         return RET_ERROR;
       }
-      // TODO(wangzhe) verify the change
       attr->kernelH = static_cast<int32_t>(onnx_node_attr.ints(0));
       attr->kernelW = static_cast<int32_t>(onnx_node_attr.ints(1));
     } else if (onnx_node_attr.name() == "auto_pad") {

@@ -99,7 +97,6 @@ STATUS OnnxConvParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod
         MS_LOG(ERROR) << "strides size " << onnx_node_attr.ints().size() << " is not 2";
         return RET_ERROR;
       }
-      // TODO(wangzhe) verify the change
       attr->strideH = static_cast<int32_t>(onnx_node_attr.ints(0));
       attr->strideW = static_cast<int32_t>(onnx_node_attr.ints(1));
     } else if (onnx_node_attr.name() == "order") {

@@ -143,7 +140,6 @@ STATUS OnnxConvParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod
       dims.insert(dims.begin(), iter->ints().begin(), iter->ints().end());
     }
     attr->channelOut = dims[0];
-    // TODO(wangzhe) verify this code
     attr->channelIn = dims[3] * attr->group;
   }
   attr->format = schema::Format_NCHW;
mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.cc

@@ -241,7 +241,6 @@ STATUS OnnxModelParser::ParseOnnxGivenFillNode(const onnx::NodeProto &onnx_node,
   std::for_each(shape.begin(), shape.end(), [&data_count](int dim) { data_count *= dim; });
   size_t data_size = 0;
   if (onnx_node.op_type() == "Int8GivenIntTensorFill") {
-    // todo how to read onnx-ori-dataType
     tensor->dataType = kNumberTypeInt32;
     data_size = data_count * sizeof(int32_t) / sizeof(uint8_t);
     tensor->data.resize(data_size);

@@ -252,9 +251,7 @@ STATUS OnnxModelParser::ParseOnnxGivenFillNode(const onnx::NodeProto &onnx_node,
       castedTensorData[i] = int32_t(iter->ints().data()[i]);
     }
   } else if (onnx_node.op_type() == "Int8GivenTensorFill") {
-    // todo how to read onnx-ori-dataType
     tensor->dataType = kNumberTypeUInt8;
-    // todo: add * sizof(string)
     data_size = data_count;
     tensor->data.resize(data_size);
     MS_LOG(DEBUG) << "tensor data size " << data_size << ", s: " << sizeof(iter->s().data());
mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.cc

@@ -65,7 +65,7 @@ STATUS OnnxPReluParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::No
     if (slope_size == 1) {
       attr->slope.push_back(*slope_raw_data);
       attr->channelShared = true;
-    } else {  // TODO(wangzhe) we don't check input tensor's channel size, this may cause problem
+    } else {
       attr->slope.resize(slope_size);
       attr->channelShared = false;
       if (memcpy_s(attr->slope.data(), slope_size * sizeof(float), slope_raw_data, slope_size * sizeof(float)) != 0) {
mindspore/lite/tools/converter/parser/onnx/onnx_reshape_parser.cc

@@ -26,7 +26,6 @@ STATUS OnnxReshapeParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::
   std::unique_ptr<schema::ReshapeT> attr = std::make_unique<schema::ReshapeT>();
   attr->format = schema::Format_NCHW;
   std::vector<onnx::TensorProto> params;
-  // TODO(wangzhe) shape may also come from other op, there need refactor to introduce tensor_cache
   for (int i = 0; i < onnx_node.input_size(); ++i) {
     const auto &input_name = onnx_node.input(i);
     for (const auto &it : onnx_graph.initializer()) {
mindspore/lite/tools/converter/parser/tflite/schema.fbs

@@ -247,7 +247,7 @@ enum BuiltinOperator : byte {
   SPACE_TO_DEPTH = 26,
   SVDF = 27,
   TANH = 28,
-  // TODO(aselle): Consider rename to CONCATENATE_EMBEDDINGS
+  // Consider rename to CONCATENATE_EMBEDDINGS
   CONCAT_EMBEDDINGS = 29,
   SKIP_GRAM = 30,
   CALL = 31,
mindspore/lite/tools/converter/quantizer/calc_quant_param.cc

@@ -168,7 +168,6 @@ int LinearCalcer::Calc(MetaGraphT *graph, const CNodeT &node) {
       if (outQuantParam->inited) {
        continue;
       }
-      // todo copy quant params
       outTensor->quantParams.front() = std::move(outQuantParam);
     }
   }
mindspore/lite/tools/converter/quantizer/post_training_quantizer.cc

@@ -802,7 +802,6 @@ STATUS PostTrainingQuantizer::CheckTensorVec(const std::string &nodeName,
 **/
 STATUS PostTrainingQuantizer::DoInference() {
   for (size_t i = 0; i < calibrator_->GetBatchNum(); i++) {
-    // TODO(x) when model has inputs count > 1
     // get input tensor
     vector<mindspore::tensor::MSTensor *> inputs = session_->GetInputs();
     if (inputs.size() > 1) {

@@ -854,7 +853,6 @@ STATUS PostTrainingQuantizer::DoInference() {
 STATUS PostTrainingQuantizer::CollectDataFrequency() {
   for (size_t i = 0; i < calibrator_->GetBatchNum(); i++) {
-    // TODO(x) when model has inputs count > 1
     // get input tensor
     vector<mindspore::tensor::MSTensor *> inputs = session_->GetInputs();
     if (inputs.size() > 1) {
mindspore/lite/tools/optimizer/common/node_pass_extends.cc

@@ -33,11 +33,11 @@ bool NodePass::Run(const FuncGraphPtr &func_graph) {
   manager->AddFuncGraph(func_graph);
   std::unordered_set<AnfNodePtr> seen_node;
-  std::deque<AnfNodePtr> todo{func_graph->output()};
+  std::deque<AnfNodePtr> to_process{func_graph->output()};
   bool changes = false;
-  while (!todo.empty()) {
-    AnfNodePtr node = todo.front();
-    todo.pop_front();
+  while (!to_process.empty()) {
+    AnfNodePtr node = to_process.front();
+    to_process.pop_front();
     if (seen_node.count(node) > 0 || !manager->all_nodes().contains(node)) {
       continue;
     }

@@ -53,15 +53,15 @@ bool NodePass::Run(const FuncGraphPtr &func_graph) {
     if (new_node && IsValueNode<FuncGraph>(new_node)) {
       auto const_func_graph = GetValueNode<FuncGraphPtr>(new_node);
       MS_EXCEPTION_IF_NULL(const_func_graph);
-      todo.push_back(const_func_graph->output());
+      to_process.push_back(const_func_graph->output());
     } else if (new_node && new_node->isa<CNode>()) {
       if (IsGraphKernel(new_node)) {
-        todo.push_back(new_node);
+        to_process.push_back(new_node);
       }
       auto cnode = new_node->cast<CNodePtr>();
       MS_EXCEPTION_IF_NULL(cnode);
       auto inputs = cnode->inputs();
-      (void)todo.insert(todo.end(), inputs.begin(), inputs.end());
+      (void)to_process.insert(to_process.end(), inputs.begin(), inputs.end());
     }
     changes = changes || change;
   }
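Note: the +7/−7 in this file is a pure rename. The traversal worklist was a deque named todo, which the sweep for "todo" caught, so it is now to_process. A stand-alone sketch of the same worklist traversal (illustrative only; plain int nodes instead of AnfNodePtr, and without the graph-manager checks):

#include <deque>
#include <unordered_set>
#include <vector>

// Visit every node reachable from `root` exactly once.
// Nodes are plain ints; neighbours[n] stands in for cnode->inputs().
void Traverse(int root, const std::vector<std::vector<int>> &neighbours) {
  std::unordered_set<int> seen_node;
  std::deque<int> to_process{root};  // renamed from `todo` in this commit
  while (!to_process.empty()) {
    int node = to_process.front();
    to_process.pop_front();
    if (seen_node.count(node) > 0) {
      continue;  // already handled
    }
    seen_node.insert(node);
    const auto &ins = neighbours[node];
    to_process.insert(to_process.end(), ins.begin(), ins.end());  // enqueue successors
  }
}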