magicwindyyd / mindspore
Forked from MindSpore / mindspore (in sync with the fork source)
Commit 6fe36278
Authored Aug 19, 2020 by wsc

clean cmake building errors

Parent 40dced5d
Showing 33 changed files with 61 additions and 58 deletions
build.sh (+1 −1)
mindspore/lite/src/ops/addn.cc (+1 −1)
mindspore/lite/src/ops/argmax.cc (+1 −1)
mindspore/lite/src/ops/argmin.cc (+1 −1)
mindspore/lite/src/ops/arithmetic.cc (+4 −4)
mindspore/lite/src/ops/batch_to_space.cc (+1 −1)
mindspore/lite/src/ops/concat.cc (+1 −1)
mindspore/lite/src/ops/embedding_lookup.cc (+1 −1)
mindspore/lite/src/ops/expand_dims.cc (+1 −1)
mindspore/lite/src/ops/flatten.cc (+1 −1)
mindspore/lite/src/ops/full_connection.cc (+1 −1)
mindspore/lite/src/ops/gather.cc (+1 −1)
mindspore/lite/src/ops/matmul.cc (+1 −1)
mindspore/lite/src/ops/mean.cc (+2 −2)
mindspore/lite/src/ops/prior_box.cc (+1 −1)
mindspore/lite/src/ops/reduce.cc (+2 −2)
mindspore/lite/src/ops/reshape.cc (+3 −3)
mindspore/lite/src/ops/slice.cc (+1 −1)
mindspore/lite/src/ops/split.cc (+1 −1)
mindspore/lite/src/ops/squeeze.cc (+4 −4)
mindspore/lite/src/ops/stack.cc (+1 −1)
mindspore/lite/src/ops/strided_slice.cc (+5 −5)
mindspore/lite/src/ops/transpose.cc (+1 −1)
mindspore/lite/src/ops/unsqueeze.cc (+4 −4)
mindspore/lite/src/ops/unstack.cc (+1 −1)
mindspore/lite/src/ops/where.cc (+2 −2)
mindspore/lite/tools/anf_exporter/anf_exporter.cc (+2 −2)
mindspore/lite/tools/converter/legacy_optimizer/graph/eltwise_format_trans_pass.cc (+2 −2)
mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.cc (+4 −5)
mindspore/lite/tools/converter/parser/caffe/caffe_inspector.cc (+6 −3)
mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.cc (+1 −0)
mindspore/lite/tools/converter/parser/tflite/tflite_addn_parser.cc (+1 −1)
mindspore/lite/tools/converter/quantizer/general_bitpacking.cc (+1 −1)
build.sh
@@ -616,7 +616,7 @@ build_lite()
         -DANDROID_STL="c++_shared" -DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
         -DBUILD_DEVICE=on -DPLATFORM_ARM32=on -DENABLE_NEON=on -DSUPPORT_TRAIN=${SUPPORT_TRAIN} -DBUILD_CONVERTER=off \
         -DSUPPORT_GPU=${ENABLE_GPU} -DOFFLINE_COMPILE=${OPENCL_OFFLINE_COMPILE} -DBUILD_MINDDATA=${COMPILE_MINDDATA_LITE} \
-        -DCMAKE_INSTALL_PREFIX=${BASEPATH}/output/tmp "${BASEPATH}/mindspore/lite"
+        -DCMAKE_INSTALL_PREFIX=${BASEPATH}/output/tmp "${BASEPATH}/mindspore/lite"
     else
         cmake -DBUILD_DEVICE=on -DPLATFORM_ARM64=off -DBUILD_CONVERTER=${ENABLE_CONVERTER} -DSUPPORT_TRAIN=${SUPPORT_TRAIN} \
         -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DSUPPORT_GPU=${ENABLE_GPU} -DBUILD_MINDDATA=${COMPILE_MINDDATA_LITE} \
mindspore/lite/src/ops/addn.cc
@@ -48,7 +48,7 @@ int AddN::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::T
   if (!GetInferFlag()) {
     return RET_OK;
   }
-  for (int i = 1; i < inputs.size(); ++i) {
+  for (size_t i = 1; i < inputs.size(); ++i) {
     if (inputs.at(i)->shape() != inputs.at(0)->shape()) {
       MS_LOG(ERROR) << "AddN inputs shape is not equal!";
       return RET_INPUT_TENSOR_ERROR;
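Most of the 33 files get the same one-line fix shown above: a loop index declared int is compared against std::vector::size(), which returns the unsigned size_t, and with warnings promoted to errors the build stops on -Wsign-compare. A minimal standalone sketch of the failure mode and the fix (a hypothetical example, not MindSpore code):

    #include <cstddef>
    #include <vector>

    // Returns how many input shapes differ from the first one.
    int CountMismatched(const std::vector<std::vector<int>> &inputs) {
      int mismatched = 0;
      // Before: for (int i = 1; i < inputs.size(); ++i)  -- int vs size_t,
      // which trips -Wsign-compare and fails the build under -Werror.
      for (size_t i = 1; i < inputs.size(); ++i) {  // index type matches size()
        if (inputs.at(i) != inputs.at(0)) {
          ++mismatched;
        }
      }
      return mismatched;
    }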
mindspore/lite/src/ops/argmax.cc
@@ -63,7 +63,7 @@ int ArgMax::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor
   }
   std::vector<int> output_shape(input->shape());
   auto input_shape_size = input->shape().size();
-  int axis = GetAxis() < 0 ? GetAxis() + input_shape_size : GetAxis();
+  auto axis = GetAxis() < 0 ? GetAxis() + input_shape_size : GetAxis();
   if (axis >= input_shape_size || axis < 0) {
     MS_LOG(ERROR) << "Invalid axis " << GetAxis() << ", input shape size: " << input_shape_size;
     return RET_PARAM_INVALID;
mindspore/lite/src/ops/argmin.cc
@@ -61,7 +61,7 @@ int ArgMin::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<
     return RET_OK;
   }
   auto input_shape_size = input->shape().size();
-  int axis = GetAxis() < 0 ? GetAxis() + input_shape_size : GetAxis();
+  auto axis = GetAxis() < 0 ? GetAxis() + input_shape_size : GetAxis();
   if (axis >= input_shape_size || axis < 0) {
     MS_LOG(ERROR) << "Invalid axis " << GetAxis() << ", input shape size: " << input_shape_size;
     return RET_PARAM_INVALID;
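In argmax.cc and argmin.cc the fix is subtler: int axis becomes auto axis. Since GetAxis() returns a signed value and input_shape_size is a size_t, the ternary's result is already unsigned, and auto preserves that, so the later axis >= input_shape_size check compares two unsigned values; the same auto rewrite appears in concat.cc, stack.cc, and unstack.cc below. A standalone sketch of the deduction (hypothetical names; assumes size_t is at least as wide as int, which covers common 32- and 64-bit targets):

    #include <cstddef>
    #include <type_traits>

    int main() {
      int raw_axis = -1;            // stand-in for GetAxis()
      size_t input_shape_size = 4;  // stand-in for input->shape().size()
      // int + size_t promotes to size_t, so the ternary (and auto) yield size_t;
      // the unsigned wrap-around still lands on the intended index (-1 + 4 == 3).
      auto axis = raw_axis < 0 ? raw_axis + input_shape_size : raw_axis;
      static_assert(std::is_same<decltype(axis), size_t>::value,
                    "axis is deduced unsigned, matching input_shape_size");
      return axis >= input_shape_size;  // unsigned vs unsigned: no -Wsign-compare
    }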
mindspore/lite/src/ops/arithmetic.cc
@@ -55,7 +55,7 @@ int Arithmetic::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vec
     ndim_ = input_shape1.size();
     auto fill_dim_num = input_shape1.size() - input_shape0.size();
     int j = 0;
-    for (int i = 0; i < input_shape1.size(); i++) {
+    for (size_t i = 0; i < input_shape1.size(); i++) {
       if (i < fill_dim_num) {
         in_shape0_[i] = 1;
       } else {
@@ -68,7 +68,7 @@ int Arithmetic::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vec
     ndim_ = input_shape0.size();
     auto fill_dim_num = input_shape0.size() - input_shape1.size();
     int j = 0;
-    for (int i = 0; i < input_shape0.size(); i++) {
+    for (size_t i = 0; i < input_shape0.size(); i++) {
       if (i < fill_dim_num) {
         in_shape1_[i] = 1;
       } else {
@@ -77,14 +77,14 @@ int Arithmetic::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vec
         in_shape0_[i] = input_shape0[i];
       }
     }
   } else {
-    for (int i = 0; i < input_shape0.size(); i++) {
+    for (size_t i = 0; i < input_shape0.size(); i++) {
       in_shape1_[i] = input_shape1[i];
       in_shape0_[i] = input_shape0[i];
     }
   }
   std::vector<int> output_shape;
-  for (size_t i = 0; i < ndim_; i++) {
+  for (int i = 0; i < ndim_; i++) {
     if (in_shape0_[i] != in_shape1_[i]) {
       if (in_shape0_[i] == 1) {
         out_shape_[i] = in_shape1_[i];
mindspore/lite/src/ops/batch_to_space.cc
@@ -85,7 +85,7 @@ int BatchToSpace::InferShape(std::vector<lite::tensor::Tensor *> inputs, std::ve
     MS_LOG(ERROR) << "Crops size should be " << kCropsSize;
     return RET_PARAM_INVALID;
   }
-  size_t mul_block_shape = 1;
+  int mul_block_shape = 1;
   for (size_t i = 0; i < kBlockShapeSize; ++i) {
     if (block_shape[i] <= 0) {
mindspore/lite/src/ops/concat.cc
@@ -58,7 +58,7 @@ int Concat::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor
   MS_ASSERT(concat_prim != nullptr);
   auto input0_shape = inputs_.at(0)->shape();
-  int axis = GetAxis() < 0 ? GetAxis() + input0_shape.size() : GetAxis();
+  auto axis = GetAxis() < 0 ? GetAxis() + input0_shape.size() : GetAxis();
   if (axis < 0 || axis >= input0_shape.size()) {
     MS_LOG(ERROR) << "Invalid axis: " << axis;
     return RET_PARAM_INVALID;
mindspore/lite/src/ops/embedding_lookup.cc
@@ -58,7 +58,7 @@ int EmbeddingLookup::InferShape(std::vector<tensor::Tensor *> inputs_, std::vect
   for (size_t i = 0; i < embedding_shape.size(); ++i) {
     output_shape.push_back(embedding_shape.at(i));
   }
-  for (int i = 1; i < inputs_.size() - 1; ++i) {
+  for (size_t i = 1; i < inputs_.size() - 1; ++i) {
     auto embedding_shape_t = inputs_.at(i)->shape();
     embedding_shape_t.erase(embedding_shape_t.begin());
     if (embedding_shape_t != embedding_shape) {
mindspore/lite/src/ops/expand_dims.cc
@@ -51,7 +51,7 @@ int ExpandDims::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<te
   if (dim < 0) {
     dim += input->shape().size() + 1;
   }
-  if (dim > input->shape().size()) {
+  if (dim > static_cast<int>(input->shape().size())) {
     MS_LOG(ERROR) << "attribute dim out of range";
     return RET_INPUT_TENSOR_ERROR;
   }
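Where the loop variable must stay signed, as with dim above (negative values are meaningful before normalization), the commit casts the unsigned .size() to int at the comparison instead; full_connection.cc, split.cc, and strided_slice.cc below use the same form. A minimal sketch of that variant, with hypothetical names:

    #include <vector>

    // Validate an insertion position that may be given relative to the end.
    // dim stays int because negative values are meaningful on entry.
    bool DimValid(int dim, const std::vector<int> &shape) {
      if (dim < 0) {
        dim += static_cast<int>(shape.size()) + 1;  // normalize a negative dim
      }
      // Cast the size once so both operands of the comparison are signed.
      return dim >= 0 && dim <= static_cast<int>(shape.size());
    }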
mindspore/lite/src/ops/flatten.cc
@@ -42,7 +42,7 @@ int Flatten::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tenso
   std::vector<int> output_shape(2);
   output_shape[0] = input_shape[0];
   output_shape[1] = 1;
-  for (int i = 1; i < input_shape.size(); i++) {
+  for (size_t i = 1; i < input_shape.size(); i++) {
     output_shape[1] *= input_shape[i];
   }
   output->set_shape(output_shape);
mindspore/lite/src/ops/full_connection.cc
@@ -60,7 +60,7 @@ int FullConnection::InferShape(std::vector<lite::tensor::Tensor *> inputs_,
     MS_LOG(ERROR) << "Input tensors num error";
     return 1;
   }
-  if (GetAxis() < 1 || GetAxis() > input0->shape().size()) {
+  if (GetAxis() < 1 || GetAxis() > static_cast<int>(input0->shape().size())) {
     MS_LOG(ERROR) << "FullConnection axis invalid";
     return 1;
   }
mindspore/lite/src/ops/gather.cc
@@ -83,7 +83,7 @@ int Gather::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor
   }
   std::vector<int> out_shape{in_shape};
   out_shape.erase(out_shape.begin() + axis);
-  for (size_t i = 0; i < indices_rank; i++) {
+  for (int i = 0; i < indices_rank; i++) {
     out_shape.insert(out_shape.begin() + axis, indices_shape[i]);
   }
   output->set_shape(out_shape);
mindspore/lite/src/ops/matmul.cc
@@ -56,7 +56,7 @@ int MatMul::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor
     MS_LOG(ERROR) << "inputs shape is invalid";
     return RET_INPUT_TENSOR_ERROR;
   }
-  for (int i = 0; i < a_shape.size() - 2; ++i) {
+  for (size_t i = 0; i < a_shape.size() - 2; ++i) {
     if (a_shape[i] != b_shape[i]) {
       MS_LOG(ERROR) << "Op MatMul's dimensions must be equal";
       return RET_INPUT_TENSOR_ERROR;
mindspore/lite/src/ops/mean.cc
@@ -67,7 +67,7 @@ int Mean::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::
   // reduce on all axes
   if (num_axes == 0) {
     if (keep_dims) {
-      for (auto i = 0; i < in_shape.size(); i++) {
+      for (size_t i = 0; i < in_shape.size(); i++) {
         out_shape.push_back(1);
       }
     }
@@ -78,7 +78,7 @@ int Mean::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::
   // reduce on selected axes
   for (size_t i = 0; i < in_shape.size(); i++) {
     bool reduce_axis = false;
-    for (int idx = 0; idx < num_axes; ++idx) {
+    for (size_t idx = 0; idx < num_axes; ++idx) {
       if (static_cast<size_t>(axes[idx]) == i) {
         reduce_axis = true;
         break;
mindspore/lite/src/ops/prior_box.cc
@@ -110,7 +110,7 @@ int PriorBox::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tens
   std::vector<float> different_aspect_ratios{1.0f};
   auto aspect_ratios = GetAspectRatios();
   MS_ASSERT(aspect_ratios != nullptr);
-  for (auto i = 0; i < aspect_ratios.size(); i++) {
+  for (size_t i = 0; i < aspect_ratios.size(); i++) {
     float ratio = aspect_ratios[i];
     bool exist = std::any_of(different_aspect_ratios.begin(), different_aspect_ratios.end(),
                              [&](float v) { return abs(ratio - v) < 1e-6; });
mindspore/lite/src/ops/reduce.cc
@@ -71,7 +71,7 @@ int Reduce::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor
   // reduce on all axes
   if (num_axes == 0) {
     if (keep_dims) {
-      for (auto i = 0; i < in_shape.size(); i++) {
+      for (size_t i = 0; i < in_shape.size(); i++) {
        out_shape.push_back(1);
       }
     }
@@ -82,7 +82,7 @@ int Reduce::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor
   // reduce on selected axes
   for (size_t i = 0; i < in_shape.size(); i++) {
     bool reduce_axis = false;
-    for (int idx = 0; idx < num_axes; ++idx) {
+    for (size_t idx = 0; idx < num_axes; ++idx) {
       if (static_cast<size_t>(axes[idx]) == i || static_cast<size_t>(axes[idx] + in_shape.size()) == i) {
         reduce_axis = true;
         break;
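With the index now unsigned, the inner comparison in reduce.cc matches an axis entry both as written and shifted by the tensor rank, each cast to size_t, which is how negative axes (counted from the back) are recognized. A self-contained sketch of that comparison, with hypothetical names:

    #include <cstddef>
    #include <vector>

    // True if dimension i is listed in axes; entries may be negative,
    // in which case they count from the last dimension.
    bool IsReducedAxis(size_t i, const std::vector<int> &axes, size_t rank) {
      for (size_t idx = 0; idx < axes.size(); ++idx) {
        if (static_cast<size_t>(axes[idx]) == i ||
            static_cast<size_t>(axes[idx] + static_cast<int>(rank)) == i) {
          return true;  // matches the positive index or its negative alias
        }
      }
      return false;
    }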
mindspore/lite/src/ops/reshape.cc
@@ -80,15 +80,15 @@ void CalShape(const T *data, const std::vector<tensor::Tensor *> &inputs, std::v
   int input_count = inputs[0]->ElementsNum();
   int index = 0;
   int size = 1;
-  for (size_t i = 0; i < shape_size; i++) {
-    if (data[i] == -1) {
+  for (int i = 0; i < shape_size; i++) {
+    if (static_cast<int>(data[i]) == -1) {
       index = i;
     } else {
       size *= data[i];
     }
     out_shape->push_back(data[i]);
   }
-  if (data[index] == -1) {
+  if (static_cast<int>(data[index]) == -1) {
     (*out_shape)[index] = input_count / size;
   }
mindspore/lite/src/ops/slice.cc
@@ -67,7 +67,7 @@ int SliceOp::InferShape(std::vector<lite::tensor::Tensor *> inputs, std::vector<
   std::vector<int32_t> slice_begin(GetBegin().begin(), GetBegin().end());
   std::vector<int32_t> slice_size(GetSize().begin(), GetSize().end());
   std::vector<int32_t> output_shape(input_shape.size());
-  for (int i = 0; i < input_shape.size(); ++i) {
+  for (size_t i = 0; i < input_shape.size(); ++i) {
     if (slice_size[i] < 0 && slice_size[i] != -1) {
       MS_LOG(ERROR) << "Invalid size input!size[" << i << "]=" << slice_size[i];
       return RET_PARAM_INVALID;
mindspore/lite/src/ops/split.cc
@@ -62,7 +62,7 @@ int Split::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor:
     return RET_ERROR;
   }
   int number_split = spilt_prim->numberSplit();
-  if (outputs_.size() != number_split) {
+  if (static_cast<int>(outputs_.size()) != number_split) {
     MS_LOG(ERROR) << "outputs number is not equal to " << number_split;
     return RET_ERROR;
   }
mindspore/lite/src/ops/squeeze.cc
@@ -62,15 +62,15 @@ int Squeeze::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tenso
     axes_.push_back(*iter);
   }
   if (axes_.size() == 0) {
-    for (int i = 0; i < in_shape.size(); i++) {
+    for (size_t i = 0; i < in_shape.size(); i++) {
       if (in_shape[i] != 1) {
         out_shape.push_back(in_shape[i]);
       }
     }
   } else {
-    int axisIdx = 0;
-    for (int i = 0; i < in_shape.size(); i++) {
-      if (axisIdx < axes_.size() && axes_[axisIdx] == i) {
+    size_t axisIdx = 0;
+    for (size_t i = 0; i < in_shape.size(); i++) {
+      if (axisIdx < axes_.size() && axes_[axisIdx] == static_cast<int>(i)) {
         MS_ASSERT(in_shape[i] == 1);
         axisIdx++;
         continue;
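squeeze.cc shows the mirror image of the static_cast pattern: the loop index becomes size_t, and it is the index, not the container size, that gets cast where it meets the signed axis values. A standalone sketch of that shape (hypothetical names):

    #include <cstddef>
    #include <vector>

    // Count dimensions that are explicitly squeezed away; axes holds signed
    // dimension numbers, so the unsigned index is cast at the comparison.
    size_t CountSqueezed(const std::vector<int> &shape, const std::vector<int> &axes) {
      size_t count = 0;
      size_t axis_idx = 0;
      for (size_t i = 0; i < shape.size(); i++) {
        if (axis_idx < axes.size() && axes[axis_idx] == static_cast<int>(i)) {
          ++count;
          ++axis_idx;
        }
      }
      return count;
    }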
mindspore/lite/src/ops/stack.cc
@@ -64,7 +64,7 @@ int Stack::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::
   auto input_shape = input->shape();
   std::vector<int32_t> output_shape = input_shape;
-  int axis = GetAxis() < 0 ? GetAxis() + input_shape.size() : GetAxis();
+  auto axis = GetAxis() < 0 ? GetAxis() + input_shape.size() : GetAxis();
   if (axis < 0 || axis > input_shape.size()) {
     MS_LOG(ERROR) << "Invalid axis " << GetAxis();
     return RET_PARAM_INVALID;
mindspore/lite/src/ops/strided_slice.cc
@@ -89,7 +89,7 @@ constexpr int kStridedSliceInputNum = 1;
 }  // namespace
 void StridedSlice::ApplyNewAxisMask() {
-  for (int i = 0; i < new_axis_mask_.size(); i++) {
+  for (size_t i = 0; i < new_axis_mask_.size(); i++) {
     if (new_axis_mask_.at(i)) {
       ndim_ += 1;
       in_shape_.insert(in_shape_.begin() + i, 1);
@@ -112,7 +112,7 @@ void StridedSlice::ApplyNewAxisMask() {
 std::vector<int> StridedSlice::ApplyShrinkMask(std::vector<int> out_shape) {
   auto old_out_shape = out_shape;
   out_shape.clear();
-  for (int i = 0; i < shrink_axis_mask_.size(); i++) {
+  for (size_t i = 0; i < shrink_axis_mask_.size(); i++) {
     if (shrink_axis_mask_.at(i)) {
       ends_.at(i) = begins_.at(i) + 1;
       strides_.at(i) = 1;
@@ -120,7 +120,7 @@ std::vector<int> StridedSlice::ApplyShrinkMask(std::vector<int> out_shape) {
       out_shape.emplace_back(old_out_shape.at(i));
     }
   }
-  for (int i = shrink_axis_mask_.size(); i < old_out_shape.size(); i++) {
+  for (size_t i = shrink_axis_mask_.size(); i < old_out_shape.size(); i++) {
     out_shape.emplace_back(old_out_shape.at(i));
   }
   return out_shape;
@@ -128,7 +128,7 @@ std::vector<int> StridedSlice::ApplyShrinkMask(std::vector<int> out_shape) {
 /*only one bit will be used if multiple bits are true.*/
 void StridedSlice::ApplyEllipsisMask() {
-  for (int i = 0; i < ellipsis_mask_.size(); i++) {
+  for (size_t i = 0; i < ellipsis_mask_.size(); i++) {
     if (ellipsis_mask_.at(i)) {
       begins_.at(i) = 0;
       ends_.at(i) = in_shape_.at(i);
@@ -204,7 +204,7 @@ int StridedSlice::InferShape(std::vector<lite::tensor::Tensor *> inputs, std::ve
   output_shape.clear();
   output_shape.resize(in_shape_.size());
-  for (int i = 0; i < in_shape_.size(); i++) {
+  for (int i = 0; i < static_cast<int>(in_shape_.size()); i++) {
     if (i < ndim_ && new_axis_mask_.at(i)) {
       output_shape.at(i) = 1;
     } else {
mindspore/lite/src/ops/transpose.cc
@@ -63,7 +63,7 @@ int Transpose::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<ten
   std::vector<int> in_shape = input->shape();
   std::vector<int> out_shape;
   out_shape.resize(perm.size());
-  for (int i = 0; i < perm.size(); ++i) {
+  for (size_t i = 0; i < perm.size(); ++i) {
     out_shape[i] = in_shape[perm[i]];
   }
   output->set_shape(out_shape);
mindspore/lite/src/ops/unsqueeze.cc
@@ -67,10 +67,10 @@ int Unsqueeze::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<ten
     }
   } else {
     auto sz = in_rank + dim_rank;
-    int in_itr = 0;
-    int ax_itr = 0;
-    for (int i = 0; i < sz; i++) {
-      if (ax_itr < dim_rank && dims[ax_itr] == i) {
+    size_t in_itr = 0;
+    size_t ax_itr = 0;
+    for (size_t i = 0; i < sz; i++) {
+      if (ax_itr < dim_rank && dims[ax_itr] == static_cast<int>(i)) {
         out_shape.emplace_back(1);
         ax_itr++;
       } else if (ax_itr < dim_rank && dims[ax_itr] + sz == i) {
mindspore/lite/src/ops/unstack.cc
@@ -39,7 +39,7 @@ int Unstack::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor
   MS_ASSERT(input != nullptr);
   auto input_shape = input->shape();
-  int axis = GetAxis() < 0 ? GetAxis() + input_shape.size() : GetAxis();
+  auto axis = GetAxis() < 0 ? GetAxis() + input_shape.size() : GetAxis();
   if (axis < 0 || axis >= input_shape.size()) {
     MS_LOG(ERROR) << "Invalid axis " << GetAxis();
     return RET_PARAM_INVALID;
mindspore/lite/src/ops/where.cc
@@ -66,8 +66,8 @@ int Where::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor:
   auto shape_tmp1 = inputs_.at(1)->shape();
   auto shape_tmp2 = inputs_.at(2)->shape();
   int axisout = 0;
-  int temp = 0;
-  for (int j = 0; j < shape_tmp.size(); j++) {
+  size_t temp = 0;
+  for (size_t j = 0; j < shape_tmp.size(); j++) {
     if (shape_tmp[j] == shape_tmp1[j] && shape_tmp[j] != shape_tmp2[j]) {
       axisout = j;
       break;
mindspore/lite/tools/anf_exporter/anf_exporter.cc
@@ -118,7 +118,7 @@ int AnfExporter::ConvertQuantParam(const std::unique_ptr<schema::MetaGraphT> &me
   // activation
   auto input_quant_params = primitive->GetInputQuantParams();
   auto node_type = primitive->GetPrimitiveT()->value.type;
-  for (int i = 0; i < input_quant_params.size(); i++) {
+  for (size_t i = 0; i < input_quant_params.size(); i++) {
     if (i >= dst_node->inputIndex.size()) {
       MS_LOG(ERROR) << "node: " << dst_node->name << " input has " << input_quant_params.size()
                     << " quant_params; but only " << dst_node->inputIndex.size() << " input";
@@ -375,7 +375,7 @@ void AnfExporter::SetOpOutputNode(const CNodePtr &cnode, const std::unique_ptr<s
   if (utils::isa<abstract::AbstractTuple>(cnode->abstract())) {
     auto tuple = std::reinterpret_pointer_cast<abstract::AbstractTuple>(cnode->abstract());
-    for (int i = 0; i < tuple->size(); i++) {
+    for (size_t i = 0; i < tuple->size(); i++) {
       auto msTensor = new schema::TensorT();
       msTensor->nodeType = schema::NodeType_Parameter;
       fb_node->outputIndex.emplace_back(meta_graphT->allTensors.size());
mindspore/lite/tools/converter/legacy_optimizer/graph/eltwise_format_trans_pass.cc
@@ -136,7 +136,7 @@ STATUS EltwiseFormatTransPass::Run(schema::MetaGraphT *graph) {
   STATUS status = RET_OK;
   auto input_tensor_size = (*iter)->inputIndex.size();
-  for (auto i = 0; i < input_tensor_size; i++) {
+  for (size_t i = 0; i < input_tensor_size; i++) {
     iter = InsertFormatTransNode(graph, iter, kBefore, i, pre_insert_trans_type_, &status);
     if (status != RET_OK) {
       MS_LOG(ERROR) << "Insert" << pre_insert_trans_type_ << "before " << (*iter)->name << " failed";
@@ -144,7 +144,7 @@ STATUS EltwiseFormatTransPass::Run(schema::MetaGraphT *graph) {
     }
   }
   auto output_tensor_size = (*iter)->outputIndex.size();
-  for (auto i = 0; i < output_tensor_size; i++) {
+  for (size_t i = 0; i < output_tensor_size; i++) {
     iter = InsertFormatTransNode(graph, iter, kAfter, i, post_insert_trans_type_, &status);
     if (status != RET_OK) {
       MS_LOG(ERROR) << "Insert" << post_insert_trans_type_ << "Node before " << (*iter)->name << " failed";
mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.cc
@@ -37,16 +37,15 @@ STATUS CaffeBatchNormParser::Parse(const caffe::LayerParameter &proto, const caf
   // check bottom size
   if (proto.bottom_size() != CAFFE_BATCHNORMAL_BOTTOM_SIZE) {
-    // MS_LOGE("Layer %s bottom numbers is error, it must be %d, but is %d", proto.name().c_str(),
-    // CAFFE_BATCHNORMAL_BOTTOM_SIZE, proto.bottom_size());
+    MS_LOG(ERROR) << "Layer " << proto.name().c_str() << "bottom numbers is error, it must be " \
+                  << CAFFE_BATCHNORMAL_BOTTOM_SIZE << "but is " << proto.bottom_size();
     return RET_ERROR;
   }
   // check top size
   if (proto.top_size() != CAFFE_BATCHNORMAL_TOP_SIZE) {
-    // MS_LOGE("Layer %s top numbers is error, it must be %d, but is %d", \
-    //         proto.name().c_str(), CAFFE_BATCHNORMAL_TOP_SIZE,
-    // proto.top_size());
+    MS_LOG(ERROR) << "Layer " << proto.name().c_str() << "top numbers is error, it must be " \
+                  << CAFFE_BATCHNORMAL_TOP_SIZE << "but is " << proto.top_size();
    return RET_ERROR;
   }
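The Caffe parser diffs above and below replace commented-out printf-style MS_LOGE calls with the live stream-style MS_LOG macro, which needs no format specifiers to keep in sync with its arguments. A rough stand-in for the pattern (LOG_STREAM is a hypothetical substitute for the real macro):

    #include <iostream>
    #include <string>

    // Hypothetical stand-in for MindSpore's stream-style MS_LOG(ERROR) macro.
    #define LOG_STREAM(level) std::cerr << "[" << #level << "] "

    void CheckBottomSize(const std::string &layer_name, int bottom_size, int expected) {
      if (bottom_size != expected) {
        // printf style (the commented-out form being removed):
        //   MS_LOGE("Layer %s bottom numbers is error, it must be %d, but is %d", ...);
        // stream style (the form being added):
        LOG_STREAM(ERROR) << "Layer " << layer_name << " bottom numbers is error, it must be "
                          << expected << " but is " << bottom_size << std::endl;
      }
    }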
mindspore/lite/tools/converter/parser/caffe/caffe_inspector.cc
@@ -23,7 +23,7 @@ STATUS CaffeInspector::InspectModel(const caffe::NetParameter &proto) {
   net = proto;
   if (proto.layer_size() == 0) {
-    // MS_LOGE("net layer num is zero, prototxt file may be invalid.");
+    MS_LOG(ERROR) << "net layer num is zero, prototxt file may be invalid.";
     return RET_ERROR;
   }
@@ -32,12 +32,13 @@ STATUS CaffeInspector::InspectModel(const caffe::NetParameter &proto) {
   SetTopsAndBottoms();
   FindInputAndOutput();
+
   return RET_OK;
 }

 STATUS CaffeInspector::ParseInput() {
   if (net.input_size() > 0) {
-    // MS_LOGI("This net exist input.");
-    for (int i = 0; i < net.input_size(); i++) {
+    MS_LOG(INFO) << "This net exist input.";
+    for (size_t i = 0; i < net.input_size(); i++) {
       graphInput.insert(net.input(i));
     }
   }
@@ -55,6 +56,7 @@ STATUS CaffeInspector::FindInputAndOutput() {
       graphOutput.insert(iter);
     }
   }
+
   return RET_OK;
 }

 STATUS CaffeInspector::SetTopsAndBottoms() {
@@ -73,6 +75,7 @@ STATUS CaffeInspector::SetTopsAndBottoms() {
       layerBottoms.insert(layer.bottom(j));
     }
   }
+
   return RET_OK;
 }
 }  // namespace lite
 }  // namespace mindspore
mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.cc
@@ -95,6 +95,7 @@ STATUS ConvertShape(const caffe::BlobProto &proto, std::vector<int32_t> *shape)
       shape->push_back(proto.shape().dim(i));
     }
   }
+
   return RET_OK;
 }
 }  // namespace lite
 }  // namespace mindspore
mindspore/lite/tools/converter/parser/tflite/tflite_addn_parser.cc
@@ -49,7 +49,7 @@ STATUS TfliteAddNParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_
   op->primitive->value.value = attr.release();
   // set input
-  for (int i = 0; i < tflite_op->inputs.size(); i++) {
+  for (size_t i = 0; i < tflite_op->inputs.size(); i++) {
     AddOpInput(op, tensors_id, tensors_format, tensors_id_map, tflite_op->inputs[i], tensors_id->size(),
                tflite_tensors.size(), schema::Format_NHWC);
   }
mindspore/lite/tools/converter/quantizer/general_bitpacking.cc
@@ -74,7 +74,7 @@ void BitPack::BitPacking(const std::vector<uint8_t>& originDataVec, std::vector<
   size_t remainBitData = bitDataVec.size();
   if (8 > remainBitData && remainBitData > 0) {
-    for (int i = 0; i < 8 - remainBitData; i++) {
+    for (size_t i = 0; i < 8 - remainBitData; i++) {
       bitDataVec.push(0);
     }
     PackFromOriginToUint8(bitDataVec, packedDataVec);
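Switching the pad loop in general_bitpacking.cc to size_t is safe only because the enclosing guard guarantees remainBitData is between 1 and 7; without it, the all-unsigned expression 8 - remainBitData would wrap around instead of going negative. A small cautionary sketch:

    #include <cstddef>
    #include <cstdio>

    int main() {
      size_t remain_bits = 10;  // imagine the (remainBitData < 8) guard missing
      // 8 converts to size_t, so the subtraction is unsigned and wraps:
      size_t pad = 8 - remain_bits;
      std::printf("pad = %zu\n", pad);  // an enormous value, not -2
      return 0;
    }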