Commit 7d0c8e52 authored by mindspore-ci-bot, committed by Gitee

!4756 Clean cmake building warnings.

Merge pull request !4756 from wangshaocong/lite_clean
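Most hunks below silence `-Wsign-compare` and similar warnings with one of two recurring patterns: switch the loop index to `size_t` when it is only compared against a container's `.size()`, or cast the size to `int` when the other operand must stay signed. A minimal standalone sketch of both patterns (hypothetical `CountElements`/`AxisInRange` helpers, not code from this diff). Note that making an axis variable unsigned, as some hunks do via `auto`, turns a later `axis < 0` check into a no-op, so the sketch keeps the value signed where negative input is meaningful:

```cpp
#include <cstddef>
#include <vector>

// Pattern 1: make the index unsigned so `i < shape.size()` compares
// like with like.
int CountElements(const std::vector<int> &shape) {
  int count = 1;
  for (size_t i = 0; i < shape.size(); ++i) {
    count *= shape[i];
  }
  return count;
}

// Pattern 2: the axis can legitimately be negative, so keep it signed
// and cast the container size instead.
bool AxisInRange(int axis, const std::vector<int> &shape) {
  return axis >= 0 && axis < static_cast<int>(shape.size());
}
```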
@@ -616,7 +616,7 @@ build_lite()
          -DANDROID_STL="c++_shared" -DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
          -DBUILD_DEVICE=on -DPLATFORM_ARM32=on -DENABLE_NEON=on -DSUPPORT_TRAIN=${SUPPORT_TRAIN} -DBUILD_CONVERTER=off \
          -DSUPPORT_GPU=${ENABLE_GPU} -DOFFLINE_COMPILE=${OPENCL_OFFLINE_COMPILE} -DBUILD_MINDDATA=${COMPILE_MINDDATA_LITE} \
          -DCMAKE_INSTALL_PREFIX=${BASEPATH}/output/tmp "${BASEPATH}/mindspore/lite"
    else
      cmake -DBUILD_DEVICE=on -DPLATFORM_ARM64=off -DBUILD_CONVERTER=${ENABLE_CONVERTER} -DSUPPORT_TRAIN=${SUPPORT_TRAIN} \
        -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DSUPPORT_GPU=${ENABLE_GPU} -DBUILD_MINDDATA=${COMPILE_MINDDATA_LITE} \
......
@@ -48,7 +48,7 @@ int AddN::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::T
  if (!GetInferFlag()) {
    return RET_OK;
  }
-  for (int i = 1; i < inputs.size(); ++i) {
+  for (size_t i = 1; i < inputs.size(); ++i) {
    if (inputs.at(i)->shape() != inputs.at(0)->shape()) {
      MS_LOG(ERROR) << "AddN inputs shape is not equal!";
      return RET_INPUT_TENSOR_ERROR;
......
@@ -63,7 +63,7 @@ int ArgMax::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor
  }
  std::vector<int> output_shape(input->shape());
  auto input_shape_size = input->shape().size();
-  int axis = GetAxis() < 0 ? GetAxis() + input_shape_size : GetAxis();
+  auto axis = GetAxis() < 0 ? GetAxis() + input_shape_size : GetAxis();
  if (axis >= input_shape_size || axis < 0) {
    MS_LOG(ERROR) << "Invalid axis " << GetAxis() << ", input shape size: " << input_shape_size;
    return RET_PARAM_INVALID;
......
@@ -61,7 +61,7 @@ int ArgMin::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<
    return RET_OK;
  }
  auto input_shape_size = input->shape().size();
-  int axis = GetAxis() < 0 ? GetAxis() + input_shape_size : GetAxis();
+  auto axis = GetAxis() < 0 ? GetAxis() + input_shape_size : GetAxis();
  if (axis >= input_shape_size || axis < 0) {
    MS_LOG(ERROR) << "Invalid axis " << GetAxis() << ", input shape size: " << input_shape_size;
    return RET_PARAM_INVALID;
......
@@ -55,7 +55,7 @@ int Arithmetic::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vec
    ndim_ = input_shape1.size();
    auto fill_dim_num = input_shape1.size() - input_shape0.size();
    int j = 0;
-   for (int i = 0; i < input_shape1.size(); i++) {
+   for (size_t i = 0; i < input_shape1.size(); i++) {
      if (i < fill_dim_num) {
        in_shape0_[i] = 1;
      } else {
@@ -68,7 +68,7 @@ int Arithmetic::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vec
    ndim_ = input_shape0.size();
    auto fill_dim_num = input_shape0.size() - input_shape1.size();
    int j = 0;
-   for (int i = 0; i < input_shape0.size(); i++) {
+   for (size_t i = 0; i < input_shape0.size(); i++) {
      if (i < fill_dim_num) {
        in_shape1_[i] = 1;
      } else {
@@ -77,14 +77,14 @@ int Arithmetic::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vec
      in_shape0_[i] = input_shape0[i];
    }
  } else {
-   for (int i = 0; i < input_shape0.size(); i++) {
+   for (size_t i = 0; i < input_shape0.size(); i++) {
      in_shape1_[i] = input_shape1[i];
      in_shape0_[i] = input_shape0[i];
    }
  }
  std::vector<int> output_shape;
-  for (size_t i = 0; i < ndim_; i++) {
+  for (int i = 0; i < ndim_; i++) {
    if (in_shape0_[i] != in_shape1_[i]) {
      if (in_shape0_[i] == 1) {
        out_shape_[i] = in_shape1_[i];
......
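For context, the loops in these hunks implement right-aligned shape broadcasting: the shorter shape is padded with leading 1s, and each output dimension takes the non-1 extent. A simplified, self-contained sketch of that rule (hypothetical `BroadcastShape` helper; the real `InferShape` also fills the `in_shape0_`/`in_shape1_`/`out_shape_` members):

```cpp
#include <cstddef>
#include <optional>
#include <vector>

// Right-aligned broadcast: pad the shorter shape with leading 1s, then
// each dimension pair must be equal or contain a 1.
std::optional<std::vector<int>> BroadcastShape(std::vector<int> a, std::vector<int> b) {
  if (a.size() < b.size()) a.insert(a.begin(), b.size() - a.size(), 1);
  if (b.size() < a.size()) b.insert(b.begin(), a.size() - b.size(), 1);
  std::vector<int> out(a.size());
  for (size_t i = 0; i < a.size(); ++i) {
    if (a[i] == b[i]) {
      out[i] = a[i];
    } else if (a[i] == 1) {
      out[i] = b[i];
    } else if (b[i] == 1) {
      out[i] = a[i];
    } else {
      return std::nullopt;  // incompatible dimensions, e.g. {3} vs {4}
    }
  }
  return out;
}
```

For example, `BroadcastShape({2, 3, 1}, {3, 4})` yields `{2, 3, 4}`.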
@@ -85,7 +85,7 @@ int BatchToSpace::InferShape(std::vector<lite::tensor::Tensor *> inputs, std::ve
    MS_LOG(ERROR) << "Crops size should be " << kCropsSize;
    return RET_PARAM_INVALID;
  }
-  size_t mul_block_shape = 1;
+  int mul_block_shape = 1;
  for (size_t i = 0; i < kBlockShapeSize; ++i) {
    if (block_shape[i] <= 0) {
......
@@ -58,7 +58,7 @@ int Concat::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor
  MS_ASSERT(concat_prim != nullptr);
  auto input0_shape = inputs_.at(0)->shape();
-  int axis = GetAxis() < 0 ? GetAxis() + input0_shape.size() : GetAxis();
+  auto axis = GetAxis() < 0 ? GetAxis() + input0_shape.size() : GetAxis();
  if (axis < 0 || axis >= input0_shape.size()) {
    MS_LOG(ERROR) << "Invalid axis: " << axis;
    return RET_PARAM_INVALID;
......
@@ -58,7 +58,7 @@ int EmbeddingLookup::InferShape(std::vector<tensor::Tensor *> inputs_, std::vect
  for (size_t i = 0; i < embedding_shape.size(); ++i) {
    output_shape.push_back(embedding_shape.at(i));
  }
-  for (int i = 1; i < inputs_.size() - 1; ++i) {
+  for (size_t i = 1; i < inputs_.size() - 1; ++i) {
    auto embedding_shape_t = inputs_.at(i)->shape();
    embedding_shape_t.erase(embedding_shape_t.begin());
    if (embedding_shape_t != embedding_shape) {
......
@@ -51,7 +51,7 @@ int ExpandDims::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<te
  if (dim < 0) {
    dim += input->shape().size() + 1;
  }
-  if (dim > input->shape().size()) {
+  if (dim > static_cast<int>(input->shape().size())) {
    MS_LOG(ERROR) << "attribute dim out of range";
    return RET_INPUT_TENSOR_ERROR;
  }
......
@@ -42,7 +42,7 @@ int Flatten::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tenso
  std::vector<int> output_shape(2);
  output_shape[0] = input_shape[0];
  output_shape[1] = 1;
-  for (int i = 1; i < input_shape.size(); i++) {
+  for (size_t i = 1; i < input_shape.size(); i++) {
    output_shape[1] *= input_shape[i];
  }
  output->set_shape(output_shape);
......
@@ -60,7 +60,7 @@ int FullConnection::InferShape(std::vector<lite::tensor::Tensor *> inputs_,
    MS_LOG(ERROR) << "Input tensors num error";
    return 1;
  }
-  if (GetAxis() < 1 || GetAxis() > input0->shape().size()) {
+  if (GetAxis() < 1 || GetAxis() > static_cast<int>(input0->shape().size())) {
    MS_LOG(ERROR) << "FullConnection axis invalid";
    return 1;
  }
......
@@ -83,7 +83,7 @@ int Gather::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor
  }
  std::vector<int> out_shape{in_shape};
  out_shape.erase(out_shape.begin() + axis);
-  for (size_t i = 0; i < indices_rank; i++) {
+  for (int i = 0; i < indices_rank; i++) {
    out_shape.insert(out_shape.begin() + axis, indices_shape[i]);
  }
  output->set_shape(out_shape);
......
@@ -56,7 +56,7 @@ int MatMul::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor
    MS_LOG(ERROR) << "inputs shape is invalid";
    return RET_INPUT_TENSOR_ERROR;
  }
-  for (int i = 0; i < a_shape.size() - 2; ++i) {
+  for (size_t i = 0; i < a_shape.size() - 2; ++i) {
    if (a_shape[i] != b_shape[i]) {
      MS_LOG(ERROR) << "Op MatMul's dimensions must be equal";
      return RET_INPUT_TENSOR_ERROR;
......
@@ -67,7 +67,7 @@ int Mean::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::
  // reduce on all axes
  if (num_axes == 0) {
    if (keep_dims) {
-     for (auto i = 0; i < in_shape.size(); i++) {
+     for (size_t i = 0; i < in_shape.size(); i++) {
        out_shape.push_back(1);
      }
    }
@@ -78,7 +78,7 @@ int Mean::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor::
  // reduce on selected axes
  for (size_t i = 0; i < in_shape.size(); i++) {
    bool reduce_axis = false;
-   for (int idx = 0; idx < num_axes; ++idx) {
+   for (size_t idx = 0; idx < num_axes; ++idx) {
      if (static_cast<size_t>(axes[idx]) == i) {
        reduce_axis = true;
        break;
......
@@ -110,7 +110,7 @@ int PriorBox::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tens
  std::vector<float> different_aspect_ratios{1.0f};
  auto aspect_ratios = GetAspectRatios();
  MS_ASSERT(aspect_ratios != nullptr);
-  for (auto i = 0; i < aspect_ratios.size(); i++) {
+  for (size_t i = 0; i < aspect_ratios.size(); i++) {
    float ratio = aspect_ratios[i];
    bool exist = std::any_of(different_aspect_ratios.begin(), different_aspect_ratios.end(),
                             [&](float v) { return abs(ratio - v) < 1e-6; });
......
@@ -71,7 +71,7 @@ int Reduce::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor
  // reduce on all axes
  if (num_axes == 0) {
    if (keep_dims) {
-     for (auto i = 0; i < in_shape.size(); i++) {
+     for (size_t i = 0; i < in_shape.size(); i++) {
        out_shape.push_back(1);
      }
    }
@@ -82,7 +82,7 @@ int Reduce::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor
  // reduce on selected axes
  for (size_t i = 0; i < in_shape.size(); i++) {
    bool reduce_axis = false;
-   for (int idx = 0; idx < num_axes; ++idx) {
+   for (size_t idx = 0; idx < num_axes; ++idx) {
      if (static_cast<size_t>(axes[idx]) == i || static_cast<size_t>(axes[idx] + in_shape.size()) == i) {
        reduce_axis = true;
        break;
......
@@ -80,15 +80,15 @@ void CalShape(const T *data, const std::vector<tensor::Tensor *> &inputs, std::v
  int input_count = inputs[0]->ElementsNum();
  int index = 0;
  int size = 1;
-  for (size_t i = 0; i < shape_size; i++) {
-   if (data[i] == -1) {
+  for (int i = 0; i < shape_size; i++) {
+   if (static_cast<int>(data[i]) == -1) {
      index = i;
    } else {
      size *= data[i];
    }
    out_shape->push_back(data[i]);
  }
-  if (data[index] == -1) {
+  if (static_cast<int>(data[index]) == -1) {
    (*out_shape)[index] = input_count / size;
  }
}
......
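`CalShape` above follows the usual reshape convention: at most one target dimension may be -1, and it is inferred by dividing the input's element count by the product of the known dimensions. A hedged sketch of that rule (hypothetical `ResolveReshape` helper; the real code reads `data` from a tensor via a template parameter):

```cpp
#include <cstddef>
#include <vector>

// Resolve a single -1 ("infer this dimension") entry in a reshape target.
// element_count is the total element count of the input tensor.
std::vector<int> ResolveReshape(std::vector<int> shape, int element_count) {
  int known = 1;
  int wildcard = -1;  // index of the -1 entry, if present
  for (size_t i = 0; i < shape.size(); ++i) {
    if (shape[i] == -1) {
      wildcard = static_cast<int>(i);
    } else {
      known *= shape[i];
    }
  }
  if (wildcard >= 0 && known != 0) {
    shape[wildcard] = element_count / known;  // e.g. 24 elements, {-1, 4} -> {6, 4}
  }
  return shape;
}
```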
@@ -67,7 +67,7 @@ int SliceOp::InferShape(std::vector<lite::tensor::Tensor *> inputs, std::vector<
  std::vector<int32_t> slice_begin(GetBegin().begin(), GetBegin().end());
  std::vector<int32_t> slice_size(GetSize().begin(), GetSize().end());
  std::vector<int32_t> output_shape(input_shape.size());
-  for (int i = 0; i < input_shape.size(); ++i) {
+  for (size_t i = 0; i < input_shape.size(); ++i) {
    if (slice_size[i] < 0 && slice_size[i] != -1) {
      MS_LOG(ERROR) << "Invalid size input!size[" << i << "]=" << slice_size[i];
      return RET_PARAM_INVALID;
......
@@ -62,7 +62,7 @@ int Split::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor:
    return RET_ERROR;
  }
  int number_split = spilt_prim->numberSplit();
-  if (outputs_.size() != number_split) {
+  if (static_cast<int>(outputs_.size()) != number_split) {
    MS_LOG(ERROR) << "outputs number is not equal to " << number_split;
    return RET_ERROR;
  }
......
@@ -62,15 +62,15 @@ int Squeeze::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tenso
    axes_.push_back(*iter);
  }
  if (axes_.size() == 0) {
-   for (int i = 0; i < in_shape.size(); i++) {
+   for (size_t i = 0; i < in_shape.size(); i++) {
      if (in_shape[i] != 1) {
        out_shape.push_back(in_shape[i]);
      }
    }
  } else {
-   int axisIdx = 0;
-   for (int i = 0; i < in_shape.size(); i++) {
-     if (axisIdx < axes_.size() && axes_[axisIdx] == i) {
+   size_t axisIdx = 0;
+   for (size_t i = 0; i < in_shape.size(); i++) {
+     if (axisIdx < axes_.size() && axes_[axisIdx] == static_cast<int>(i)) {
        MS_ASSERT(in_shape[i] == 1);
        axisIdx++;
        continue;
......
@@ -64,7 +64,7 @@ int Stack::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor::
  auto input_shape = input->shape();
  std::vector<int32_t> output_shape = input_shape;
-  int axis = GetAxis() < 0 ? GetAxis() + input_shape.size() : GetAxis();
+  auto axis = GetAxis() < 0 ? GetAxis() + input_shape.size() : GetAxis();
  if (axis < 0 || axis > input_shape.size()) {
    MS_LOG(ERROR) << "Invalid axis " << GetAxis();
    return RET_PARAM_INVALID;
......
@@ -89,7 +89,7 @@ constexpr int kStridedSliceInputNum = 1;
}  // namespace
void StridedSlice::ApplyNewAxisMask() {
-  for (int i = 0; i < new_axis_mask_.size(); i++) {
+  for (size_t i = 0; i < new_axis_mask_.size(); i++) {
    if (new_axis_mask_.at(i)) {
      ndim_ += 1;
      in_shape_.insert(in_shape_.begin() + i, 1);
@@ -112,7 +112,7 @@ void StridedSlice::ApplyNewAxisMask() {
std::vector<int> StridedSlice::ApplyShrinkMask(std::vector<int> out_shape) {
  auto old_out_shape = out_shape;
  out_shape.clear();
-  for (int i = 0; i < shrink_axis_mask_.size(); i++) {
+  for (size_t i = 0; i < shrink_axis_mask_.size(); i++) {
    if (shrink_axis_mask_.at(i)) {
      ends_.at(i) = begins_.at(i) + 1;
      strides_.at(i) = 1;
@@ -120,7 +120,7 @@ std::vector<int> StridedSlice::ApplyShrinkMask(std::vector<int> out_shape) {
      out_shape.emplace_back(old_out_shape.at(i));
    }
  }
-  for (int i = shrink_axis_mask_.size(); i < old_out_shape.size(); i++) {
+  for (size_t i = shrink_axis_mask_.size(); i < old_out_shape.size(); i++) {
    out_shape.emplace_back(old_out_shape.at(i));
  }
  return out_shape;
@@ -128,7 +128,7 @@ std::vector<int> StridedSlice::ApplyShrinkMask(std::vector<int> out_shape) {
/*only one bit will be used if multiple bits are true.*/
void StridedSlice::ApplyEllipsisMask() {
-  for (int i = 0; i < ellipsis_mask_.size(); i++) {
+  for (size_t i = 0; i < ellipsis_mask_.size(); i++) {
    if (ellipsis_mask_.at(i)) {
      begins_.at(i) = 0;
      ends_.at(i) = in_shape_.at(i);
@@ -204,7 +204,7 @@ int StridedSlice::InferShape(std::vector<lite::tensor::Tensor *> inputs, std::ve
  output_shape.clear();
  output_shape.resize(in_shape_.size());
-  for (int i = 0; i < in_shape_.size(); i++) {
+  for (int i = 0; i < static_cast<int>(in_shape_.size()); i++) {
    if (i < ndim_ && new_axis_mask_.at(i)) {
      output_shape.at(i) = 1;
    } else {
......
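The shrink mask handled in these hunks follows standard strided-slice semantics: a set bit collapses that axis to the single element at `begins_[i]` and removes the axis from the output shape. A simplified sketch under the assumption that the mask is stored as per-axis booleans, as in this file (hypothetical `ShrinkOutShape` helper):

```cpp
#include <cstddef>
#include <vector>

// Axes whose shrink bit is set are collapsed to one element and dropped
// from the output shape; the remaining axes keep their extents.
std::vector<int> ShrinkOutShape(const std::vector<int> &out_shape,
                                const std::vector<bool> &shrink_axis_mask) {
  std::vector<int> result;
  for (size_t i = 0; i < out_shape.size(); ++i) {
    bool shrink = i < shrink_axis_mask.size() && shrink_axis_mask[i];
    if (!shrink) {
      result.push_back(out_shape[i]);
    }
  }
  return result;
}
```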
@@ -63,7 +63,7 @@ int Transpose::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<ten
  std::vector<int> in_shape = input->shape();
  std::vector<int> out_shape;
  out_shape.resize(perm.size());
-  for (int i = 0; i < perm.size(); ++i) {
+  for (size_t i = 0; i < perm.size(); ++i) {
    out_shape[i] = in_shape[perm[i]];
  }
  output->set_shape(out_shape);
......
@@ -67,10 +67,10 @@ int Unsqueeze::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<ten
    }
  } else {
    auto sz = in_rank + dim_rank;
-   int in_itr = 0;
-   int ax_itr = 0;
-   for (int i = 0; i < sz; i++) {
-     if (ax_itr < dim_rank && dims[ax_itr] == i) {
+   size_t in_itr = 0;
+   size_t ax_itr = 0;
+   for (size_t i = 0; i < sz; i++) {
+     if (ax_itr < dim_rank && dims[ax_itr] == static_cast<int>(i)) {
        out_shape.emplace_back(1);
        ax_itr++;
      } else if (ax_itr < dim_rank && dims[ax_itr] + sz == i) {
......
@@ -39,7 +39,7 @@ int Unstack::InferShape(std::vector<tensor::Tensor *> inputs, std::vector<tensor
  MS_ASSERT(input != nullptr);
  auto input_shape = input->shape();
-  int axis = GetAxis() < 0 ? GetAxis() + input_shape.size() : GetAxis();
+  auto axis = GetAxis() < 0 ? GetAxis() + input_shape.size() : GetAxis();
  if (axis < 0 || axis >= input_shape.size()) {
    MS_LOG(ERROR) << "Invalid axis " << GetAxis();
    return RET_PARAM_INVALID;
......
@@ -66,8 +66,8 @@ int Where::InferShape(std::vector<tensor::Tensor *> inputs_, std::vector<tensor:
  auto shape_tmp1 = inputs_.at(1)->shape();
  auto shape_tmp2 = inputs_.at(2)->shape();
  int axisout = 0;
-  int temp = 0;
-  for (int j = 0; j < shape_tmp.size(); j++) {
+  size_t temp = 0;
+  for (size_t j = 0; j < shape_tmp.size(); j++) {
    if (shape_tmp[j] == shape_tmp1[j] && shape_tmp[j] != shape_tmp2[j]) {
      axisout = j;
      break;
......
@@ -118,7 +118,7 @@ int AnfExporter::ConvertQuantParam(const std::unique_ptr<schema::MetaGraphT> &me
  // activation
  auto input_quant_params = primitive->GetInputQuantParams();
  auto node_type = primitive->GetPrimitiveT()->value.type;
-  for (int i = 0; i < input_quant_params.size(); i++) {
+  for (size_t i = 0; i < input_quant_params.size(); i++) {
    if (i >= dst_node->inputIndex.size()) {
      MS_LOG(ERROR) << "node: " << dst_node->name << " input has " << input_quant_params.size()
                    << " quant_params; but only " << dst_node->inputIndex.size() << " input";
@@ -375,7 +375,7 @@ void AnfExporter::SetOpOutputNode(const CNodePtr &cnode, const std::unique_ptr<s
  if (utils::isa<abstract::AbstractTuple>(cnode->abstract())) {
    auto tuple = std::reinterpret_pointer_cast<abstract::AbstractTuple>(cnode->abstract());
-   for (int i = 0; i < tuple->size(); i++) {
+   for (size_t i = 0; i < tuple->size(); i++) {
      auto msTensor = new schema::TensorT();
      msTensor->nodeType = schema::NodeType_Parameter;
      fb_node->outputIndex.emplace_back(meta_graphT->allTensors.size());
......
@@ -136,7 +136,7 @@ STATUS EltwiseFormatTransPass::Run(schema::MetaGraphT *graph) {
  STATUS status = RET_OK;
  auto input_tensor_size = (*iter)->inputIndex.size();
-  for (auto i = 0; i < input_tensor_size; i++) {
+  for (size_t i = 0; i < input_tensor_size; i++) {
    iter = InsertFormatTransNode(graph, iter, kBefore, i, pre_insert_trans_type_, &status);
    if (status != RET_OK) {
      MS_LOG(ERROR) << "Insert" << pre_insert_trans_type_ << "before " << (*iter)->name << " failed";
@@ -144,7 +144,7 @@ STATUS EltwiseFormatTransPass::Run(schema::MetaGraphT *graph) {
    }
  }
  auto output_tensor_size = (*iter)->outputIndex.size();
-  for (auto i = 0; i < output_tensor_size; i++) {
+  for (size_t i = 0; i < output_tensor_size; i++) {
    iter = InsertFormatTransNode(graph, iter, kAfter, i, post_insert_trans_type_, &status);
    if (status != RET_OK) {
      MS_LOG(ERROR) << "Insert" << post_insert_trans_type_ << "Node before " << (*iter)->name << " failed";
......
@@ -37,16 +37,15 @@ STATUS CaffeBatchNormParser::Parse(const caffe::LayerParameter &proto, const caf
  // check bottom size
  if (proto.bottom_size() != CAFFE_BATCHNORMAL_BOTTOM_SIZE) {
-   // MS_LOGE("Layer %s bottom numbers is error, it must be %d, but is %d", proto.name().c_str(),
-   //         CAFFE_BATCHNORMAL_BOTTOM_SIZE, proto.bottom_size());
+   MS_LOG(ERROR) << "Layer " << proto.name().c_str() << "bottom numbers is error, it must be " \
+                 << CAFFE_BATCHNORMAL_BOTTOM_SIZE << "but is " << proto.bottom_size();
    return RET_ERROR;
  }
  // check top size
  if (proto.top_size() != CAFFE_BATCHNORMAL_TOP_SIZE) {
-   // MS_LOGE("Layer %s top numbers is error, it must be %d, but is %d", \
-   //         proto.name().c_str(), CAFFE_BATCHNORMAL_TOP_SIZE,
-   //         proto.top_size());
+   MS_LOG(ERROR) << "Layer " << proto.name().c_str() << "top numbers is error, it must be " \
+                 << CAFFE_BATCHNORMAL_TOP_SIZE << "but is " << proto.top_size();
    return RET_ERROR;
  }
......
@@ -23,7 +23,7 @@ STATUS CaffeInspector::InspectModel(const caffe::NetParameter &proto) {
  net = proto;
  if (proto.layer_size() == 0) {
-   // MS_LOGE("net layer num is zero, prototxt file may be invalid.");
+   MS_LOG(ERROR) << "net layer num is zero, prototxt file may be invalid.";
    return RET_ERROR;
  }
@@ -32,12 +32,13 @@ STATUS CaffeInspector::InspectModel(const caffe::NetParameter &proto) {
  SetTopsAndBottoms();
  FindInputAndOutput();
+  return RET_OK;
}
STATUS CaffeInspector::ParseInput() {
  if (net.input_size() > 0) {
-   // MS_LOGI("This net exist input.");
-   for (int i = 0; i < net.input_size(); i++) {
+   MS_LOG(INFO) << "This net exist input.";
+   for (size_t i = 0; i < net.input_size(); i++) {
      graphInput.insert(net.input(i));
    }
  }
@@ -55,6 +56,7 @@ STATUS CaffeInspector::FindInputAndOutput() {
      graphOutput.insert(iter);
    }
  }
+  return RET_OK;
}
STATUS CaffeInspector::SetTopsAndBottoms() {
@@ -73,6 +75,7 @@ STATUS CaffeInspector::SetTopsAndBottoms() {
      layerBottoms.insert(layer.bottom(j));
    }
  }
+  return RET_OK;
}
}  // namespace lite
}  // namespace mindspore
......
@@ -95,6 +95,7 @@ STATUS ConvertShape(const caffe::BlobProto &proto, std::vector<int32_t> *shape)
      shape->push_back(proto.shape().dim(i));
    }
  }
+  return RET_OK;
}
}  // namespace lite
}  // namespace mindspore
......
@@ -49,7 +49,7 @@ STATUS TfliteAddNParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_
  op->primitive->value.value = attr.release();
  // set input
-  for (int i = 0; i < tflite_op->inputs.size(); i++) {
+  for (size_t i = 0; i < tflite_op->inputs.size(); i++) {
    AddOpInput(op, tensors_id, tensors_format, tensors_id_map,
               tflite_op->inputs[i], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC);
  }
......
@@ -74,7 +74,7 @@ void BitPack::BitPacking(const std::vector<uint8_t>& originDataVec, std::vector<
  size_t remainBitData = bitDataVec.size();
  if (8 > remainBitData && remainBitData > 0) {
-   for (int i = 0; i < 8 - remainBitData; i++) {
+   for (size_t i = 0; i < 8 - remainBitData; i++) {
      bitDataVec.push(0);
    }
    PackFromOriginToUint8(bitDataVec, packedDataVec);
......
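The padding loop above rounds a trailing partial group of bits up to a full byte before handing it to `PackFromOriginToUint8`. A minimal sketch of the same idea (hypothetical `PackBits` helper, not the project's `BitPack` API): pack MSB-first and zero-fill the last byte on the right:

```cpp
#include <cstdint>
#include <vector>

// Pack a bit sequence (one bool per bit, MSB first) into bytes,
// zero-padding the final partial byte on the right.
std::vector<uint8_t> PackBits(const std::vector<bool> &bits) {
  std::vector<uint8_t> bytes;
  uint8_t current = 0;
  int filled = 0;
  for (bool bit : bits) {
    current = static_cast<uint8_t>((current << 1) | (bit ? 1 : 0));
    if (++filled == 8) {
      bytes.push_back(current);
      current = 0;
      filled = 0;
    }
  }
  if (filled > 0) {
    bytes.push_back(static_cast<uint8_t>(current << (8 - filled)));
  }
  return bytes;
}
```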