diff --git a/lite/api/light_api.cc b/lite/api/light_api.cc
index 7a7f870a9ac38e4103f3f8a7c6b95a98bb6722db..65ce77276afdb4c3b7a7247cdb8ae120497d8145 100644
--- a/lite/api/light_api.cc
+++ b/lite/api/light_api.cc
@@ -82,7 +82,7 @@ Tensor* LightPredictor::GetInputByName(const std::string& name) {
   if (element == input_names_.end()) {
     LOG(ERROR) << "Model do not have input named with: [" << name
                << "], model's inputs include:";
-    for (int i = 0; i < input_names_.size(); i++) {
+    for (size_t i = 0; i < input_names_.size(); i++) {
      LOG(ERROR) << "[" << input_names_[i] << "]";
     }
     return nullptr;
@@ -114,7 +114,7 @@ void LightPredictor::PrepareFeedFetch() {
   auto current_block = cpp_program_desc_.GetBlock(0);
   std::vector feeds;
   std::vector fetchs;
-  for (int i = 0; i < current_block->OpsSize(); i++) {
+  for (size_t i = 0; i < current_block->OpsSize(); i++) {
     auto op = current_block->GetOp(i);
     if (op->Type() == "feed") {
       feeds.push_back(op);
@@ -124,11 +124,11 @@ void LightPredictor::PrepareFeedFetch() {
   }
   input_names_.resize(feeds.size());
   output_names_.resize(fetchs.size());
-  for (int i = 0; i < feeds.size(); i++) {
+  for (size_t i = 0; i < feeds.size(); i++) {
     input_names_[feeds[i]->GetAttr("col")] =
         feeds[i]->Output("Out").front();
   }
-  for (int i = 0; i < fetchs.size(); i++) {
+  for (size_t i = 0; i < fetchs.size(); i++) {
     output_names_[fetchs[i]->GetAttr("col")] =
         fetchs[i]->Input("X").front();
   }
diff --git a/lite/api/light_api_test.cc b/lite/api/light_api_test.cc
index b49ff8b80c936b93acd630c6e0cde03df8b22ee4..08779c0b5c9802ebc5095241b2543d8724981dff 100644
--- a/lite/api/light_api_test.cc
+++ b/lite/api/light_api_test.cc
@@ -37,11 +37,11 @@ TEST(LightAPI, load) {
   const std::vector inputs = predictor.GetInputNames();
   LOG(INFO) << "input size: " << inputs.size();
-  for (int i = 0; i < inputs.size(); i++) {
+  for (size_t i = 0; i < inputs.size(); i++) {
     LOG(INFO) << "inputnames: " << inputs[i];
   }
   const std::vector outputs = predictor.GetOutputNames();
-  for (int i = 0; i < outputs.size(); i++) {
+  for (size_t i = 0; i < outputs.size(); i++) {
     LOG(INFO) << "outputnames: " << outputs[i];
   }
diff --git a/lite/api/lite_multithread_test.cc b/lite/api/lite_multithread_test.cc
index 12559d171ff3df808cf252e8e09c652246902abf..33c0a94cf1a254e42c47aa462c5cfe12e386a87e 100644
--- a/lite/api/lite_multithread_test.cc
+++ b/lite/api/lite_multithread_test.cc
@@ -293,13 +293,13 @@ int main(int argc, char** argv) {
   std::vector str_input_shapes = split_string(FLAGS_input_shape);
   std::vector> input_shapes;
-  for (int i = 0; i < str_input_shapes.size(); ++i) {
+  for (size_t i = 0; i < str_input_shapes.size(); ++i) {
     input_shapes.push_back(get_shape(str_input_shapes[i]));
   }
   std::vector str_input_shapes_0 = split_string(FLAGS_input_shape_0);
   std::vector> input_shapes_0;
-  for (int i = 0; i < str_input_shapes_0.size(); ++i) {
+  for (size_t i = 0; i < str_input_shapes_0.size(); ++i) {
     input_shapes_0.push_back(get_shape(str_input_shapes_0[i]));
   }
diff --git a/lite/api/model_test.cc b/lite/api/model_test.cc
index 898521a6fc0bf6d1f02ffa9a04f4dc6c4ea02612..f61ed9b4c38fcc3a6fe33fd26d6d3a80edcb9373 100644
--- a/lite/api/model_test.cc
+++ b/lite/api/model_test.cc
@@ -204,7 +204,7 @@ int main(int argc, char** argv) {
   LOG(INFO) << "input shapes: " << FLAGS_input_shape;
   std::vector str_input_shapes = split_string(FLAGS_input_shape);
   std::vector> input_shapes;
-  for (int i = 0; i < str_input_shapes.size(); ++i) {
+  for (size_t i = 0; i < str_input_shapes.size(); ++i) {
     LOG(INFO) << "input shape: " << str_input_shapes[i];
     input_shapes.push_back(get_shape(str_input_shapes[i]));
   }
diff --git a/lite/api/model_test_classify.cc b/lite/api/model_test_classify.cc
index 375d249476bf5323d69ea41c3f11d07e9c8bc711..5d2011e29bfdeb166ae1ad202d96a204893888b0 100644
--- a/lite/api/model_test_classify.cc
+++ b/lite/api/model_test_classify.cc
@@ -310,7 +310,7 @@ int main(int argc, char** argv) {
   LOG(INFO) << "input shapes: " << FLAGS_input_shape;
   std::vector str_input_shapes = split_string(FLAGS_input_shape);
   std::vector> input_shapes;
-  for (int i = 0; i < str_input_shapes.size(); ++i) {
+  for (size_t i = 0; i < str_input_shapes.size(); ++i) {
     LOG(INFO) << "input shape: " << str_input_shapes[i];
     input_shapes.push_back(get_shape(str_input_shapes[i]));
   }
diff --git a/lite/api/model_test_detection.cc b/lite/api/model_test_detection.cc
index f9be12b2c78c623a2b2c9852850576cc11815bd3..f059aca6330613f66fa93267c0c594cfba6d8833 100644
--- a/lite/api/model_test_detection.cc
+++ b/lite/api/model_test_detection.cc
@@ -114,7 +114,7 @@ void detect_object(const float* dout,
   }
   std::string name = FLAGS_out_txt + "_accu.txt";
   FILE* fp = fopen(name.c_str(), "w");
-  for (int i = 0; i < objects.size(); ++i) {
+  for (size_t i = 0; i < objects.size(); ++i) {
     Object object = objects.at(i);
     if (object.prob > thresh && object.x > 0 && object.y > 0 &&
         object.width > 0 && object.height > 0) {
@@ -324,7 +324,7 @@ int main(int argc, char** argv) {
   LOG(INFO) << "input shapes: " << FLAGS_input_shape;
   std::vector str_input_shapes = split_string(FLAGS_input_shape);
   std::vector> input_shapes;
-  for (int i = 0; i < str_input_shapes.size(); ++i) {
+  for (size_t i = 0; i < str_input_shapes.size(); ++i) {
     LOG(INFO) << "input shape: " << str_input_shapes[i];
     input_shapes.push_back(get_shape(str_input_shapes[i]));
   }
diff --git a/lite/api/paddle_api_test.cc b/lite/api/paddle_api_test.cc
index 9b8384f2823ee121aa8bb505dd135735d9f96774..832867df079efa1baebf08da4c0d8e37958460f1 100644
--- a/lite/api/paddle_api_test.cc
+++ b/lite/api/paddle_api_test.cc
@@ -36,11 +36,11 @@ TEST(CxxApi, run) {
   auto inputs = predictor->GetInputNames();
   LOG(INFO) << "input size: " << inputs.size();
-  for (int i = 0; i < inputs.size(); i++) {
+  for (size_t i = 0; i < inputs.size(); i++) {
     LOG(INFO) << "inputnames: " << inputs[i];
   }
   auto outputs = predictor->GetOutputNames();
-  for (int i = 0; i < outputs.size(); i++) {
+  for (size_t i = 0; i < outputs.size(); i++) {
     LOG(INFO) << "outputnames: " << outputs[i];
   }
   auto input_tensor = predictor->GetInputByName(inputs[0]);
diff --git a/lite/api/test_googlenet_lite.cc b/lite/api/test_googlenet_lite.cc
index 8ff7a49af9cbce09d205bb8633a913410beb91c3..f20714f096756da63bdb99c5bcf57b225658b096 100644
--- a/lite/api/test_googlenet_lite.cc
+++ b/lite/api/test_googlenet_lite.cc
@@ -38,7 +38,7 @@ TEST(CXXApi, test_lite_googlenet) {
   input_tensor->Resize(input_shape);
   auto* data = input_tensor->mutable_data();
   int input_num = 1;
-  for (int i = 0; i < input_shape.size(); ++i) {
+  for (size_t i = 0; i < input_shape.size(); ++i) {
     input_num *= input_shape[i];
   }
   for (int i = 0; i < input_num; i++) {
@@ -69,7 +69,7 @@ TEST(CXXApi, test_lite_googlenet) {
   for (size_t i = 0; i < results.size(); ++i) {
     EXPECT_NEAR(out->data()[i * 51], results[i], 1e-5);
   }
-  ASSERT_EQ(out->shape().size(), 2);
+  ASSERT_EQ(out->shape().size(), 2u);
   ASSERT_EQ(out->shape()[0], 1);
   ASSERT_EQ(out->shape()[1], 1000);
 }
diff --git a/lite/api/test_inceptionv4_lite_x86.cc b/lite/api/test_inceptionv4_lite_x86.cc
index e986784809951390889e17f766302fc5ea459465..00f775ddb7e7bf2d2f23c34ce19e576a4d2d27ed 100644
--- a/lite/api/test_inceptionv4_lite_x86.cc
+++ b/lite/api/test_inceptionv4_lite_x86.cc
@@ -38,7 +38,7 @@ TEST(InceptionV4, test_inceptionv4_lite_x86) {
   input_tensor->Resize(input_shape);
   auto* data = input_tensor->mutable_data();
   int input_num = 1;
-  for (int i = 0; i < input_shape.size(); ++i) {
+  for (size_t i = 0; i < input_shape.size(); ++i) {
     input_num *= input_shape[i];
   }
   for (int i = 0; i < input_num; i++) {
@@ -69,13 +69,13 @@ TEST(InceptionV4, test_inceptionv4_lite_x86) {
        0.0010612885, 0.00089107914, 0.0010112736, 0.00097655767}));
   auto out = predictor->GetOutput(0);
-  ASSERT_EQ(out->shape().size(), 2);
+  ASSERT_EQ(out->shape().size(), 2u);
   ASSERT_EQ(out->shape()[0], 1);
   ASSERT_EQ(out->shape()[1], 1000);
   int step = 50;
-  for (int i = 0; i < results.size(); ++i) {
-    for (int j = 0; j < results[i].size(); ++j) {
+  for (size_t i = 0; i < results.size(); ++i) {
+    for (size_t j = 0; j < results[i].size(); ++j) {
       EXPECT_NEAR(out->data()[j * step + (out->shape()[1] * i)],
                   results[i][j],
                   1e-6);
diff --git a/lite/api/test_mobilenetv1_lite_x86.cc b/lite/api/test_mobilenetv1_lite_x86.cc
index 67dc1b2436988c7d0d853c945fecce27ef2d329f..8a7547b9031d0723c528e7dd6e8d7e3fb6201b7d 100644
--- a/lite/api/test_mobilenetv1_lite_x86.cc
+++ b/lite/api/test_mobilenetv1_lite_x86.cc
@@ -38,7 +38,7 @@ TEST(Mobilenet_v1, test_mobilenetv1_lite_x86) {
   input_tensor->Resize(input_shape);
   auto* data = input_tensor->mutable_data();
   int input_num = 1;
-  for (int i = 0; i < input_shape.size(); ++i) {
+  for (size_t i = 0; i < input_shape.size(); ++i) {
     input_num *= input_shape[i];
   }
   for (int i = 0; i < input_num; i++) {
@@ -68,13 +68,13 @@ TEST(Mobilenet_v1, test_mobilenetv1_lite_x86) {
        0.0048292773, 0.0013995157, 0.0018453331, 0.0002428986, 0.00020211363,
        0.00013668182, 0.0005855956, 0.00025901722}));
   auto out = predictor->GetOutput(0);
-  ASSERT_EQ(out->shape().size(), 2);
+  ASSERT_EQ(out->shape().size(), 2u);
   ASSERT_EQ(out->shape()[0], 1);
   ASSERT_EQ(out->shape()[1], 1000);
   int step = 50;
-  for (int i = 0; i < results.size(); ++i) {
-    for (int j = 0; j < results[i].size(); ++j) {
+  for (size_t i = 0; i < results.size(); ++i) {
+    for (size_t j = 0; j < results[i].size(); ++j) {
       EXPECT_NEAR(out->data()[j * step + (out->shape()[1] * i)],
                   results[i][j],
                   1e-6);
diff --git a/lite/api/test_mobilenetv2_lite_x86.cc b/lite/api/test_mobilenetv2_lite_x86.cc
index 95e88abcc8e59c6808ea2dc44cf7d1bdd53ac9d0..92c8182f7330a76bf55cf34fbb9e4fdba1fa2fc6 100644
--- a/lite/api/test_mobilenetv2_lite_x86.cc
+++ b/lite/api/test_mobilenetv2_lite_x86.cc
@@ -39,7 +39,7 @@ TEST(Mobilenet_v2, test_mobilenetv2_lite_x86) {
   input_tensor->Resize(input_shape);
   auto* data = input_tensor->mutable_data();
   int input_num = 1;
-  for (int i = 0; i < input_shape.size(); ++i) {
+  for (size_t i = 0; i < input_shape.size(); ++i) {
     input_num *= input_shape[i];
   }
   for (int i = 0; i < input_num; i++) {
@@ -69,13 +69,13 @@ TEST(Mobilenet_v2, test_mobilenetv2_lite_x86) {
        0.0070957416, 0.0016094646, 0.0018807327, 0.00010506048, 6.823785e-05,
        0.00012269315, 0.0007806194, 0.00022354358}));
   auto out = predictor->GetOutput(0);
-  ASSERT_EQ(out->shape().size(), 2);
+  ASSERT_EQ(out->shape().size(), 2u);
   ASSERT_EQ(out->shape()[0], 1);
   ASSERT_EQ(out->shape()[1], 1000);
   int step = 50;
-  for (int i = 0; i < results.size(); ++i) {
-    for (int j = 0; j < results[i].size(); ++j) {
+  for (size_t i = 0; i < results.size(); ++i) {
+    for (size_t j = 0; j < results[i].size(); ++j) {
       EXPECT_NEAR(out->data()[j * step + (out->shape()[1] * i)],
                   results[i][j],
                   1e-6);
diff --git a/lite/api/test_resnet50_lite_x86.cc b/lite/api/test_resnet50_lite_x86.cc
index 3f9b59d714de611ef0a84cfc3b283d0dddd5c294..b185159801b6264555367b41f7def1bd0e7a5a3f 100644
--- a/lite/api/test_resnet50_lite_x86.cc
+++ b/lite/api/test_resnet50_lite_x86.cc
@@ -38,7 +38,7 @@ TEST(Resnet50, test_resnet50_lite_x86) {
   input_tensor->Resize(input_shape);
   auto* data = input_tensor->mutable_data();
   int input_num = 1;
-  for (int i = 0; i < input_shape.size(); ++i) {
+  for (size_t i = 0; i < input_shape.size(); ++i) {
     input_num *= input_shape[i];
   }
   for (int i = 0; i < input_num; i++) {
@@ -69,13 +69,13 @@ TEST(Resnet50, test_resnet50_lite_x86) {
        0.006387163, 0.0037145028, 0.0012812682, 0.00045948103, 0.00013535398,
        0.0002483765, 0.00076759676, 0.0002773295}));
   auto out = predictor->GetOutput(0);
-  ASSERT_EQ(out->shape().size(), 2);
+  ASSERT_EQ(out->shape().size(), 2u);
   ASSERT_EQ(out->shape()[0], 1);
   ASSERT_EQ(out->shape()[1], 1000);
   int step = 50;
-  for (int i = 0; i < results.size(); ++i) {
-    for (int j = 0; j < results[i].size(); ++j) {
+  for (size_t i = 0; i < results.size(); ++i) {
+    for (size_t j = 0; j < results[i].size(); ++j) {
       EXPECT_NEAR(out->data()[j * step + (out->shape()[1] * i)],
                   results[i][j],
                   1e-6);
diff --git a/lite/api/transform_test.cc b/lite/api/transform_test.cc
index e1c315f4a63ffd3ed8f51fa4b73ac88b50835cab..3cd8416d5e2293642abc68e457465c8a836f790b 100644
--- a/lite/api/transform_test.cc
+++ b/lite/api/transform_test.cc
@@ -232,8 +232,8 @@ void TestModel(const std::vector& valid_places,
   for (int i = 0; i < outs->numel(); ++i) {
     LOG(INFO) << o_data[i];
   }
-  for (int i = 0; i < lod.size(); ++i) {
-    for (int j = 0; j < lod[i].size(); ++j) {
+  for (size_t i = 0; i < lod.size(); ++i) {
+    for (size_t j = 0; j < lod[i].size(); ++j) {
       LOG(INFO) << lod[i][j];
     }
   }
diff --git a/lite/backends/x86/jit/gen/matmul.cc b/lite/backends/x86/jit/gen/matmul.cc
index 010c80fac4842e74c9b8272db472ddf6cf954771..f78df73f66532f891721c74cff9c78cc3bb61922 100644
--- a/lite/backends/x86/jit/gen/matmul.cc
+++ b/lite/backends/x86/jit/gen/matmul.cc
@@ -40,7 +40,7 @@ void MatMulJitCode::genCode() {
   for (size_t g = 0; g < groups.size(); ++g) {
     size_t x_offset = 0;
     size_t wgt_offset_tmp = 0;
-    for (int i = 0; i < g; ++i) {
+    for (size_t i = 0; i < g; ++i) {
       wgt_offset_tmp += groups[i] * block_len;
     }
     for (int k = 0; k < k_; ++k) {
diff --git a/lite/backends/x86/math/beam_search.cc b/lite/backends/x86/math/beam_search.cc
index 9cf3281152840416dc141f98992499c663783b7a..5d7e98629cb89bd7a3fdee852507e0f381e54931 100644
--- a/lite/backends/x86/math/beam_search.cc
+++ b/lite/backends/x86/math/beam_search.cc
@@ -265,7 +265,7 @@ class BeamSearchFunctor {
     // size_t num_seqs = scores->NumElements(lod_level);
     size_t num_seqs = scores->lod()[lod_level].size() - 1;
     size_t seq_width = 1;
-    for (int i = 1; i < scores->dims().size(); i++) {
+    for (size_t i = 1; i < scores->dims().size(); i++) {
       seq_width *= scores->dims()[i];
     }
diff --git a/lite/backends/x86/math/blas.cc b/lite/backends/x86/math/blas.cc
index 2d21adaf5d22930ff720c193696eb00c8035579d..3bc5f9f67ad96e7ec699400ff6369fe48c745b7e 100644
--- a/lite/backends/x86/math/blas.cc
+++ b/lite/backends/x86/math/blas.cc
@@ -23,7 +23,7 @@ namespace math {
 MatDescriptor CreateMatrixDescriptor(const lite::DDimLite &tensor_dim,
                                      int num_flatten_cols,
                                      bool trans) {
-  PADDLE_ENFORCE_GT(tensor_dim.size(), 1);
+  PADDLE_ENFORCE_GT(tensor_dim.size(), 1u);
   MatDescriptor retv;
   if (num_flatten_cols > 1) {
     auto flatten_dim = tensor_dim.Flatten2D(num_flatten_cols);
diff --git a/lite/backends/x86/math/sequence_pooling.cc b/lite/backends/x86/math/sequence_pooling.cc
index 34c55c5714e467954bc1bb79d9b1385ef5cfe497..2d00ebad61840da5b14fbf12d9255394b2b2df1a 100644
--- a/lite/backends/x86/math/sequence_pooling.cc
+++ b/lite/backends/x86/math/sequence_pooling.cc
@@ -46,9 +46,9 @@ class MaxSeqPoolFunctor {
     auto in_dims = input.dims();
     auto out_dims = output->dims();
     auto idx_dims = index->dims();
-    PADDLE_ENFORCE_GT(in_dims.size(), 1);
-    PADDLE_ENFORCE_GT(out_dims.size(), 1);
-    for (int64_t i = 1; i < in_dims.size(); ++i) {
+    PADDLE_ENFORCE_GT(in_dims.size(), 1u);
+    PADDLE_ENFORCE_GT(out_dims.size(), 1u);
+    for (size_t i = 1; i < in_dims.size(); ++i) {
       PADDLE_ENFORCE_EQ(in_dims[i], out_dims[i]);
     }
     PADDLE_ENFORCE_EQ(idx_dims, out_dims);
@@ -95,9 +95,9 @@ class MaxSeqPoolFunctor {
                   lite::Tensor* index) {
     auto in_dims = input.dims();
     auto out_dims = output->dims();
-    PADDLE_ENFORCE_GT(in_dims.size(), 1);
-    PADDLE_ENFORCE_GT(out_dims.size(), 1);
-    for (int64_t i = 1; i < in_dims.size(); ++i) {
+    PADDLE_ENFORCE_GT(in_dims.size(), 1u);
+    PADDLE_ENFORCE_GT(out_dims.size(), 1u);
+    for (size_t i = 1; i < in_dims.size(); ++i) {
       PADDLE_ENFORCE_EQ(in_dims[i], out_dims[i]);
     }
@@ -138,7 +138,7 @@ class MaxSeqPoolGradFunctor {
     auto idx_dims = index.dims();
     PADDLE_ENFORCE_GT(og_dims.size(), 1);
     PADDLE_ENFORCE_GT(ig_dims.size(), 1);
-    for (int64_t i = 1; i < og_dims.size(); ++i) {
+    for (size_t i = 1; i < og_dims.size(); ++i) {
       PADDLE_ENFORCE_EQ(og_dims[i], ig_dims[i]);
     }
     PADDLE_ENFORCE_EQ(idx_dims, og_dims);
diff --git a/lite/core/arena/framework.cc b/lite/core/arena/framework.cc
index 614ee990a9811ab74ceedb4fa000fa385698d679..731215f542567ec3ff0cc87d6990624bfa6b2bc2 100644
--- a/lite/core/arena/framework.cc
+++ b/lite/core/arena/framework.cc
@@ -107,7 +107,7 @@ void TestCase::PrepareInputsForInstruction() {
         CHECK(!shared_tensor_array->empty()) << "shared_tensor_array is empty yet";
         target_tensor_array->resize(shared_tensor_array->size());
-        for (int i = 0; i < shared_tensor_array->size(); i++) {
+        for (size_t i = 0; i < shared_tensor_array->size(); i++) {
           target_tensor_array->at(i).Resize(
               shared_tensor_array->at(i).dims());
           TargetCopy(param_type->type->target(),
@@ -219,7 +219,7 @@ bool TestCase::CheckPrecision(const std::string& var_name,
     auto b_tensor_array =
         base_scope_->FindVar(var_name)->GetMutable>();
     CHECK_EQ(a_tensor_array->size(), b_tensor_array->size());
-    for (int i = 0; i < a_tensor_array->size(); i++) {
+    for (size_t i = 0; i < a_tensor_array->size(); i++) {
       Tensor* a_tensor = &(a_tensor_array->at(i));
       Tensor* b_tensor = &(b_tensor_array->at(i));
       if (a_tensor->dims().size() == 0 && b_tensor->dims().size() == 0) {
diff --git a/lite/core/arena/framework.h b/lite/core/arena/framework.h
index 7050355fbfae55b9ba626119cd95f8e952c27430..20a0792155f0b4ea8faa7c3fc15ea5c4767352ac 100644
--- a/lite/core/arena/framework.h
+++ b/lite/core/arena/framework.h
@@ -166,7 +166,7 @@ class TestCase {
   // TODO(Superjomn) Move this method to utils or DDim?
   bool ShapeEquals(const DDim& a, const DDim& b) {
     if (a.size() != b.size()) return false;
-    for (int i = 0; i < a.size(); i++) {
+    for (size_t i = 0; i < a.size(); i++) {
       if (a[i] != b[i]) return false;
     }
     return true;
diff --git a/lite/core/device_info.cc b/lite/core/device_info.cc
index 29ac96ed744b016833a746b35002dd68109efd8b..09da06a4168268c670577c159a2a306a8959d81d 100644
--- a/lite/core/device_info.cc
+++ b/lite/core/device_info.cc
@@ -947,7 +947,7 @@ void DeviceInfo::RequestPowerNoBindMode(int thread_num) {
     active_ids_ = core_ids_;
   } else {
     active_ids_.resize(thread_num);
-    for (int i = 0; i < thread_num; ++i) {
+    for (uint32_t i = 0; i < thread_num; ++i) {
      if (i < big_core_ids_.size()) {
        active_ids_[i] = big_core_ids_[i];
      } else {
diff --git a/lite/core/kernel.cc b/lite/core/kernel.cc
index 7ec718cb3881c10dec08376419b419777c71bba6..194d736a4c0cf6fa18eae119589c5fa1fd08bca0 100644
--- a/lite/core/kernel.cc
+++ b/lite/core/kernel.cc
@@ -57,7 +57,7 @@ void KernelBase::ParseKernelType(const std::string &kernel_type,
                                  std::string *alias,
                                  Place *place) {
   auto parts = Split(kernel_type, "/");
-  CHECK_EQ(parts.size(), 5);
+  CHECK_EQ(parts.size(), 5u);
   *op_type = parts[0];
   *alias = parts[1];
diff --git a/lite/core/mir/fusion/conv_bn_fuser.cc b/lite/core/mir/fusion/conv_bn_fuser.cc
index 43869beddd0af701d5f78ea047b30f6b136e6b3f..143a7cecce8c1c45ada9ad31e8e4bea5447fec68 100644
--- a/lite/core/mir/fusion/conv_bn_fuser.cc
+++ b/lite/core/mir/fusion/conv_bn_fuser.cc
@@ -163,23 +163,23 @@ void ConvBNFuser::InsertNewNode(SSAGraph* graph, const key2nodes_t& matched) {
       int c_size = conv_weight_t->dims()[1] * conv_weight_t->dims()[2] *
                    conv_weight_t->dims()[3];
       int hw = conv_weight_t->dims()[2] * conv_weight_t->dims()[3];
-      for (unsigned int k = 0; k < conv_weight_t->dims()[0]; ++k) {
-        for (unsigned int i = 0; i < h; ++i) {
+      for (int k = 0; k < conv_weight_t->dims()[0]; ++k) {
+        for (int i = 0; i < h; ++i) {
          weight_scale[i] *= fabsf(alpha_data[i]);
          if (alpha_data[i] < 0.f) {
            auto ptr_row = conv_weight_d + k * c_size + i * hw;
-            for (unsigned int j = 0; j < hw; ++j) {
+            for (int j = 0; j < hw; ++j) {
              ptr_row[j] *= -1;
            }
          }
        }
      }
    } else {
-      for (unsigned int i = 0; i < h; ++i) {
+      for (int i = 0; i < h; ++i) {
        weight_scale[i] *= fabsf(alpha_data[i]);
        if (alpha_data[i] < 0.f) {
          auto ptr_row = conv_weight_d + i * w;
-          for (unsigned int j = 0; j < w; ++j) {
+          for (int j = 0; j < w; ++j) {
            ptr_row[j] *= -1;
          }
        }
@@ -203,17 +203,17 @@ void ConvBNFuser::InsertNewNode(SSAGraph* graph, const key2nodes_t& matched) {
       int c_size = conv_weight_t->dims()[1] * conv_weight_t->dims()[2] *
                    conv_weight_t->dims()[3];
       int hw = conv_weight_t->dims()[2] * conv_weight_t->dims()[3];
-      for (unsigned int k = 0; k < conv_weight_t->dims()[0]; ++k) {
-        for (unsigned int i = 0; i < h; ++i) {
+      for (int k = 0; k < conv_weight_t->dims()[0]; ++k) {
+        for (int i = 0; i < h; ++i) {
          auto ptr_row = conv_weight_d + k * c_size + i * hw;
-          for (unsigned int j = 0; j < hw; ++j) {
+          for (int j = 0; j < hw; ++j) {
            ptr_row[j] *= alpha_data[i];
          }
        }
      }
    } else {
-      for (unsigned int i = 0; i < h; ++i) {    // n: conv2d output channels
-        for (unsigned int j = 0; j < w; ++j) {  // w: conv2d input channels
+      for (int i = 0; i < h; ++i) {    // n: conv2d output channels
+        for (int j = 0; j < w; ++j) {  // w: conv2d input channels
          conv_weight_d[i * w + j] *= alpha_data[i];
        }
      }
diff --git a/lite/core/mir/fusion/quant_dequant_op_fuser.cc b/lite/core/mir/fusion/quant_dequant_op_fuser.cc
index a3a98b871fb4b6f8230299cda978b0f1f8faa779..2c7cc2fe5547d6004ded99f28698478cec0a3639 100644
--- a/lite/core/mir/fusion/quant_dequant_op_fuser.cc
+++ b/lite/core/mir/fusion/quant_dequant_op_fuser.cc
@@ -260,7 +260,7 @@ void ChannelWiseDequantOpFuser::InsertNewNode(SSAGraph* graph,
   auto channel_scale_tensor =
       scope->FindVar(channel_scale_name)->GetMutable();
   auto* channel_scale_data = channel_scale_tensor->data();
-  for (int i = 0; i < channel_scale_tensor->data_size(); i++) {
+  for (size_t i = 0; i < channel_scale_tensor->data_size(); i++) {
     weight_scale.push_back(channel_scale_data[i] / range);
   }
diff --git a/lite/core/mir/mlu_postprocess_pass.cc b/lite/core/mir/mlu_postprocess_pass.cc
index 15f62f36b0f026dc42ecbb274c946e294c7fc44e..ba48d5d4ead5ea922ded0bff3a87c2c127595790 100644
--- a/lite/core/mir/mlu_postprocess_pass.cc
+++ b/lite/core/mir/mlu_postprocess_pass.cc
@@ -292,7 +292,7 @@ void MLUPostprocessPass::GetSubgraphOpArgType(Node* inst_node,
   // get subgraph op's type info
   size_t kernel_size = inst_node->AsStmt().kernels().size();
-  CHECK_GT(kernel_size, 0);
+  CHECK_GT(kernel_size, 0u);
   VLOG(4) << "subgraph kernel size: " << kernel_size;
 
   for (size_t i = 0; i < kernel_size; ++i) {
@@ -450,7 +450,7 @@ bool MLUPostprocessPass::IsFirstConvInSubgraph(Node* arg_node, Node* inst) {
   auto* block_desc =
       static_cast(inst->AsStmt().op().get())
          ->GetSubBlock();
-  for (int op_idx = 0; op_idx < block_desc->OpsSize(); op_idx++) {
+  for (size_t op_idx = 0; op_idx < block_desc->OpsSize(); op_idx++) {
     auto op_desc = block_desc->GetOp(op_idx);
     CHECK(op_desc);
     if (op_desc->Type() == "conv2d") {
diff --git a/lite/core/mir/subgraph/subgraph_detector.cc b/lite/core/mir/subgraph/subgraph_detector.cc
index b61f7f365f51a32e267dd12943be5fcfadb3e08a..6bab454c42a68a7513aa01ff06cc2be6c970e199 100644
--- a/lite/core/mir/subgraph/subgraph_detector.cc
+++ b/lite/core/mir/subgraph/subgraph_detector.cc
@@ -47,8 +47,8 @@ std::string SubgraphVisualizer::operator()() {
       "turquoise4", "snow3", "sienna4", "salmon2",
   };
   std::unordered_map subgraph_indices;
-  for (int i = 0; i < subgraphs_.size(); i++) {
-    for (int j = 0; j < subgraphs_[i].size(); j++) {
+  for (size_t i = 0; i < subgraphs_.size(); i++) {
+    for (size_t j = 0; j < subgraphs_[i].size(); j++) {
      subgraph_indices[subgraphs_[i][j]] = i;
     }
   }
@@ -538,7 +538,8 @@ void SubgraphFuser::ReplaceNodesWithSubgraphs(SSAGraph *graph,
   std::vector> subgraphs =
       SubgraphDetector(graph, teller)();
   SubgraphVisualizer(graph, subgraphs)();
-  for (int subgraph_idx = 0; subgraph_idx < subgraphs.size(); subgraph_idx++) {
+  for (size_t subgraph_idx = 0; subgraph_idx < subgraphs.size();
+       subgraph_idx++) {
     if (subgraphs[subgraph_idx].size() >= min_subgraph_size) {
       InsertNewNode(graph, subgraph_idx, subgraphs[subgraph_idx]);
     }
diff --git a/lite/core/mir/subgraph/subgraph_detector_test.cc b/lite/core/mir/subgraph/subgraph_detector_test.cc
index 1e54e1497b5d49754a705340aafa30ded1c2a727..f52c0332fa3cfce904d2b7c8bf010bc3d3ac6ac9 100644
--- a/lite/core/mir/subgraph/subgraph_detector_test.cc
+++ b/lite/core/mir/subgraph/subgraph_detector_test.cc
@@ -36,8 +36,8 @@ std::vector AddFCDesc(
     const std::shared_ptr& scope,
     const std::vector& input_var_names,
     const std::vector& wshape) {
-  CHECK_EQ(input_var_names.size(), 1);
-  CHECK_EQ(wshape.size(), 2);
+  CHECK_EQ(input_var_names.size(), 1u);
+  CHECK_EQ(wshape.size(), 2u);
   static int id = 0;
   std::string prefix = "fc_" + paddle::lite::to_string(id);
   auto* op_desc = block_desc->AddOp();
@@ -169,8 +169,8 @@ TEST(Subgraph, detect_simple_model) {
   };
   std::vector> subgraphs =
       mir::SubgraphDetector(graph.get(), teller)();
-  ASSERT_EQ(subgraphs.size(), 1);
-  ASSERT_EQ(graph->nodes().size(), 9);
+  ASSERT_EQ(subgraphs.size(), 1u);
+  ASSERT_EQ(graph->nodes().size(), 9u);
   mir::SubgraphVisualizer(graph.get(), subgraphs)();
 }
@@ -221,7 +221,7 @@ TEST(Subgraph, detect_custom_model) {
   std::vector> subgraphs =
       mir::SubgraphDetector(graph.get(), teller)();
   mir::SubgraphVisualizer(graph.get(), subgraphs)();
-  ASSERT_EQ(subgraphs.size(), 1);
+  ASSERT_EQ(subgraphs.size(), 1u);
 }
 
 }  // namespace lite
diff --git a/lite/core/mir/subgraph/subgraph_pass_test.cc b/lite/core/mir/subgraph/subgraph_pass_test.cc
index a2369adc5d882310503cbf52fa5394098d824b40..c638793c08160eb8ee7edabeab0977541e85d82a 100644
--- a/lite/core/mir/subgraph/subgraph_pass_test.cc
+++ b/lite/core/mir/subgraph/subgraph_pass_test.cc
@@ -39,7 +39,7 @@ std::vector> ShapeParsing(std::string text) {
   std::vector> shapes;
   std::vector shape_strings = Split(text, ":");
   shapes.resize(shape_strings.size());
-  for (int i = 0; i < shape_strings.size(); i++) {
+  for (size_t i = 0; i < shape_strings.size(); i++) {
     std::vector shape_nums = Split(shape_strings[i], ",");
     for (auto shape_num : shape_nums) {
       shapes[i].push_back(atoi(shape_num.c_str()));
@@ -66,7 +66,7 @@ void FillInputTensors(
     for (int j = 0; j < input_tensor_size; j++) {      \
       input_tensor_data[j] = static_cast(value);       \
     }
-  for (int i = 0; i < input_tensor_shape.size(); i++) {
+  for (size_t i = 0; i < input_tensor_shape.size(); i++) {
     auto input_tensor = predictor->GetInput(i);
     input_tensor->Resize(input_tensor_shape[i]);
     auto input_tensor_size = ShapeProduction(input_tensor->shape());
@@ -95,7 +95,7 @@ void CheckOutputTensors(
         << " abs_diff: " << abs_diff << " rel_diff: " << rel_diff;  \
     EXPECT_LT(rel_diff, 0.1);                                       \
   }
-  for (int i = 0; i < output_tensor_type.size(); i++) {
+  for (size_t i = 0; i < output_tensor_type.size(); i++) {
     auto tar_output_tensor = tar_predictor->GetOutput(i);
     auto ref_output_tensor = ref_predictor->GetOutput(i);
     auto tar_output_tensor_size = ShapeProduction(tar_output_tensor->shape());
diff --git a/lite/core/op_lite.cc b/lite/core/op_lite.cc
index f8a706179374a0c86e28cf9a3638f5df2c932540..941a9e9f88cf04ef47487237b1a3f6509dea762b 100644
--- a/lite/core/op_lite.cc
+++ b/lite/core/op_lite.cc
@@ -41,7 +41,7 @@ bool OpLite::InferShapeWithCache() {
        iter++) {
     // combined dims value into new_hash value.
     auto &element_dims = (*iter)->dims();
-    for (int i = 0; i < element_dims.size(); i++) {
+    for (size_t i = 0; i < element_dims.size(); i++) {
       new_hash =
           lite::hash_combine(new_hash, static_cast(element_dims[i]));
     }
@@ -49,7 +49,7 @@ bool OpLite::InferShapeWithCache() {
     auto &emement_lods = (*iter)->lod();
     for (auto lod_iter = emement_lods.begin(); lod_iter != emement_lods.end();
          lod_iter++) {
-      for (int i = 0; i < lod_iter->size(); i++) {
+      for (size_t i = 0; i < lod_iter->size(); i++) {
        new_hash =
            lite::hash_combine(new_hash, static_cast(lod_iter->at(i)));
      }
@@ -60,7 +60,7 @@ bool OpLite::InferShapeWithCache() {
     // if current hash value is consistent with io_shape_lod_hash_,
     // previous outputs shape and lod are reused.
     auto *current_outputs = param_.output_tensor_ptrs();
-    for (int i = 0; i < current_outputs->size(); i++) {
+    for (size_t i = 0; i < current_outputs->size(); i++) {
       current_outputs->at(i)->Resize(last_output_shapes[i]);
       current_outputs->at(i)->set_lod(last_output_lods[i]);
     }
@@ -69,7 +69,7 @@ bool OpLite::InferShapeWithCache() {
     io_shape_lod_hash_ = new_hash;
     this->InferShapeImpl();
     auto *current_outputs = param_.output_tensor_ptrs();
-    for (int i = 0; i < current_outputs->size(); i++) {
+    for (size_t i = 0; i < current_outputs->size(); i++) {
       last_output_shapes[i] = current_outputs->at(i)->dims();
       last_output_lods[i] = current_outputs->at(i)->lod();
     }
diff --git a/lite/core/program.cc b/lite/core/program.cc
index 1193e3c84f66b9d1dfb39d5dcc74265d212ab7ab..5ddf6c0e935a851cc0b3c3eb7554609939ef1cbf 100644
--- a/lite/core/program.cc
+++ b/lite/core/program.cc
@@ -72,7 +72,7 @@ void RuntimeProgram::UpdateVarsOfProgram(cpp::ProgramDesc* desc) {
   std::unordered_map origin_var_maps;
   auto& main_block = *desc->GetBlock(0);
   auto var_size = main_block.VarsSize();
-  for (int i = 0; i < var_size; i++) {
+  for (size_t i = 0; i < var_size; i++) {
     auto v = main_block.GetVar(i);
     auto name = v->Name();
     origin_var_maps.emplace(name, *v);
diff --git a/lite/core/tensor.cc b/lite/core/tensor.cc
index ecb9935dfd13c09cbd1a20f3833e6ab76161192a..1ae291dd40d19940e93bfda9b0c22f4092ce7988 100644
--- a/lite/core/tensor.cc
+++ b/lite/core/tensor.cc
@@ -100,7 +100,7 @@ void *TensorLite::mutable_data(TargetType target, size_t memory_size) {
 void TensorLite::ResetBuffer(std::shared_ptr buffer, size_t memory_size) {
-  CHECK_EQ(offset_, 0)
+  CHECK_EQ(offset_, 0u)
       << "Only the offset is supported to zero when the Buffer is reset.";
   if (buffer_) {
     CHECK_LE(memory_size_, buffer->space())
diff --git a/lite/kernels/mlu/bridges/act_op_test.cc b/lite/kernels/mlu/bridges/act_op_test.cc
index 7cec0529e49e694c362b3e0a550948f7855c85a2..2b7747f4d8b647b8cb621876907f6178ebf9fe88 100644
--- a/lite/kernels/mlu/bridges/act_op_test.cc
+++ b/lite/kernels/mlu/bridges/act_op_test.cc
@@ -44,40 +44,40 @@ void act_ref(const std::shared_ptr op) {
   // "sigmoid","relu","tanh","relu_clipped","leaky_relu","softsign","hard_sigmoid"
   if (op_type == "sigmoid") {
-    for (size_t i = 0; i < out->numel(); i++) {
+    for (int i = 0; i < out->numel(); i++) {
       out_data[i] = 1.f / (1.f + std::exp(-x_data[i]));
     }
   } else if (op_type == "relu") {
-    for (size_t i = 0; i < out->numel(); i++) {
+    for (int i = 0; i < out->numel(); i++) {
       out_data[i] = std::max(0.f, x_data[i]);
     }
   } else if (op_type == "tanh") {
-    for (size_t i = 0; i < out->numel(); i++) {
+    for (int i = 0; i < out->numel(); i++) {
       out_data[i] = (std::exp(x_data[i]) - std::exp(-x_data[i])) /
                     (std::exp(x_data[i]) + std::exp(-x_data[i]));
     }
   } else if (op_type == "relu_clipped") {
     auto relu_clipped_coef = op_info->GetAttr("Relu_clipped_coef");
-    for (size_t i = 0; i < out->numel(); i++) {
+    for (int i = 0; i < out->numel(); i++) {
       out_data[i] = std::min(std::max(0.f, x_data[i]), relu_clipped_coef);
     }
   } else if (op_type == "relu6") {
-    for (size_t i = 0; i < out->numel(); i++) {
+    for (int i = 0; i < out->numel(); i++) {
       out_data[i] = std::min(std::max(0.f, x_data[i]), 6.f);
     }
   } else if (op_type == "leaky_relu") {
     auto alpha = op_info->GetAttr("alpha");
-    for (size_t i = 0; i < out->numel(); i++) {
+    for (int i = 0; i < out->numel(); i++) {
       out_data[i] = std::max(x_data[i], x_data[i] * alpha);
     }
   } else if (op_type == "softsign") {
-    for (size_t i = 0; i < out->numel(); i++) {
+    for (int i = 0; i < out->numel(); i++) {
       out_data[i] = x_data[i] / (1 + std::abs(x_data[i]));
     }
   } else if (op_type == "hard_sigmoid") {
     auto slope = op_info->GetAttr("slope");
     auto offset = op_info->GetAttr("offset");
-    for (size_t i = 0; i < out->numel(); i++) {
+    for (int i = 0; i < out->numel(); i++) {
       out_data[i] = std::min(1.f, slope * x_data[i] + offset);
       out_data[i] = std::max(0.f, out_data[i]);
     }
diff --git a/lite/kernels/mlu/bridges/concat_op_test.cc b/lite/kernels/mlu/bridges/concat_op_test.cc
index c4b48a9ef45430ec5867d231bbc2d0a798ec66d0..1dbcaf7160fd36ab75c4a1139555650b98030482 100644
--- a/lite/kernels/mlu/bridges/concat_op_test.cc
+++ b/lite/kernels/mlu/bridges/concat_op_test.cc
@@ -37,7 +37,7 @@ void concat_ref(const std::shared_ptr op) {
       scope->FindVar(op_info->Output("Out").front())->GetMutable();
   int axis = op_info->GetAttr("axis");
   std::vector inputs_concat(inputs.size());
-  for (int j = 0; j < inputs.size(); ++j) {
+  for (size_t j = 0; j < inputs.size(); ++j) {
     inputs_concat[j] = inputs[j];
   }
   size_t num = inputs.size();
@@ -48,7 +48,7 @@ void concat_ref(const std::shared_ptr op) {
   }
   int out_rows = rows, out_cols = 0;
   std::vector inputs_cols(inputs.size());
-  for (int i = 0; i < num; ++i) {
+  for (size_t i = 0; i < num; ++i) {
     int t_cols = inputs[i]->numel() / rows;
     out_cols += t_cols;
     inputs_cols[i] = t_cols;
@@ -56,7 +56,7 @@ void concat_ref(const std::shared_ptr op) {
   for (int k = 0; k < out_rows; ++k) {
     float* dst_ptr = out->mutable_data() + k * out_cols;
     int col_idx = 0;
-    for (int j = 0; j < num; ++j) {
+    for (size_t j = 0; j < num; ++j) {
       int col_len = inputs_cols[j];
       const float* src_prt = inputs[j]->data() + k * col_len;
       std::memcpy(dst_ptr + col_idx, src_prt, sizeof(float) * col_len);
diff --git a/lite/kernels/mlu/bridges/conv_op.cc b/lite/kernels/mlu/bridges/conv_op.cc
index 6a7ef408eb7432950d5a0985dd6e174236e937e0..e7e21f7ad2f64275746e015289c9372368e46f5c 100644
--- a/lite/kernels/mlu/bridges/conv_op.cc
+++ b/lite/kernels/mlu/bridges/conv_op.cc
@@ -43,20 +43,20 @@ int ConvConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   const auto output_shape = output->dims().Vectorize();
   const auto bs = input_dims[0];
   const auto oc = filter_dims[0];
-  CHECK_EQ(input_dims.size(), 4);
-  CHECK_EQ(filter_dims.size(), 4);
+  CHECK_EQ(input_dims.size(), 4u);
+  CHECK_EQ(filter_dims.size(), 4u);
   const auto strides = op_info->GetAttr>("strides");
   auto dilations = op_info->GetAttr>("dilations");
   auto paddings = op_info->GetAttr>("paddings");
-  CHECK_EQ(strides.size(), 2L);
-  CHECK_EQ(dilations.size(), 2L);
-  if (paddings.size() == 2L) {
+  CHECK_EQ(strides.size(), 2u);
+  CHECK_EQ(dilations.size(), 2u);
+  if (paddings.size() == 2u) {
     for (size_t i = 0; i < strides.size(); ++i) {
       int copy_pad = *(paddings.begin() + 2 * i);
       paddings.insert(paddings.begin() + 2 * i + 1, copy_pad);
     }
   }
-  CHECK_EQ(paddings.size(), 4L)
+  CHECK_EQ(paddings.size(), 4u)
       << "Paddings size should be the same or twice as the input size.";
 
   const std::string padding_algorithm =
diff --git a/lite/kernels/mlu/bridges/conv_op_test.cc b/lite/kernels/mlu/bridges/conv_op_test.cc
index e34dd7c2a85dbda62596b6e82d820fc437bfd194..1b04814d7d88d227d0bb3e0b58aef26d62f06966 100644
--- a/lite/kernels/mlu/bridges/conv_op_test.cc
+++ b/lite/kernels/mlu/bridges/conv_op_test.cc
@@ -173,10 +173,10 @@ void test_conv(int bs,
   Tensor input_int;
   input_int.Resize(input_shape);
   FillTensor(&input_int, -127, 127);
-  for (int i = 0; i < input->data_size(); i++) {
+  for (size_t i = 0; i < input->data_size(); i++) {
     input->mutable_data()[i] = input_int.data()[i] * input_scale;
   }
-  for (int i = 0; i < filter->data_size(); i++) {
+  for (size_t i = 0; i < filter->data_size(); i++) {
     filter->mutable_data()[i] = filter_int->data()[i] * filter_scale;
   }
diff --git a/lite/kernels/mlu/bridges/fc_op_test.cc b/lite/kernels/mlu/bridges/fc_op_test.cc
index 8f92b6abad97650100d0862d49550abaf62daac9..fe1c889f431350b4175ac400aefe77e6392405c5 100644
--- a/lite/kernels/mlu/bridges/fc_op_test.cc
+++ b/lite/kernels/mlu/bridges/fc_op_test.cc
@@ -97,11 +97,11 @@ void test_fc(const std::vector& input_shape,
   Tensor input_int;
   input_int.Resize(input_shape);
   FillTensor(&input_int, -127, 127);
-  for (int i = 0; i < input->data_size(); i++) {
+  for (size_t i = 0; i < input->data_size(); i++) {
     input->mutable_data()[i] = input_int.data()[i] * input_scale;
   }
-  for (int i = 0; i < w->data_size(); i++) {
+  for (size_t i = 0; i < w->data_size(); i++) {
     w->mutable_data()[i] = w_int->data()[i] * w_scale;
   }
diff --git a/lite/kernels/mlu/bridges/interpolate_op.cc b/lite/kernels/mlu/bridges/interpolate_op.cc
index e201199824d8042abd6002ccbe5bb659a9ca2898..2c1a2aeeff799d31d4328169fce058259543fb1f 100644
--- a/lite/kernels/mlu/bridges/interpolate_op.cc
+++ b/lite/kernels/mlu/bridges/interpolate_op.cc
@@ -36,7 +36,7 @@ int InterpolateConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   auto x = scope->FindVar(x_var_name)->GetMutable();
   auto out = scope->FindVar(out_var_name)->GetMutable();
   auto x_dims = x->dims();
-  CHECK_EQ(x_dims.size(), 4);
+  CHECK_EQ(x_dims.size(), 4u);
   auto scale = op_info->GetAttr("scale");
   auto out_w = op_info->GetAttr("out_w");
   auto out_h = op_info->GetAttr("out_h");
diff --git a/lite/kernels/mlu/bridges/interpolate_op_test.cc b/lite/kernels/mlu/bridges/interpolate_op_test.cc
index 0e99da64358e6590af0b8e57dc3ddec142c8d0f0..13f5bfb31bc2856c2da467e8511cd5d2c973492d 100644
--- a/lite/kernels/mlu/bridges/interpolate_op_test.cc
+++ b/lite/kernels/mlu/bridges/interpolate_op_test.cc
@@ -85,7 +85,7 @@ void BilinearInterpRef(const lite::Tensor* x,
   int channel_size = x_dims[1];
   auto x_h = x_dims[2];
   auto x_w = x_dims[3];
-  CHECK_EQ(x_dims.size(), 4);
+  CHECK_EQ(x_dims.size(), 4u);
   auto out_dims = out->dims();
   int out_h = out_dims[2];
diff --git a/lite/kernels/mlu/bridges/utility.cc b/lite/kernels/mlu/bridges/utility.cc
index f18a46518c09a69803a069ce40c1d7e3c01e9eca..cd78553a652433fc41334a6bff5575031f5125e0 100644
--- a/lite/kernels/mlu/bridges/utility.cc
+++ b/lite/kernels/mlu/bridges/utility.cc
@@ -59,10 +59,10 @@ void dequant(float* dst,
             size_t size,
             size_t size_in,
             std::vector scales) {
-  for (int out = 0; out < size_o; ++out) {
-    for (int s = 0; s < size; ++s) {
+  for (size_t out = 0; out < size_o; ++out) {
+    for (size_t s = 0; s < size; ++s) {
       auto scale = scales[s];
-      for (int in = 0; in < size_in; ++in) {
+      for (size_t in = 0; in < size_in; ++in) {
        int idx = in + s * size_in + out * size_in * size;
        dst[idx] = static_cast(src[idx]) * scale;
       }
diff --git a/lite/kernels/npu/bridges/engine.cc b/lite/kernels/npu/bridges/engine.cc
index 9961d5f17e285350414f1c8ae72fe19d760312de..6e639a37badf45e4a01f542011f0149e93e06772 100644
--- a/lite/kernels/npu/bridges/engine.cc
+++ b/lite/kernels/npu/bridges/engine.cc
@@ -30,7 +30,7 @@ int Engine::BuildOriginProgram() {
   // TODO(hong19860320) The block_desc need to be divided into subgraphs during
   // the exection time. But only see them as a subgraph now.
   origin_program_.clear();
-  for (int op_idx = 0; op_idx < block_desc_->OpsSize(); op_idx++) {
+  for (size_t op_idx = 0; op_idx < block_desc_->OpsSize(); op_idx++) {
     auto op_desc = block_desc_->GetOp(op_idx);
     CHECK(op_desc);
     std::string op_type = op_desc->Type();
@@ -46,7 +46,7 @@ int Engine::BuildOriginProgram() {
     VLOG(3) << "Found the attr '" << kKernelTypeAttr << "': " << kernel_type
             << " for " << op_type;
     auto kernels = op->CreateKernels({place});
-    CHECK_GT(kernels.size(), 0) << "No kernels found for " << op_type;
+    CHECK_GT(kernels.size(), 0u) << "No kernels found for " << op_type;
     auto it = std::find_if(
         kernels.begin(), kernels.end(), [&](std::unique_ptr& it) {
           return it->alias() == alias;
@@ -96,7 +96,7 @@ int Engine::Build() {
 }
 
 bool Engine::InputShapeChanged() {
-  for (int i = 0; i < origin_itensors_.size(); i++) {
+  for (size_t i = 0; i < origin_itensors_.size(); i++) {
     if (origin_itensors_[i]->dims() != origin_idims_[i]) {
       return true;
     }
diff --git a/lite/kernels/x86/elementwise_op_function.h b/lite/kernels/x86/elementwise_op_function.h
index 42ea38d979e39f97a8aef971370c83303c53c48f..c49f21d1a8ee20db249274874e21accd00dfbcd1 100644
--- a/lite/kernels/x86/elementwise_op_function.h
+++ b/lite/kernels/x86/elementwise_op_function.h
@@ -64,14 +64,14 @@ inline void get_mid_dims(const lite::DDim &x_dims,
     for (int i = 0; i < axis; ++i) {
       (*pre) *= x_dims[i];
     }
-    for (int i = 0; i < y_dims.size(); ++i) {
+    for (size_t i = 0; i < y_dims.size(); ++i) {
       if (x_dims[i + axis] != y_dims[i]) {
         // only support single y_dims[i] = 1 now.
         PADDLE_ENFORCE_EQ(
             *mid_flag, 0, "Broadcast support y_dims with single 1.");
         PADDLE_ENFORCE_EQ(y_dims[i], 1, "Broadcast dimension mismatch.");
         // m*n*k m*1*k
-        for (int j = 0; j < i; ++j) {
+        for (size_t j = 0; j < i; ++j) {
           (*pre) *= y_dims[j];
         }
         *n = std::max(x_dims[i + axis], y_dims[i]);
@@ -82,11 +82,11 @@ inline void get_mid_dims(const lite::DDim &x_dims,
       (*n) *= y_dims[i];
     }
     if (*mid_flag) {
-      for (int i = mid + 1; i < x_dims.size(); ++i) {
+      for (size_t i = mid + 1; i < x_dims.size(); ++i) {
        (*post) *= x_dims[i];
      }
    } else {
-      for (int i = axis + y_dims.size(); i < x_dims.size(); ++i) {
+      for (size_t i = axis + y_dims.size(); i < x_dims.size(); ++i) {
        (*post) *= x_dims[i];
      }
    }
@@ -95,13 +95,13 @@ inline void get_mid_dims(const lite::DDim &x_dims,
       (*pre) *= x_dims[i];
     }
-    for (int i = 0; i < y_dims.size(); ++i) {
+    for (size_t i = 0; i < y_dims.size(); ++i) {
       PADDLE_ENFORCE_EQ(
           x_dims[i + axis], y_dims[i], "Broadcast dimension mismatch.");
       (*n) *= y_dims[i];
     }
-    for (int i = axis + y_dims.size(); i < x_dims.size(); ++i) {
+    for (size_t i = axis + y_dims.size(); i < x_dims.size(); ++i) {
       (*post) *= x_dims[i];
     }
   }
@@ -116,7 +116,7 @@ inline lite::DDim trim_trailing_singular_dims(const lite::DDim &dims) {
   std::vector trim_dims;
   trim_dims.resize(actual_dims_size);
-  for (int i = 0; i < actual_dims_size; ++i) {
+  for (size_t i = 0; i < actual_dims_size; ++i) {
     trim_dims[i] = dims[i];
   }
   if (trim_dims.size() == 0) {
diff --git a/lite/kernels/x86/fill_constant_batch_size_like_compute_test.cc b/lite/kernels/x86/fill_constant_batch_size_like_compute_test.cc
index e324b599836059ca3560593950f689eabd393ea0..16bec18a1c1c4d0075e1ed1dcc4f3a3462917868 100644
--- a/lite/kernels/x86/fill_constant_batch_size_like_compute_test.cc
+++ b/lite/kernels/x86/fill_constant_batch_size_like_compute_test.cc
@@ -71,7 +71,7 @@ TEST(fill_constant_batch_size_like_x86, run_test) {
   std::vector ref_results{
       3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5};
-  for (int i = 0; i < ref_results.size(); i++) {
+  for (size_t i = 0; i < ref_results.size(); i++) {
     EXPECT_NEAR(out_data[i], ref_results[i], 1e-3);
   }
 }
diff --git a/lite/kernels/x86/gather_compute.h b/lite/kernels/x86/gather_compute.h
index bd01d9da3af1640770838c262dcd848b557d40c3..e63332e87a079e234a0fc72ee2756afd2ebdd94c 100644
--- a/lite/kernels/x86/gather_compute.h
+++ b/lite/kernels/x86/gather_compute.h
@@ -56,7 +56,7 @@ void CPUGather(const lite::Tensor* src,
   // slice size
   int slice_size = 1;
-  for (int i = 1; i < src_dims.size(); ++i) slice_size *= src_dims[i];
+  for (size_t i = 1; i < src_dims.size(); ++i) slice_size *= src_dims[i];
   const size_t slice_bytes = slice_size * sizeof(T);
 
   for (int64_t i = 0; i < index_size; ++i) {
diff --git a/lite/kernels/x86/layer_norm_compute_test.cc b/lite/kernels/x86/layer_norm_compute_test.cc
index a5244bcc6f2c561b5eac2fc74b1cc8c5f12417d6..d39500a5e8827230ddeecd6bbe30f8c0a47ee929 100644
--- a/lite/kernels/x86/layer_norm_compute_test.cc
+++ b/lite/kernels/x86/layer_norm_compute_test.cc
@@ -108,7 +108,7 @@ TEST(layer_norm_x86, run_test) {
   for (int i = 0; i < begin_norm_axis; ++i) {
     pre *= x_shape[i];
   }
-  for (int i = begin_norm_axis; i < x_shape.size(); ++i) {
+  for (size_t i = begin_norm_axis; i < x_shape.size(); ++i) {
     post *= x_shape[i];
   }
   std::vector scale_shape({post});
diff --git a/lite/kernels/x86/sequence_expand_as_compute.h b/lite/kernels/x86/sequence_expand_as_compute.h
index badbfac14cbeb120d23ea1174a9fc3a218b2224f..4ab1aeae02f5f54d2f9542520b4acda57c89eec2 100644
--- a/lite/kernels/x86/sequence_expand_as_compute.h
+++ b/lite/kernels/x86/sequence_expand_as_compute.h
@@ -66,8 +66,8 @@ class SequenceExpandAsCompute
     auto *out = param.out;
 
     auto &y_lod = y->lod();
-    CHECK_EQ(y_lod.size(), 1);
-    CHECK_GT(y_lod[0].size(), 1);
+    CHECK_EQ(y_lod.size(), 1u);
+    CHECK_GT(y_lod[0].size(), 1u);
 
     out->template mutable_data();
diff --git a/lite/kernels/x86/sequence_reverse_compute_test.cc b/lite/kernels/x86/sequence_reverse_compute_test.cc
index 4b84241c8b19e3db57dd7ef6339496191a7486be..adf9981b242bfbb7f60989369715354cc2043685 100644
--- a/lite/kernels/x86/sequence_reverse_compute_test.cc
+++ b/lite/kernels/x86/sequence_reverse_compute_test.cc
@@ -30,7 +30,7 @@ static void sequence_reverse_ref(const lite::Tensor* x, lite::Tensor* y) {
   auto seq_offset = x->lod()[x->lod().size() - 1];
   int width = x->numel() / x->dims()[0];
   auto* y_data = y->mutable_data();
-  for (int i = 0; i < seq_offset.size() - 1; ++i) {
+  for (size_t i = 0; i < seq_offset.size() - 1; ++i) {
     auto start_pos = seq_offset[i];
     auto end_pos = seq_offset[i + 1];
     for (auto pos = start_pos; pos < end_pos; ++pos) {
diff --git a/lite/kernels/x86/shape_compute.h b/lite/kernels/x86/shape_compute.h
index e78684e629727fc7023e6ae4c3385f9c58d48a6b..bceae79ea17665c02981666aef0cdf7827f0c45d 100644
--- a/lite/kernels/x86/shape_compute.h
+++ b/lite/kernels/x86/shape_compute.h
@@ -31,7 +31,7 @@ class ShapeCompute : public KernelLite {
     // auto& context = context_->As();
     auto out_data = param.Out->template mutable_data();
     auto in_dims = param.X->dims();
-    for (int i = 0; i < in_dims.size(); ++i) {
+    for (size_t i = 0; i < in_dims.size(); ++i) {
       out_data[i] = in_dims[i];
     }
   }
diff --git a/lite/kernels/x86/slice_compute.h b/lite/kernels/x86/slice_compute.h
index 0bb9fd66eb60ca1df698dbe806bc8e9ee2a69f0e..ad30215691cde66ab1c7c8c57930fc6d58de7cd5 100644
--- a/lite/kernels/x86/slice_compute.h
+++ b/lite/kernels/x86/slice_compute.h
@@ -118,7 +118,7 @@ void slice_compute(const lite::Tensor* in,
       out_dims[decrease_axis[i]] = 0;
     }
-    for (int i = 0; i < out_dims.size(); ++i) {
+    for (size_t i = 0; i < out_dims.size(); ++i) {
       if (out_dims[i] != 0) {
         new_out_shape.push_back(out_dims[i]);
       }
diff --git a/lite/kernels/x86/slice_compute_test.cc b/lite/kernels/x86/slice_compute_test.cc
index 8d35534f824504a965f8ded0ef82878c03739a36..a69bfc9a43c3a83f52dab8e2752921be1069252b 100644
--- a/lite/kernels/x86/slice_compute_test.cc
+++ b/lite/kernels/x86/slice_compute_test.cc
@@ -34,10 +34,10 @@ static void slice_ref(const float* input,
   std::vector real_starts(in_dims.size(), 0);
   std::vector real_ends(in_dims.size(), 0);
   std::vector real_step(in_dims.size(), 0);
-  for (int i = 0; i < in_dims.size(); i++) {
+  for (size_t i = 0; i < in_dims.size(); i++) {
     real_ends[i] = in_dims[i];
   }
-  for (int i = 0; i < axes.size(); i++) {
+  for (size_t i = 0; i < axes.size(); i++) {
     int dim_value = in_dims[axes[i]];
     if (dim_value > 0) {
       int start = starts[i] < 0 ? (starts[i] + dim_value) : starts[i];
@@ -52,11 +52,11 @@ static void slice_ref(const float* input,
   }
   const int LEN = in_dims.size();
   int dst_step[LEN];
-  for (int i = 0; i < in_dims.size(); ++i) {
+  for (size_t i = 0; i < in_dims.size(); ++i) {
     dst_step[i] = 1;
   }
   int src_step[LEN];
-  for (int i = 0; i < in_dims.size(); ++i) {
+  for (size_t i = 0; i < in_dims.size(); ++i) {
     src_step[i] = 1;
   }
   int out_num = out_dims[in_dims.size() - 1];
@@ -69,7 +69,7 @@ static void slice_ref(const float* input,
   for (int dst_id = 0; dst_id < out_num; dst_id++) {
     int src_id = 0;
     int index_id = dst_id;
-    for (int j = 0; j < out_dims.size(); j++) {
+    for (size_t j = 0; j < out_dims.size(); j++) {
       int cur_id = index_id / dst_step[j];
       index_id = index_id % dst_step[j];
       src_id += (cur_id + real_starts[j]) * src_step[j];
@@ -409,7 +409,7 @@ void test_tensor_case3(lite::Tensor x, lite::Tensor out) {
   lite::Tensor starts_tensor, ends_tensor;
   starts_tensor.Resize(DDim({3}));
   ends_tensor.Resize(DDim({3}));
-  for (int i = 0; i < starts.size(); ++i) {
+  for (size_t i = 0; i < starts.size(); ++i) {
     starts_tensor.mutable_data()[i] = starts[i];
     ends_tensor.mutable_data()[i] = ends[i];
   }
diff --git a/lite/kernels/x86/stack_compute.h b/lite/kernels/x86/stack_compute.h
index 08b3515948750a5cb36627f0349c852e597619e6..6921430224a77adad0150e271ca634433700e5d6 100644
--- a/lite/kernels/x86/stack_compute.h
+++ b/lite/kernels/x86/stack_compute.h
@@ -47,7 +47,7 @@ class StackCompute : public KernelLite {
     int pre = 1, post = 1;
     auto dim = x[0]->dims();
     for (int i = 0; i < axis; ++i) pre *= dim[i];
-    for (int i = axis; i < dim.size(); ++i) post *= dim[i];
+    for (size_t i = axis; i < dim.size(); ++i) post *= dim[i];
 
     auto x_data_arr = x_datas.data();
diff --git a/lite/kernels/x86/var_conv_2d_compute.h b/lite/kernels/x86/var_conv_2d_compute.h
index 1bed39f479c87636ff217c8fd7234ea2c5bd5904..36361340ae2b2a32604fe59f3bc73c785a89028f 100644
--- a/lite/kernels/x86/var_conv_2d_compute.h
+++ b/lite/kernels/x86/var_conv_2d_compute.h
@@ -44,7 +44,7 @@ class VarConv2DCompute : public KernelLite {
     // 2-D lod info.
     // const auto& offset_x = in_col->lod()[0];
     // const auto& offset_y = in_row->lod()[0];
-    CHECK_EQ(param.X->lod().size(), 3) << "input lod size should be 3!";
+    CHECK_EQ(param.X->lod().size(), 3u) << "input lod size should be 3!";
     const auto& offset_y = param.X->lod()[1];
     const auto& offset_x = param.X->lod()[2];
diff --git a/lite/model_parser/model_parser_test.cc b/lite/model_parser/model_parser_test.cc
index d9c0f501c37862236cacd2624dc70c8cf1dacc86..16794a525142ad1ad76695dd4aaac003cba32daa 100644
--- a/lite/model_parser/model_parser_test.cc
+++ b/lite/model_parser/model_parser_test.cc
@@ -107,7 +107,7 @@ TEST(ModelParser, LoadParamNaive) {
   ASSERT_EQ(bg_lod, tensor.lod());
   ASSERT_EQ(tensor.data_size(), size);
   auto* data = tensor.data();
-  for (int i = 0; i < size; ++i) {
+  for (size_t i = 0; i < size; ++i) {
     EXPECT_NEAR(bg_data[i], data[i], 1e-6);
   }
 }
diff --git a/lite/operators/elementwise_ops.cc b/lite/operators/elementwise_ops.cc
index f4debc39a0d480f38e6d37e8e60d516def7f0b55..3996c933407233538a62ae9e197978f799ce06b0 100644
--- a/lite/operators/elementwise_ops.cc
+++ b/lite/operators/elementwise_ops.cc
@@ -35,7 +35,8 @@ bool ElementwiseOp::InferShapeImpl() const {
     auto out_lod = param_.Out->mutable_lod();
     *out_lod = param_.X->lod();
   } else {
-    int max_dim = (x_dim.size() > y_dim.size() ? x_dim.size() : y_dim.size());
+    size_t max_dim =
+        (x_dim.size() > y_dim.size() ? x_dim.size() : y_dim.size());
     int axis = param_.axis;
     axis = (axis == -1 ? std::abs(static_cast(x_dim.size() - y_dim.size()))
                        : axis);
@@ -48,12 +49,12 @@ bool ElementwiseOp::InferShapeImpl() const {
         y_dims_array[i] = 1;
       }
       if (axis + y_dim.size() < max_dim) {
-        for (int i = axis + y_dim.size(); i < max_dim; ++i) {
+        for (size_t i = axis + y_dim.size(); i < max_dim; ++i) {
          y_dims_array[i] = 1;
        }
      }
      x_dims_array = x_dim.Vectorize();
-      for (int i = 0; i < y_dim.size(); ++i) {
+      for (size_t i = 0; i < y_dim.size(); ++i) {
        y_dims_array[i + axis] = y_dim[i];
      }
    } else {
@@ -61,16 +62,16 @@ bool ElementwiseOp::InferShapeImpl() const {
         x_dims_array[i] = 1;
       }
       if (axis + x_dim.size() < max_dim) {
-        for (int i = axis + x_dim.size(); i < max_dim; ++i) {
+        for (size_t i = axis + x_dim.size(); i < max_dim; ++i) {
          x_dims_array[i] = 1;
        }
      }
      y_dims_array = y_dim.Vectorize();
-      for (int i = 0; i < x_dim.size(); ++i) {
+      for (size_t i = 0; i < x_dim.size(); ++i) {
        x_dims_array[i + axis] = x_dim[i];
      }
    }
-    for (int i = 0; i < max_dim; i++) {
+    for (size_t i = 0; i < max_dim; i++) {
      if (x_dims_array[i] == -1 || y_dims_array[i] == -1) {
        out_dims_array[i] = -1;
      } else {
diff --git a/lite/operators/expand_op.cc b/lite/operators/expand_op.cc
index 8e40a3b236609b1e83b5224efb462a1f803764df..ccb26e71dbe5dac18ecc5220d3697c737aee1c91 100644
--- a/lite/operators/expand_op.cc
+++ b/lite/operators/expand_op.cc
@@ -27,7 +27,7 @@ bool ExpandOpLite::CheckShape() const {
   CHECK_EQ(expand_size, x_dims_size)
       << "The number of expand_times size must be qual to the rank of "
         "Input(X).";
-  CHECK_LE(param_.X->dims().size(), 6)
+  CHECK_LE(param_.X->dims().size(), 6u)
       << "The rank of Input(X) must not be greater than 6.";
   return true;
 }
diff --git a/lite/operators/fill_constant_batch_size_like_op.cc b/lite/operators/fill_constant_batch_size_like_op.cc
index 5b0ebb38e717afea4dabe011c0161248e2113a02..b14d8c59a4ecc857d84ff4debac1740ea6fddd20 100644
--- a/lite/operators/fill_constant_batch_size_like_op.cc
+++ b/lite/operators/fill_constant_batch_size_like_op.cc
@@ -22,7 +22,7 @@ namespace operators {
 
 bool FillConstantBatchSizeLikeOp::CheckShape() const {
   CHECK(param_.out);
   CHECK(param_.input);
-  CHECK_GT(param_.shape.size(), 0);
+  CHECK_GT(param_.shape.size(), 0u);
   CHECK_GE(param_.input_dim_idx, 0);
   CHECK_GE(param_.output_dim_idx, 0);
   return true;
diff --git a/lite/operators/fill_constant_op.cc b/lite/operators/fill_constant_op.cc
index 565c4bbd16e01af340e728e28866268c1a845760..929966d57e05c368ce0e919804270ddacc9c8f93 100644
--- a/lite/operators/fill_constant_op.cc
+++ b/lite/operators/fill_constant_op.cc
@@ -34,7 +34,7 @@ bool FillConstantOp::InferShapeImpl() const {
       out_shape.push_back(shape_tensor_data[i]);
     }
   } else if (!shape_tensor_list.empty()) {
-    for (int i = 0; i < shape_tensor_list.size(); i++) {
+    for (size_t i = 0; i < shape_tensor_list.size(); i++) {
       out_shape.push_back(shape_tensor_list[i]->data()[0]);
     }
   } else if (!param_.shape.empty()) {
diff --git a/lite/operators/flatten_op.cc b/lite/operators/flatten_op.cc
index b270dbf52f9a19f574e6f8967ff93e3a013e5737..300d516d6aa86799466ce6b02fb06212df1122f4 100644
--- a/lite/operators/flatten_op.cc
+++ b/lite/operators/flatten_op.cc
@@ -32,7 +32,7 @@ bool FlattenOp::InferShapeImpl() const {
   *out_lod = param_.x->lod();
 
   int64_t outer = 1, inner = 1;
-  for (int i = 0; i < x_dims.size(); ++i) {
+  for (size_t i = 0; i < x_dims.size(); ++i) {
     if (i < axis_) {
       outer *= x_dims[i];
     } else {
diff --git a/lite/operators/interpolate_op.cc b/lite/operators/interpolate_op.cc
index 0ef22e42903842ac41e9aca010f78796b5a32fcc..a96a602764ce5ea9ac7707ea43b58476e54d23f5 100644
--- a/lite/operators/interpolate_op.cc
+++ b/lite/operators/interpolate_op.cc
@@ -48,14 +48,14 @@ bool InterpolateOp::InferShapeImpl() const {
   auto OutSize = param_.OutSize;
   auto Scale = param_.Scale;
   if (!SizeTensor.empty()) {
-    CHECK_EQ(SizeTensor.size(), 2)
+    CHECK_EQ(SizeTensor.size(), 2u)
        << "Input(SizeTensor)'size of Op(interpolate) must be 2. "
           "Attr(out_shape)'s length must be 2 for 4-D input tensor.";
     out_h = SizeTensor[0]->data()[0];
     out_w = SizeTensor[1]->data()[0];
   } else if (OutSize) {
     auto OutSize_dims = OutSize->dims();
-    CHECK_EQ(OutSize_dims.size(), 1) << "Input(OutSize)'s dims size must be 1";
+    CHECK_EQ(OutSize_dims.size(), 1u) << "Input(OutSize)'s dims size must be 1";
     CHECK_EQ(OutSize_dims[0], 2) << "OutSize's dim[0] must be 2";
     auto OutSize_data = OutSize->data();
     out_h = OutSize_data[0];
diff --git a/lite/operators/pool_op.h b/lite/operators/pool_op.h
index 3fcf37e6348628d489e9a2097e2c8dac7eba3e3c..97f4a8a0083550fdcb0bc2d011e5e33d2d02011d 100644
--- a/lite/operators/pool_op.h
+++ b/lite/operators/pool_op.h
@@ -105,7 +105,7 @@ inline void UpdatePadding(std::vector *paddings,
                          const std::vector &ksize) {
   // when padding_algorithm is "VALID" or "SAME"
   if (padding_algorithm == "SAME") {
-    for (int i = 0; i < strides.size(); ++i) {
+    for (size_t i = 0; i < strides.size(); ++i) {
       int out_size = (data_dims[i + 2] + strides[i] - 1) / strides[i];
       int pad_sum =
          std::max((out_size - 1) * strides[i] + ksize[i] - data_dims[i + 2],
diff --git a/lite/operators/reduce_mean_op.cc b/lite/operators/reduce_mean_op.cc
index c5baca5e87068d267ada21854b7769bf2bc19461..0c788f35db3ce49657e6ad176f0d5f9c3c466ada 100644
--- a/lite/operators/reduce_mean_op.cc
+++ b/lite/operators/reduce_mean_op.cc
@@ -29,7 +29,7 @@ bool ReduceMeanOp::CheckShape() const {
   auto x_dims = param_.X->dims();
   int x_rank = x_dims.size();
   if (dims.size() != 0) {
-    for (int i = 0; i < dims.size(); i++) {
+    for (size_t i = 0; i < dims.size(); i++) {
       if (dims[i] < 0) {
         dims[i] = x_rank + dims[i];
       }
@@ -46,7 +46,7 @@ bool ReduceMeanOp::InferShapeImpl() const {
   bool keep_dim = param_.keep_dim;
   auto x_rank = x_dims.size();
   if (dims.size() != 0) {
-    for (int i = 0; i < dims.size(); i++) {
+    for (size_t i = 0; i < dims.size(); i++) {
       if (dims[i] < 0) {
         dims[i] = x_rank + dims[i];
       }
@@ -65,7 +65,7 @@ bool ReduceMeanOp::InferShapeImpl() const {
       out_dims.push_back(1);
     }
   } else {
-    for (int i = 0; i < x_dims.size(); i++) {
+    for (size_t i = 0; i < x_dims.size(); i++) {
       out_dims.push_back(x_dims[i]);
     }
     if (keep_dim) {
diff --git a/lite/operators/reshape_op.cc b/lite/operators/reshape_op.cc
index 5cbdd8edc31d7d45ed81176397c9b003d1e346ae..32bc91a3a0b9b852024e2e0f2ea36585e2a29892 100644
--- a/lite/operators/reshape_op.cc
+++ b/lite/operators/reshape_op.cc
@@ -70,7 +70,7 @@ bool ReshapeOp::AttachImpl(const cpp::OpDesc &opdesc, lite::Scope *scope) {
       param_.shape_tensor_vct.push_back(var->GetMutable());
     }
   }
-  CHECK_GT(param_.shape_tensor_vct.size(), 0)
+  CHECK_GT(param_.shape_tensor_vct.size(), 0u)
      << "ShapeError: When `shape` in ReshapeOp is a list or tuple "
         "which contains Tensor, the shape's size can't be zero. "
         "But received shape's size is "
@@ -145,7 +145,7 @@ std::vector ValidateShape(const std::vector &shape,
           << "Only one input dimension of Attr(shape) can be unknown.";
       unk_dim_idx = i;
     } else if (shape[i] == copy_dim_val) {
-      CHECK_LT(static_cast(i), input_dims.size())
+      CHECK_LT(i, input_dims.size())
          << "The index of dimension to copy from input shape must be less "
             "than the size of input shape.";
     } else {
diff --git a/lite/operators/search_fc_op.cc b/lite/operators/search_fc_op.cc
index 3c64f24e48f750b367b75431333401329721a9b9..71e62c2ae729b4e1516a219888b9af3f7d994428 100644
--- a/lite/operators/search_fc_op.cc
+++ b/lite/operators/search_fc_op.cc
@@ -41,11 +41,11 @@ bool SearchFcOpLite::CheckShape() const {
   CHECK_OR_FALSE(param_.Out);
 
   auto x_dims = param_.X->dims();
-  CHECK_EQ(x_dims.size(), 2) << "The rank of X(Input) should be 2.";
+  CHECK_EQ(x_dims.size(), 2u) << "The rank of X(Input) should be 2.";
   auto w_dims = param_.W->dims();
-  CHECK_EQ(w_dims.size(), 2) << "W should be 2-D tensor.";
+  CHECK_EQ(w_dims.size(), 2u) << "W should be 2-D tensor.";
   auto b_dims = param_.b->dims();
-  CHECK_EQ(b_dims.size(), 1) << "b should be 1-D tensor.";
+  CHECK_EQ(b_dims.size(), 1u) << "b should be 1-D tensor.";
   CHECK_EQ(w_dims[1], x_dims[1]) << "wrong shape: w_dims[1] != x_dims[1]";
   return true;
 }
diff --git a/lite/operators/slice_op.cc b/lite/operators/slice_op.cc
index cf7d94535cce5fa32d0f917c9d39e4746cee1c30..ecbcc5c2c5925d320c0334889634e57ed894695f 100644
--- a/lite/operators/slice_op.cc
+++ b/lite/operators/slice_op.cc
@@ -22,7 +22,7 @@ namespace operators {
 bool SliceOp::CheckShape() const {
   CHECK_OR_FALSE(param_.X);
   CHECK_OR_FALSE(param_.Out);
-  CHECK_LT(param_.X->dims().size(), 7)
+  CHECK_LT(param_.X->dims().size(), 7u)
      << "The rank of input X should be less than 7";
   return true;
 }
@@ -67,7 +67,7 @@ bool SliceOp::InferShapeImpl() const {
     }
     out_dims[decrease_axis[i]] = 0;
   }
-  for (int i = 0; i < out_dims.size(); ++i) {
+  for (size_t i = 0; i < out_dims.size(); ++i) {
     if (out_dims[i] != 0) {
       new_out_shape.push_back(out_dims[i]);
     }
@@ -108,7 +108,7 @@ bool SliceOp::AttachImpl(const cpp::OpDesc &opdesc, lite::Scope *scope) {
   // The priority: StartsTensor > StartsTensorList > attr(starts).
   // The priority: EndsTensor > EndsTensorList > attr(ends).
-  int starts_size, ends_size;
+  size_t starts_size, ends_size;
   if (opdesc.HasAttr("starts")) {
     param_.starts = opdesc.GetAttr>("starts");
   }
@@ -129,7 +129,7 @@ bool SliceOp::AttachImpl(const cpp::OpDesc &opdesc, lite::Scope *scope) {
       param_.StartsTensorList.push_back(
           scope->FindVar(var)->GetMutable());
     }
-    CHECK_GT(param_.StartsTensorList.size(), 0)
+    CHECK_GT(param_.StartsTensorList.size(), 0u)
        << "StartsTensorList size can't be zero";
     starts_size = param_.StartsTensorList.size();
   }
@@ -141,7 +141,7 @@ bool SliceOp::AttachImpl(const cpp::OpDesc &opdesc, lite::Scope *scope) {
       param_.EndsTensorList.push_back(
           scope->FindVar(var)->GetMutable());
     }
-    CHECK_GT(param_.EndsTensorList.size(), 0)
+    CHECK_GT(param_.EndsTensorList.size(), 0u)
        << "EndsTensorList size can't be zero";
     ends_size = param_.EndsTensorList.size();
   }
diff --git a/lite/operators/split_op.cc b/lite/operators/split_op.cc
index 71deb5631dd3523ebb0367b7db5e4049b785be7b..14cff7d692e3aaa37d95233931760f37c31e4526 100644
--- a/lite/operators/split_op.cc
+++ b/lite/operators/split_op.cc
@@ -67,7 +67,7 @@ bool SplitOp::InferShapeImpl() const {
     axis = param_.axis_tensor->data()[0];
   }
 
-  for (int j = 0; j < outs_dims.size(); ++j) {
+  for (size_t j = 0; j < outs_dims.size(); ++j) {
     outs[j]->Resize(outs_dims[j]);
   }
diff --git a/lite/operators/squeeze_op.cc b/lite/operators/squeeze_op.cc
index 633a6b4d4e45fd30bd72c8dcdfbbd96b8a8e8ebe..c34ad06debb0c4bb99d083bc7938ea26b2dcac9f 100644
--- a/lite/operators/squeeze_op.cc
+++ b/lite/operators/squeeze_op.cc
@@ -28,7 +28,7 @@ static DDim GetOutputShape(const std::vector &squeeze_dims,
   // Determines number of dimensions of output tensor after squeeze.
   // Mark and count the dimensions need to be squeezed
   if (num_squeeze_dims == 0) {
-    for (int idx = 0; idx < in_dims.size(); ++idx) {
+    for (size_t idx = 0; idx < in_dims.size(); ++idx) {
       if (in_dims[idx] == 1) {
         should_squeeze[idx] = true;
         ++cnt_squeezed_dims;
@@ -57,7 +57,7 @@ static DDim GetOutputShape(const std::vector &squeeze_dims,
   // Make output dimensions
   std::vector output_shape(in_dims.size() - cnt_squeezed_dims, 0);
-  for (int in_idx = 0, out_idx = 0; in_idx < in_dims.size(); ++in_idx) {
+  for (size_t in_idx = 0, out_idx = 0; in_idx < in_dims.size(); ++in_idx) {
     if (!should_squeeze[in_idx]) {
       output_shape[out_idx++] = in_dims[in_idx];
     }
diff --git a/lite/operators/unsqueeze_op.cc b/lite/operators/unsqueeze_op.cc
index b5ae90248abb4f2496a4dbca1c12317cf3a7d325..0a7487d34eeb6fe149f956e2f48bdb411a690f14 100644
--- a/lite/operators/unsqueeze_op.cc
+++ b/lite/operators/unsqueeze_op.cc
@@ -75,7 +75,7 @@ bool UnsqueezeOp::InferShapeImpl() const {
     final_axes = std::vector(axes_tensor_data,
                              axes_tensor_data + axes_tensor->numel());
   } else if (!axes_tensor_vct.empty()) {
-    for (int i = 0; i < axes_tensor_vct.size(); i++) {
+    for (size_t i = 0; i < axes_tensor_vct.size(); i++) {
       final_axes.push_back(axes_tensor_vct[i]->data()[0]);
     }
   } else {