From c0656dcb1a41c6c95f7c9bf49fe9b4d8c6ec8f7c Mon Sep 17 00:00:00 2001
From: Tao Luo
Date: Thu, 28 Nov 2019 12:16:13 +0800
Subject: [PATCH] remove -Wno-error=sign-compare, make warning as error (#21358)

* remove -Wno-error=sign-compare, make warning as error

test=develop test=document_fix

* fix exist compile warning

test=develop
---
 cmake/flags.cmake                                |  2 --
 paddle/fluid/framework/data_feed.cc              |  8 ++++----
 paddle/fluid/framework/data_set.cc               |  4 ++--
 paddle/fluid/framework/data_set.h                |  2 +-
 paddle/fluid/framework/dist_multi_trainer.cc     |  2 +-
 paddle/fluid/framework/downpour_worker.cc        | 16 +++++++++-------
 paddle/fluid/framework/pull_dense_worker.cc      | 13 +++++++------
 paddle/fluid/imperative/tests/test_tracer.cc     | 12 ++++++------
 paddle/fluid/operators/concat_op.h               |  2 +-
 paddle/fluid/operators/conv_op.cc                |  2 +-
 paddle/fluid/operators/conv_op.h                 |  6 +++---
 .../fluid/operators/distributed/communicator.cc  | 14 +++++++-------
 .../fluid/operators/distributed/communicator.h   |  2 +-
 .../operators/distributed/communicator_test.cc   |  4 ++--
 .../operators/distributed/parameter_prefetch.cc  |  6 +++---
 .../operators/distributed/parameter_send.cc      |  4 ++--
 .../distributed/request_handler_impl.cc          |  2 +-
 paddle/fluid/operators/fused/conv_fusion_op.cc   |  2 +-
 paddle/fluid/operators/jit/gen/matmul.cc         |  2 +-
 paddle/fluid/operators/math/padding.h            |  4 ++--
 paddle/fluid/operators/pool_op.h                 |  4 ++--
 paddle/fluid/operators/split_op.h                |  2 +-
 paddle/fluid/operators/uniform_random_op.h       |  2 +-
 23 files changed, 59 insertions(+), 58 deletions(-)

diff --git a/cmake/flags.cmake b/cmake/flags.cmake
index d67098e99e3..27ec2eca6bb 100644
--- a/cmake/flags.cmake
+++ b/cmake/flags.cmake
@@ -148,7 +148,6 @@ set(COMMON_FLAGS
     -Wno-unused-parameter
     -Wno-unused-function
     -Wno-error=literal-suffix
-    -Wno-error=sign-compare
     -Wno-error=unused-local-typedefs
     -Wno-error=parentheses-equality # Warnings in pybind11
     -Wno-error=ignored-attributes # Warnings in Eigen, gcc 6.3
@@ -183,7 +182,6 @@ set(GPU_COMMON_FLAGS
     -Wdelete-non-virtual-dtor
     -Wno-unused-parameter
     -Wno-unused-function
-    -Wno-error=sign-compare
     -Wno-error=literal-suffix
     -Wno-error=unused-local-typedefs
     -Wno-error=unused-function # Warnings in Numpy Header.
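With -Wno-error=sign-compare dropped from COMMON_FLAGS and GPU_COMMON_FLAGS, any signed/unsigned comparison that previously compiled with only a warning now breaks the build, which is why the rest of this patch adjusts loop index types and adds explicit casts. The snippet below is a minimal standalone illustration of the warning and the loop-index fix; it is not part of the Paddle source, and the g++ invocation is just one way to promote the warning to an error.

    // sign_compare_demo.cc -- illustrative only, not a file in this patch.
    // Compile with:  g++ -c -Wall -Werror=sign-compare sign_compare_demo.cc
    #include <cstddef>
    #include <vector>

    int CountPositive(const std::vector<int>& v) {
      int count = 0;
      // for (int i = 0; i < v.size(); ++i)   // int vs. size_t: -Wsign-compare,
      //                                      // rejected once treated as error
      for (std::size_t i = 0; i < v.size(); ++i) {  // index type matches size()
        if (v[i] > 0) ++count;
      }
      return count;
    }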
diff --git a/paddle/fluid/framework/data_feed.cc b/paddle/fluid/framework/data_feed.cc
index c9c663926e6..cdf4e55dd5e 100644
--- a/paddle/fluid/framework/data_feed.cc
+++ b/paddle/fluid/framework/data_feed.cc
@@ -391,7 +391,7 @@ void MultiSlotDataFeed::Init(
     use_slots_is_dense_.push_back(slot.is_dense());
     std::vector<int> local_shape;
     if (slot.is_dense()) {
-      for (size_t j = 0; j < slot.shape_size(); ++j) {
+      for (int j = 0; j < slot.shape_size(); ++j) {
         if (slot.shape(j) > 0) {
           total_dims_without_inductive_[i] *= slot.shape(j);
         }
@@ -400,7 +400,7 @@ void MultiSlotDataFeed::Init(
         }
       }
     }
-    for (size_t j = 0; j < slot.shape_size(); ++j) {
+    for (int j = 0; j < slot.shape_size(); ++j) {
       local_shape.push_back(slot.shape(j));
     }
     use_slots_shape_.push_back(local_shape);
@@ -736,7 +736,7 @@ void MultiSlotInMemoryDataFeed::Init(
     use_slots_is_dense_.push_back(slot.is_dense());
     std::vector<int> local_shape;
     if (slot.is_dense()) {
-      for (size_t j = 0; j < slot.shape_size(); ++j) {
+      for (int j = 0; j < slot.shape_size(); ++j) {
         if (slot.shape(j) > 0) {
           total_dims_without_inductive_[i] *= slot.shape(j);
         }
@@ -745,7 +745,7 @@ void MultiSlotInMemoryDataFeed::Init(
         }
       }
     }
-    for (size_t j = 0; j < slot.shape_size(); ++j) {
+    for (int j = 0; j < slot.shape_size(); ++j) {
      local_shape.push_back(slot.shape(j));
    }
    use_slots_shape_.push_back(local_shape);
diff --git a/paddle/fluid/framework/data_set.cc b/paddle/fluid/framework/data_set.cc
index 3e1f494a749..b904daa84c4 100644
--- a/paddle/fluid/framework/data_set.cc
+++ b/paddle/fluid/framework/data_set.cc
@@ -638,7 +638,7 @@ void MultiSlotDataset::MergeByInsId() {
   }
   auto multi_slot_desc = data_feed_desc_.multi_slot_desc();
   std::vector<std::string> use_slots;
-  for (size_t i = 0; i < multi_slot_desc.slots_size(); ++i) {
+  for (int i = 0; i < multi_slot_desc.slots_size(); ++i) {
     const auto& slot = multi_slot_desc.slots(i);
     if (slot.is_used()) {
       use_slots.push_back(slot.name());
@@ -828,7 +828,7 @@ void MultiSlotDataset::SlotsShuffle(
   timeline.Start();
   auto multi_slot_desc = data_feed_desc_.multi_slot_desc();
   std::set index_slots;
-  for (size_t i = 0; i < multi_slot_desc.slots_size(); ++i) {
+  for (int i = 0; i < multi_slot_desc.slots_size(); ++i) {
     std::string cur_slot = multi_slot_desc.slots(i).name();
     if (slots_to_replace.find(cur_slot) != slots_to_replace.end()) {
       index_slots.insert(i);
diff --git a/paddle/fluid/framework/data_set.h b/paddle/fluid/framework/data_set.h
index 7c8fa461550..aa82b66305e 100644
--- a/paddle/fluid/framework/data_set.h
+++ b/paddle/fluid/framework/data_set.h
@@ -215,7 +215,7 @@ class DatasetImpl : public Dataset {
   bool merge_by_insid_;
   bool parse_ins_id_;
   bool parse_content_;
-  int merge_size_;
+  size_t merge_size_;
   bool slots_shuffle_fea_eval_ = false;
   int preload_thread_num_;
   std::mutex global_index_mutex_;
diff --git a/paddle/fluid/framework/dist_multi_trainer.cc b/paddle/fluid/framework/dist_multi_trainer.cc
index 98ff53deb7b..9057549ed5c 100644
--- a/paddle/fluid/framework/dist_multi_trainer.cc
+++ b/paddle/fluid/framework/dist_multi_trainer.cc
@@ -152,7 +152,7 @@ void DistMultiTrainer::Finalize() {
   for (auto &th : threads_) {
     th.join();
   }
-  for (int i = 0; i < need_merge_var_names_.size(); i++) {
+  for (size_t i = 0; i < need_merge_var_names_.size(); i++) {
     Variable *root_var = root_scope_->FindVar(need_merge_var_names_[i]);
     if (root_var == nullptr) {
       continue;
diff --git a/paddle/fluid/framework/downpour_worker.cc b/paddle/fluid/framework/downpour_worker.cc
index 60dbe3c2f94..135667cc061 100644
--- a/paddle/fluid/framework/downpour_worker.cc
+++ b/paddle/fluid/framework/downpour_worker.cc
@@ -180,7 +180,7 @@ std::pair<int64_t, int64_t> GetTensorBound(LoDTensor* tensor, int index) {
   }
 }
 
-bool CheckValidOutput(LoDTensor* tensor, int batch_size) {
+bool CheckValidOutput(LoDTensor* tensor, size_t batch_size) {
   auto& dims = tensor->dims();
   if (dims.size() != 2) return false;
   if (tensor->lod().size() != 0) {
@@ -189,7 +189,7 @@ bool CheckValidOutput(LoDTensor* tensor, int batch_size) {
       return false;
     }
   } else {
-    if (dims[0] != batch_size) {
+    if (dims[0] != static_cast<int64_t>(batch_size)) {
       return false;
     }
   }
@@ -329,7 +329,8 @@ void DownpourWorker::FillSparseValue(size_t table_idx) {
       }
       memcpy(ptr + table.emb_dim() * index, fea_value[fea_idx].data(),
              sizeof(float) * table.emb_dim());
-      if (is_nid && index == tensor->lod()[0][nid_ins_index]) {
+      if (is_nid &&
+          static_cast<size_t>(index) == tensor->lod()[0][nid_ins_index]) {
         nid_show_.push_back(fea_value[fea_idx][0]);
         ++nid_ins_index;
       }
@@ -346,7 +347,8 @@ void DownpourWorker::FillSparseValue(size_t table_idx) {
       }
      memcpy(ptr + table.emb_dim() * index, fea_value[fea_idx].data() + 2,
             sizeof(float) * table.emb_dim());
-      if (is_nid && index == tensor->lod()[0][nid_ins_index]) {
+      if (is_nid &&
+          static_cast<size_t>(index) == tensor->lod()[0][nid_ins_index]) {
         nid_show_.push_back(fea_value[fea_idx][0]);
         ++nid_ins_index;
       }
@@ -402,7 +404,7 @@ void DownpourWorker::AdjustInsWeight() {
   int64_t nid_adjw_num = 0;
   double nid_adjw_weight = 0.0;
   size_t ins_index = 0;
-  for (int i = 0; i < len; ++i) {
+  for (size_t i = 0; i < len; ++i) {
     float nid_show = nid_show_[i];
     VLOG(3) << "nid_show " << nid_show;
     if (nid_show < 0) {
@@ -970,7 +972,7 @@ void DownpourWorker::TrainFiles() {
       }
     }
     if (need_dump_field_) {
-      int batch_size = device_reader_->GetCurBatchSize();
+      size_t batch_size = device_reader_->GetCurBatchSize();
       std::vector<std::string> ars(batch_size);
       for (auto& ar : ars) {
         ar.clear();
@@ -990,7 +992,7 @@ void DownpourWorker::TrainFiles() {
        if (!CheckValidOutput(tensor, batch_size)) {
          continue;
        }
-        for (int i = 0; i < batch_size; ++i) {
+        for (size_t i = 0; i < batch_size; ++i) {
          auto output_dim = tensor->dims()[1];
          std::string output_dimstr =
              boost::lexical_cast<std::string>(output_dim);
diff --git a/paddle/fluid/framework/pull_dense_worker.cc b/paddle/fluid/framework/pull_dense_worker.cc
index 20d7f98e936..1956885d666 100644
--- a/paddle/fluid/framework/pull_dense_worker.cc
+++ b/paddle/fluid/framework/pull_dense_worker.cc
@@ -32,8 +32,8 @@ void PullDenseWorker::Initialize(const TrainerDesc& param) {
   threshold_ = param_.threshold();
   thread_num_ = param_.device_num();
   sleep_time_ms_ = param_.sleep_time_ms();
-  for (size_t i = 0;
-       i < dwp_param_.program_config(0).pull_dense_table_id_size(); ++i) {
+  for (int i = 0; i < dwp_param_.program_config(0).pull_dense_table_id_size();
+       ++i) {
     uint64_t tid = static_cast<uint64_t>(
         dwp_param_.program_config(0).pull_dense_table_id(i));
     TableParameter table;
@@ -67,7 +67,7 @@ void PullDenseWorker::Wait(std::vector<::std::future>* status_vec) {
     }
   }
 
-  int MAX_FAIL_NUM = 20;
+  size_t MAX_FAIL_NUM = 20;
   if (pull_dense_fail_times_ > MAX_FAIL_NUM) {
     LOG(FATAL) << "Pull Dense Failed Times More Than " << MAX_FAIL_NUM
                << " Times";
@@ -85,8 +85,8 @@ void PullDenseWorker::Stop() {
 
 void PullDenseWorker::PullDense(bool force_update) {
   pull_dense_status_.resize(0);
-  for (size_t i = 0;
-       i < dwp_param_.program_config(0).pull_dense_table_id_size(); ++i) {
+  for (int i = 0; i < dwp_param_.program_config(0).pull_dense_table_id_size();
+       ++i) {
     uint64_t tid = static_cast<uint64_t>(
         dwp_param_.program_config(0).pull_dense_table_id(i));
     if (force_update || CheckUpdateParam(tid)) {
@@ -127,7 +127,8 @@ bool PullDenseWorker::CheckUpdateParam(uint64_t table_id) {
   auto& version = training_versions_[table_id];
   current_version_[table_id] =
       *(std::min_element(version.begin(), version.end()));
-  if (current_version_[table_id] - last_versions_[table_id] < threshold_) {
+  if (current_version_[table_id] - last_versions_[table_id] <
+      static_cast<uint64_t>(threshold_)) {
     return false;
   }
   return true;
diff --git a/paddle/fluid/imperative/tests/test_tracer.cc b/paddle/fluid/imperative/tests/test_tracer.cc
index e9d62e376a6..b1f311bb61e 100644
--- a/paddle/fluid/imperative/tests/test_tracer.cc
+++ b/paddle/fluid/imperative/tests/test_tracer.cc
@@ -69,7 +69,7 @@ TEST(test_tracer, test_trace_op) {
   mul_attr_map["use_mkldnn"] = false;
   tracer.TraceOp("mul", ins, outs, mul_attr_map, place, true);
   const auto& out_tensor = vout->Var().Get<framework::LoDTensor>();
-  for (size_t i = 0; i < vout->Var().Get<framework::LoDTensor>().numel(); i++) {
+  for (int i = 0; i < vout->Var().Get<framework::LoDTensor>().numel(); i++) {
     ASSERT_EQ(out_tensor.data<float>()[i], 20.0);
   }
 }
@@ -108,7 +108,7 @@ TEST(test_tracer, test_trace_op_with_backward) {
   mul_attr_map["use_mkldnn"] = false;
   tracer.TraceOp("mul", ins, outs, mul_attr_map, place, true);
   const auto& out_tensor = vout->Var().Get<framework::LoDTensor>();
-  for (size_t i = 0; i < vout->Var().Get<framework::LoDTensor>().numel(); i++) {
+  for (int i = 0; i < vout->Var().Get<framework::LoDTensor>().numel(); i++) {
     ASSERT_EQ(out_tensor.data<float>()[i], 20.0);
   }
 }
@@ -239,14 +239,14 @@ TEST(test_tracer, test_trace_op_with_multi_device_inputs) {
 
   framework::LoDTensor rlt;
   framework::TensorCopySync(vout->Var().Get<framework::LoDTensor>(), place,
                             &rlt);
-  for (size_t i = 0; i < rlt.numel(); i++) {
+  for (int i = 0; i < rlt.numel(); i++) {
     ASSERT_EQ(rlt.data<float>()[i], 4.0);
   }
 
   framework::LoDTensor out_grad;
   framework::TensorCopySync(vout->GradVar().Get<framework::LoDTensor>(), place,
                             &out_grad);
-  for (size_t i = 0; i < out_grad.numel(); ++i) {
+  for (int i = 0; i < out_grad.numel(); ++i) {
     ASSERT_EQ(out_grad.data<float>()[i], 1.0);
   }
@@ -254,7 +254,7 @@ TEST(test_tracer, test_trace_op_with_multi_device_inputs) {
 
   framework::TensorCopySync(x_in->GradVar().Get<framework::LoDTensor>(), place,
                             &x_grad);
-  for (size_t i = 0; i < x_grad.numel(); ++i) {
+  for (int i = 0; i < x_grad.numel(); ++i) {
     ASSERT_EQ(x_grad.data<float>()[i], 1.0);
   }
 
@@ -262,7 +262,7 @@ TEST(test_tracer, test_trace_op_with_multi_device_inputs) {
 
   framework::TensorCopySync(y_in->GradVar().Get<framework::LoDTensor>(), place,
                             &y_grad);
-  for (size_t i = 0; i < y_grad.numel(); ++i) {
+  for (int i = 0; i < y_grad.numel(); ++i) {
     ASSERT_EQ(y_grad.data<float>()[i], 1.0);
   }
 }
diff --git a/paddle/fluid/operators/concat_op.h b/paddle/fluid/operators/concat_op.h
index e00d606f644..12e383b1de7 100644
--- a/paddle/fluid/operators/concat_op.h
+++ b/paddle/fluid/operators/concat_op.h
@@ -26,7 +26,7 @@ namespace paddle {
 namespace operators {
 static inline framework::DDim ComputeAndCheckShape(
     const bool is_runtime, const std::vector<framework::DDim>& inputs_dims,
-    const int axis) {
+    const size_t axis) {
   const size_t n = inputs_dims.size();
   auto out_dims = inputs_dims[0];
   size_t in_zero_dims_size = out_dims.size();
diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc
index ce60e97f4b2..263e7e5d549 100644
--- a/paddle/fluid/operators/conv_op.cc
+++ b/paddle/fluid/operators/conv_op.cc
@@ -114,7 +114,7 @@ void ConvOp::InferShape(framework::InferShapeContext* ctx) const {
   if (!channel_last) {
     output_shape.push_back(filter_dims[0]);
   }
-  for (size_t i = 0; i < in_data_dims.size(); ++i) {
+  for (int i = 0; i < in_data_dims.size(); ++i) {
     if ((!ctx->IsRuntime()) &&
         (in_data_dims[i] <= 0 || filter_dims[i + 2] <= 0)) {
       output_shape.push_back(-1);
diff --git a/paddle/fluid/operators/conv_op.h b/paddle/fluid/operators/conv_op.h
index 1f765b66283..5500e6b0d79 100644
--- a/paddle/fluid/operators/conv_op.h
+++ b/paddle/fluid/operators/conv_op.h
@@ -72,8 +72,8 @@ inline void UpdatePaddingAndDilation(std::vector<int>* paddings,
                                      const std::vector<int>& ksize) {
   // set padding size == data_dims.size() * 2
   auto data_shape = framework::vectorize(data_dims);
-  if (paddings->size() == data_dims.size()) {
-    for (size_t i = 0; i < data_dims.size(); ++i) {
+  if (static_cast<int>(paddings->size()) == data_dims.size()) {
+    for (int i = 0; i < data_dims.size(); ++i) {
       int copy_pad = *(paddings->begin() + 2 * i);
       paddings->insert(paddings->begin() + 2 * i + 1, copy_pad);
     }
@@ -85,7 +85,7 @@ inline void UpdatePaddingAndDilation(std::vector<int>* paddings,
 
   // when padding_algorithm is "VALID" or "SAME"
   if (padding_algorithm == "SAME") {
-    for (size_t i = 0; i < data_dims.size(); ++i) {
+    for (int i = 0; i < data_dims.size(); ++i) {
       int out_size = (data_dims[i] + strides[i] - 1) / strides[i];
       int pad_sum =
           std::max((out_size - 1) * strides[i] + ksize[i] - data_shape[i], 0);
diff --git a/paddle/fluid/operators/distributed/communicator.cc b/paddle/fluid/operators/distributed/communicator.cc
index 2027a874630..d3d0658edcd 100644
--- a/paddle/fluid/operators/distributed/communicator.cc
+++ b/paddle/fluid/operators/distributed/communicator.cc
@@ -192,8 +192,8 @@ void AsyncCommunicator::SendThread() {
       auto send_task = [this, &var_name, &var_queue] {
         VLOG(3) << var_name << " merge and send";
         std::vector<std::shared_ptr<Variable>> vars;
-        size_t merged_var_num = 0;
-        size_t wait_times = 0;
+        int merged_var_num = 0;
+        int wait_times = 0;
         while (merged_var_num < FLAGS_communicator_max_merge_var_num) {
           if (var_queue->Size() == 0) {
             VLOG(3) << "wait_times -> " << wait_times;
@@ -254,7 +254,7 @@ void AsyncCommunicator::SendThread() {
 void AsyncCommunicator::RecvThread() {
   VLOG(3) << "RecvThread start!";
   while (running_) {
-    auto grad_num = grad_num_.load();
+    int grad_num = grad_num_.load();
     if (grad_num > FLAGS_communicator_min_send_grad_num_before_recv) {
       VLOG(1) << "current grad num " << grad_num;
       RecvAll();
@@ -538,7 +538,7 @@ void GeoSgdCommunicator::Send(const std::vector<std::string> &sparse_var_names,
     int element_number = var_tensor.numel();
     int *var_mutable_data = var_tensor.mutable_data<int>(var_tensor.place());
     // insert ids which has not been record
-    for (size_t j = 0; j < element_number; j++) {
+    for (int j = 0; j < element_number; j++) {
       auto ep_idx = GetSectionIndex(var_mutable_data[j],
                                     absolute_section_[sparse_var_tables[i]]);
       ids_table->at(sparse_var_tables[i])[ep_idx].insert(var_mutable_data[j]);
@@ -559,7 +559,7 @@ void GeoSgdCommunicator::SendThread() {
 
     std::vector<std::future<void>> task_futures;
     task_futures.reserve(send_varname_to_ctx_.size());
-    size_t wait_times = 0;
+    int wait_times = 0;
     while (ids_send_vec_.size() < geo_need_push_nums_) {
       VLOG(4) << "ids_send_vec_ Size: " << ids_send_vec_.size();
       if (need_push_queue_->Size() > 0) {
@@ -747,7 +747,7 @@ void GeoSgdCommunicator::SendUpdateSparseVars(
   auto cpu_ctx = paddle::platform::CPUDeviceContext();
   auto blas = math::GetBlas<paddle::platform::CPUDeviceContext, float>(cpu_ctx);
   float avg = 1 / static_cast<float>(trainer_nums_);
-  for (int y = 0; y < new_rows.size(); y++) {
+  for (size_t y = 0; y < new_rows.size(); y++) {
     auto ids = new_rows[y];
 
     float *x_val = x_value + ids * row_numel;
@@ -876,7 +876,7 @@ void GeoSgdCommunicator::RecvUpdateSparseVars(
 
   auto cpu_ctx = paddle::platform::CPUDeviceContext();
   auto blas = math::GetBlas<paddle::platform::CPUDeviceContext, float>(cpu_ctx);
-  for (int y = 0; y < new_rows.size(); y++) {
+  for (size_t y = 0; y < new_rows.size(); y++) {
     std::vector<float> row_delta(row_numel, 0);
     auto ids = new_rows[y];
diff --git a/paddle/fluid/operators/distributed/communicator.h b/paddle/fluid/operators/distributed/communicator.h
index eb702bec906..7942c5f1667 100644
--- a/paddle/fluid/operators/distributed/communicator.h
+++ b/paddle/fluid/operators/distributed/communicator.h
@@ -418,7 +418,7 @@ class GeoSgdCommunicator : public Communicator {
 
  private:
   int trainer_nums_ = 1;
-  int geo_need_push_nums_ = 100;
+  size_t geo_need_push_nums_ = 100;
   bool is_geo_sgd_ = false;
   Scope* training_scope_;
   std::shared_ptr<Scope> delta_scope_;  // parameter local delta: recv - old
diff --git a/paddle/fluid/operators/distributed/communicator_test.cc b/paddle/fluid/operators/distributed/communicator_test.cc
index 6ffd362e332..e2b69b49da1 100644
--- a/paddle/fluid/operators/distributed/communicator_test.cc
+++ b/paddle/fluid/operators/distributed/communicator_test.cc
@@ -75,7 +75,7 @@ TEST(communicator, merge_selected_rows) {
   auto dims =
       framework::make_ddim({static_cast<int64_t>(rows.size()), width});
   auto *data = slr->mutable_value()->mutable_data<float>(dims, cpu_place);
-  for (auto i = 0; i < rows.size(); ++i) {
+  for (size_t i = 0; i < rows.size(); ++i) {
     for (auto j = 0; j < width; ++j) {
       data[i * width + j] = static_cast<float>(rows[i]);
     }
@@ -97,7 +97,7 @@ TEST(communicator, merge_selected_rows) {
   for (auto i = 0; i < 10; ++i) {
     out_values.push_back(static_cast<float>(i * (10 - i)));
   }
-  for (auto i = 0; i < out_slr.rows().size(); ++i) {
+  for (size_t i = 0; i < out_slr.rows().size(); ++i) {
     ASSERT_EQ(out_slr.rows()[i], i);
     for (auto j = 0; j < width; ++j) {
       ASSERT_EQ(out_data[i * width + j], out_values[i]);
diff --git a/paddle/fluid/operators/distributed/parameter_prefetch.cc b/paddle/fluid/operators/distributed/parameter_prefetch.cc
index c8b8561d673..b6ee2d29df8 100644
--- a/paddle/fluid/operators/distributed/parameter_prefetch.cc
+++ b/paddle/fluid/operators/distributed/parameter_prefetch.cc
@@ -215,7 +215,7 @@ void prefetchs(const std::vector<std::string>& id_var_names,
   std::unordered_set<int64_t> s(ids_union.begin(), ids_union.end());
   ids_union.assign(s.begin(), s.end());
 
-  for (int i = 0; i < table_names.size(); i++) {
+  for (size_t i = 0; i < table_names.size(); i++) {
     tables.push_back(std::make_pair(table_names[i], endpoints[i]));
   }
@@ -230,7 +230,7 @@ void prefetchs(const std::vector<std::string>& id_var_names,
   }
 
   // copy vectors to out vars
-  for (int i = 0; i < out_var_names.size(); i++) {
+  for (size_t i = 0; i < out_var_names.size(); i++) {
     auto& ids = ids_group[i];
     auto* out_t =
         scope.FindVar(out_var_names[i])->GetMutable<framework::LoDTensor>();
@@ -240,7 +240,7 @@ void prefetchs(const std::vector<std::string>& id_var_names,
 
     auto* out_d = out_t->mutable_data<float>(place);
 
-    for (int idx = 0; idx < ids.size(); idx++) {
+    for (size_t idx = 0; idx < ids.size(); idx++) {
       const auto& id = ids[idx];
 
       if (padding_idx != distributed::kNoPadding && id == padding_idx) {
diff --git a/paddle/fluid/operators/distributed/parameter_send.cc b/paddle/fluid/operators/distributed/parameter_send.cc
index 4fe88867a89..c9a0309d043 100644
--- a/paddle/fluid/operators/distributed/parameter_send.cc
+++ b/paddle/fluid/operators/distributed/parameter_send.cc
@@ -50,12 +50,12 @@ inline EP_SPLIT_TABLE_PAIRS GetMultiFieldRpcContext(
   PADDLE_ENFORCE_GT(multi_parts, 0, "multi_parts must >=1");
 
   if (multi_parts == 1) {
-    for (int i = 0; i < rpc_ctx.splited_var_names.size(); i++) {
+    for (size_t i = 0; i < rpc_ctx.splited_var_names.size(); i++) {
       table_pairs.push_back(
           std::make_pair(rpc_ctx.epmap[i], rpc_ctx.splited_var_names[i]));
     }
   } else {
-    for (int i = 0; i < rpc_ctx.splited_var_names.size(); i++) {
+    for (size_t i = 0; i < rpc_ctx.splited_var_names.size(); i++) {
       for (int x = 0; x < multi_parts; x++) {
         auto table = string::Sprintf("%s@%d@PIECE",
                                      rpc_ctx.splited_var_names[i], x);
diff --git a/paddle/fluid/operators/distributed/request_handler_impl.cc b/paddle/fluid/operators/distributed/request_handler_impl.cc
index d0d4f49f49f..a69e097e2a8 100644
--- a/paddle/fluid/operators/distributed/request_handler_impl.cc
+++ b/paddle/fluid/operators/distributed/request_handler_impl.cc
@@ -171,7 +171,7 @@ bool RequestGetHandler::Handle(const std::string& varname,
           auto* data = out_slr->mutable_value()->mutable_data<float>(
               out_dims, origin_tensor.place());
           auto width = dims[1];
-          for (auto i = 0; i < updated_rows.size(); ++i) {
+          for (size_t i = 0; i < updated_rows.size(); ++i) {
            PADDLE_ENFORCE_LT(updated_rows[i], dims[0]);
            memcpy(data + i * width, origin_tensor_data + updated_rows[i] * width,
                   sizeof(float) * width);
diff --git a/paddle/fluid/operators/fused/conv_fusion_op.cc b/paddle/fluid/operators/fused/conv_fusion_op.cc
index 096d48d730b..f4da7ec4ddc 100644
--- a/paddle/fluid/operators/fused/conv_fusion_op.cc
+++ b/paddle/fluid/operators/fused/conv_fusion_op.cc
@@ -137,7 +137,7 @@ class Conv2DFusionOpInferShape : public framework::InferShapeBase {
 
     std::vector<int64_t> output_shape({in_dims[0]});
     output_shape.push_back(filter_dims[0]);
-    for (size_t i = 0; i < in_data_dims.size(); ++i) {
+    for (int i = 0; i < in_data_dims.size(); ++i) {
       if ((!ctx->IsRuntime()) &&
           (in_data_dims[i] <= 0 || filter_dims[i + 2] <= 0)) {
         output_shape.push_back(-1);
diff --git a/paddle/fluid/operators/jit/gen/matmul.cc b/paddle/fluid/operators/jit/gen/matmul.cc
index 9e9ee8df55b..3a455334f58 100644
--- a/paddle/fluid/operators/jit/gen/matmul.cc
+++ b/paddle/fluid/operators/jit/gen/matmul.cc
@@ -41,7 +41,7 @@ void MatMulJitCode::genCode() {
   for (size_t g = 0; g < groups.size(); ++g) {
     size_t x_offset = 0;
     size_t wgt_offset_tmp = 0;
-    for (int i = 0; i < g; ++i) {
+    for (size_t i = 0; i < g; ++i) {
       wgt_offset_tmp += groups[i] * block_len;
     }
     for (int k = 0; k < k_; ++k) {
diff --git a/paddle/fluid/operators/math/padding.h b/paddle/fluid/operators/math/padding.h
index 83cc9a2ca9b..63f793433de 100644
--- a/paddle/fluid/operators/math/padding.h
+++ b/paddle/fluid/operators/math/padding.h
@@ -122,8 +122,8 @@ void PaddingGradFunctor(int rank, const framework::ExecutionContext& context,
 inline bool IsSymmetricPadding(const std::vector<int>& pads,
                                const int data_dim) {
   bool is_sys_pad = true;
-  if (pads.size() == data_dim * 2) {
-    for (size_t i = 0; i < data_dim; ++i) {
+  if (static_cast<int>(pads.size()) == data_dim * 2) {
+    for (int i = 0; i < data_dim; ++i) {
       if (pads[2 * i] != pads[2 * i + 1]) {
         is_sys_pad = false;
         return is_sys_pad;
diff --git a/paddle/fluid/operators/pool_op.h b/paddle/fluid/operators/pool_op.h
index 3b6246cc887..5db94f95b99 100644
--- a/paddle/fluid/operators/pool_op.h
+++ b/paddle/fluid/operators/pool_op.h
@@ -65,8 +65,8 @@ inline void UpdatePadding(std::vector<int>* paddings, const bool global_pooling,
                           const std::vector<int>& ksize) {
   // set padding size == data_dims.size() * 2
   auto data_shape = framework::vectorize(data_dims);
-  if (paddings->size() == data_dims.size()) {
-    for (size_t i = 0; i < data_dims.size(); ++i) {
+  if (static_cast<int>(paddings->size()) == data_dims.size()) {
+    for (int i = 0; i < data_dims.size(); ++i) {
       int copy_pad = *(paddings->begin() + 2 * i);
       paddings->insert(paddings->begin() + 2 * i + 1, copy_pad);
     }
diff --git a/paddle/fluid/operators/split_op.h b/paddle/fluid/operators/split_op.h
index 7a84e13c664..3bad392e1ba 100644
--- a/paddle/fluid/operators/split_op.h
+++ b/paddle/fluid/operators/split_op.h
@@ -94,7 +94,7 @@ static inline std::vector<framework::DDim> UpdateOutsDims(
             framework::make_ddim(sections), in_dims, axis);
       }
     }
-    for (size_t i = 0; i < outs_number; ++i) {
+    for (int i = 0; i < outs_number; ++i) {
       outs_dims[i][axis] = sections[i];
     }
   }
diff --git a/paddle/fluid/operators/uniform_random_op.h b/paddle/fluid/operators/uniform_random_op.h
index b513656bf7f..32e3b034fd2 100644
--- a/paddle/fluid/operators/uniform_random_op.h
+++ b/paddle/fluid/operators/uniform_random_op.h
@@ -44,7 +44,7 @@ inline std::vector<int64_t> GetNewDataFromShapeTensor(
                       &cpu_starts_tensor);
     new_data = cpu_starts_tensor.data();
   }
-  for (size_t i = 0; i < new_data_tensor->numel(); ++i) {
+  for (int i = 0; i < new_data_tensor->numel(); ++i) {
     vec_new_data.push_back(static_cast<int64_t>(*(new_data + i)));
   }
   return vec_new_data;
-- 
GitLab
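The hunks above follow two recurring idioms for silencing -Wsign-compare cleanly. The sketch below restates them outside the Paddle code base; the function and variable names are hypothetical, and only the general facts that protobuf-style size accessors return int while std::vector::size() returns size_t are assumed.

    // idioms_demo.cc -- hypothetical summary example, not part of this patch.
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    void Demo(const std::vector<float>& rows, int shape_size, int64_t dim0,
              std::size_t batch_size) {
      // Idiom 1: give the loop index the same type as its bound.
      for (std::size_t i = 0; i < rows.size(); ++i) {  // size() is size_t
        (void)rows[i];
      }
      for (int j = 0; j < shape_size; ++j) {           // protobuf sizes are int
        (void)j;
      }
      // Idiom 2: when the operands must keep different types, cast explicitly
      // so the comparison is well defined and the warning stays quiet.
      if (dim0 != static_cast<int64_t>(batch_size)) {
        // handle the mismatch
      }
    }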