From ce704ee902132e427584b2317d53f6d0f1f3f09c Mon Sep 17 00:00:00 2001
From: zhangchunle
Date: Fri, 17 Jun 2022 10:19:47 +0800
Subject: [PATCH] fix sign-compare warning (#43401)

---
 paddle/fluid/distributed/ps/service/brpc_ps_client.cc     | 7 ++++---
 paddle/fluid/distributed/ps/service/brpc_ps_server.cc     | 2 +-
 .../distributed/ps/service/communicator/communicator.cc   | 4 ++--
 paddle/fluid/distributed/ps/service/graph_brpc_server.cc  | 7 ++++---
 paddle/fluid/distributed/ps/service/ps_client.cc          | 2 +-
 paddle/fluid/distributed/ps/service/ps_local_client.cc    | 2 +-
 .../distributed/ps/service/ps_service/graph_py_service.cc | 2 +-
 .../distributed/ps/service/ps_service/graph_py_service.h  | 6 +++---
 paddle/fluid/distributed/ps/service/server.cc             | 2 +-
 paddle/fluid/distributed/ps/table/common_graph_table.cc   | 4 ++--
 paddle/fluid/distributed/ps/table/memory_dense_table.cc   | 4 ++--
 paddle/fluid/distributed/ps/table/ssd_sparse_table.cc     | 5 +++--
 paddle/fluid/distributed/ps/table/ssd_sparse_table.h      | 2 +-
 paddle/fluid/distributed/ps/wrapper/fleet.cc              | 6 +++---
 .../distributed/test/brpc_service_sparse_sgd_test.cc      | 2 +-
 paddle/fluid/distributed/test/ctr_accessor_test.cc        | 8 ++++----
 paddle/fluid/distributed/test/memory_sparse_table_test.cc | 2 +-
 paddle/fluid/pybind/fleet_py.cc                           | 8 ++++----
 18 files changed, 39 insertions(+), 36 deletions(-)

diff --git a/paddle/fluid/distributed/ps/service/brpc_ps_client.cc b/paddle/fluid/distributed/ps/service/brpc_ps_client.cc
index 47e3476036d..f43493b10fe 100644
--- a/paddle/fluid/distributed/ps/service/brpc_ps_client.cc
+++ b/paddle/fluid/distributed/ps/service/brpc_ps_client.cc
@@ -136,7 +136,7 @@ int32_t BrpcPsClient::CreateClient2ClientConnection(
     server_ip_port.append(":");
     server_ip_port.append(std::to_string(client_list[i].port));
     _client_channels[i].reset(new brpc::Channel());
-    if (_client_channels[i]->Init(server_ip_port.c_str(), "", &options) != 0) {
+    if (_client_channels[i]->Init(server_ip_port.c_str(), "", &options)) {
       VLOG(0) << "BrpcPSClient connect to Client:" << server_ip_port
               << " Failed! Try again.";
       std::string int_ip_port =
@@ -1195,7 +1195,8 @@ std::future<int32_t> BrpcPsClient::SendClient2ClientMsg(
     int msg_type, int to_client_id, const std::string &msg) {
   auto promise = std::make_shared<std::promise<int32_t>>();
   std::future<int> fut = promise->get_future();
-  if (to_client_id >= _client_channels.size()) {
+  if (to_client_id >= 0 &&
+      static_cast<size_t>(to_client_id) >= _client_channels.size()) {
     VLOG(0) << "to_client_id is out of range clients, which size is "
             << _client_channels.size();
     promise->set_value(-1);
@@ -1778,7 +1779,7 @@ void BrpcPsClient::PushDenseTaskConsume() {
         });
         ++merge_count;
       }
-      for (uint32_t i = 0; i < merge_count; ++i) {
+      for (size_t i = 0; i < merge_count; ++i) {
         merge_status[i].wait();
       }

diff --git a/paddle/fluid/distributed/ps/service/brpc_ps_server.cc b/paddle/fluid/distributed/ps/service/brpc_ps_server.cc
index d859acbb42e..4ca5f9c8207 100644
--- a/paddle/fluid/distributed/ps/service/brpc_ps_server.cc
+++ b/paddle/fluid/distributed/ps/service/brpc_ps_server.cc
@@ -713,7 +713,7 @@ int32_t BrpcPsService::CacheShuffle(Table *table,
   };

   std::vector<Table *> table_ptrs;
-  for (size_t i = 3; i < request.params_size(); ++i) {
+  for (int i = 3; i < request.params_size(); ++i) {
     int table_id = std::stoi(request.params(i));
     Table *table_ptr = _server->GetTable(table_id);
     table_ptrs.push_back(table_ptr);
diff --git a/paddle/fluid/distributed/ps/service/communicator/communicator.cc b/paddle/fluid/distributed/ps/service/communicator/communicator.cc
index c50f1d909cd..edbfd06d55a 100644
--- a/paddle/fluid/distributed/ps/service/communicator/communicator.cc
+++ b/paddle/fluid/distributed/ps/service/communicator/communicator.cc
@@ -681,7 +681,7 @@ void AsyncCommunicator::PushSparseFromTensorAsync(

     if (tensor->lod().size() > 0) {
       for (size_t i = 0; i < tensor->lod()[0].size() - 1; ++i) {
-        for (int j = tensor->lod()[0][i]; j < tensor->lod()[0][i + 1];
+        for (size_t j = tensor->lod()[0][i]; j < tensor->lod()[0][i + 1];
              ++j, output_len += fea_dim) {
           uint64_t real_id = static_cast<uint64_t>(ids[j]);
           if (real_id == padding_id) {
@@ -727,7 +727,7 @@ void AsyncCommunicator::PushSparseFromTensorAsync(
         ++input_idx;
       }
     }
-    CHECK(output_len == g_tensor->numel());
+    CHECK(static_cast<int64_t>(output_len) == g_tensor->numel());
   }

   std::vector<float*> push_g_vec(input_idx, nullptr);
diff --git a/paddle/fluid/distributed/ps/service/graph_brpc_server.cc b/paddle/fluid/distributed/ps/service/graph_brpc_server.cc
index ce9397e511e..8128f2b2adb 100644
--- a/paddle/fluid/distributed/ps/service/graph_brpc_server.cc
+++ b/paddle/fluid/distributed/ps/service/graph_brpc_server.cc
@@ -547,7 +547,8 @@ int32_t GraphBrpcService::sample_neighbors_across_multi_servers(
     seq.push_back(request_idx);
   }
   size_t remote_call_num = request_call_num;
-  if (request2server.size() != 0 && request2server.back() == rank) {
+  if (request2server.size() != 0 &&
+      static_cast<size_t>(request2server.back()) == rank) {
     remote_call_num--;
     local_buffers.resize(node_id_buckets.back().size());
     local_actual_sizes.resize(node_id_buckets.back().size());
@@ -582,7 +583,7 @@ int32_t GraphBrpcService::sample_neighbors_across_multi_servers(
       for (size_t i = 0; i < node_num; i++) {
         if (fail_num > 0 && failed[seq[i]]) {
           size = 0;
-        } else if (request2server[seq[i]] != rank) {
+        } else if (static_cast<size_t>(request2server[seq[i]]) != rank) {
           res[seq[i]]->copy_and_forward(&size, sizeof(int));
         } else {
           size = local_actual_sizes[local_index++];
@@ -596,7 +597,7 @@ int32_t GraphBrpcService::sample_neighbors_across_multi_servers(
       for (size_t i = 0; i < node_num; i++) {
         if (fail_num > 0 && failed[seq[i]]) {
           continue;
-        } else if (request2server[seq[i]] != rank) {
+        } else if (static_cast<size_t>(request2server[seq[i]]) != rank) {
           char temp[actual_size[i] + 1];
           res[seq[i]]->copy_and_forward(temp, actual_size[i]);
           cntl->response_attachment().append(temp, actual_size[i]);
diff --git a/paddle/fluid/distributed/ps/service/ps_client.cc b/paddle/fluid/distributed/ps/service/ps_client.cc
index a0216f2a795..2d02771a2cf 100644
--- a/paddle/fluid/distributed/ps/service/ps_client.cc
+++ b/paddle/fluid/distributed/ps/service/ps_client.cc
@@ -43,7 +43,7 @@ int32_t PSClient::Configure(

   const auto &work_param = _config.worker_param().downpour_worker_param();

-  for (size_t i = 0; i < work_param.downpour_table_param_size(); ++i) {
+  for (int i = 0; i < work_param.downpour_table_param_size(); ++i) {
     auto *accessor = CREATE_PSCORE_CLASS(
         ValueAccessor,
         work_param.downpour_table_param(i).accessor().accessor_class());
diff --git a/paddle/fluid/distributed/ps/service/ps_local_client.cc b/paddle/fluid/distributed/ps/service/ps_local_client.cc
index b6407ccebe5..a52ed1996ff 100644
--- a/paddle/fluid/distributed/ps/service/ps_local_client.cc
+++ b/paddle/fluid/distributed/ps/service/ps_local_client.cc
@@ -23,7 +23,7 @@ namespace distributed {
 int32_t PsLocalClient::Initialize() {
   const auto& downpour_param = _config.server_param().downpour_server_param();
   TableManager::Instance().Initialize();
-  for (size_t i = 0; i < downpour_param.downpour_table_param_size(); ++i) {
+  for (int i = 0; i < downpour_param.downpour_table_param_size(); ++i) {
     auto* table = CREATE_PSCORE_CLASS(
         Table, downpour_param.downpour_table_param(i).table_class());
     table->SetShard(0, 1);
diff --git a/paddle/fluid/distributed/ps/service/ps_service/graph_py_service.cc b/paddle/fluid/distributed/ps/service/ps_service/graph_py_service.cc
index 255c0d3d655..fb65e74b62f 100644
--- a/paddle/fluid/distributed/ps/service/ps_service/graph_py_service.cc
+++ b/paddle/fluid/distributed/ps/service/ps_service/graph_py_service.cc
@@ -51,7 +51,7 @@ void GraphPyService::add_table_feat_conf(std::string table_name,
     int feat_idx = table_feat_mapping[idx][feat_name];
     VLOG(0) << "table_name " << table_name << " mapping id " << idx;
     VLOG(0) << " feat name " << feat_name << " feat id" << feat_idx;
-    if (feat_idx < table_feat_conf_feat_name[idx].size()) {
+    if (static_cast<size_t>(feat_idx) < table_feat_conf_feat_name[idx].size()) {
       // overide
       table_feat_conf_feat_name[idx][feat_idx] = feat_name;
       table_feat_conf_feat_dtype[idx][feat_idx] = feat_dtype;
diff --git a/paddle/fluid/distributed/ps/service/ps_service/graph_py_service.h b/paddle/fluid/distributed/ps/service/ps_service/graph_py_service.h
index 7dd03401256..877214121e5 100644
--- a/paddle/fluid/distributed/ps/service/ps_service/graph_py_service.h
+++ b/paddle/fluid/distributed/ps/service/ps_service/graph_py_service.h
@@ -81,14 +81,14 @@ class GraphPyService {
     graph_proto->set_table_name("cpu_graph_table");
     graph_proto->set_use_cache(false);

-    for (int i = 0; i < id_to_edge.size(); i++)
+    for (size_t i = 0; i < id_to_edge.size(); i++)
       graph_proto->add_edge_types(id_to_edge[i]);

-    for (int i = 0; i < id_to_feature.size(); i++) {
+    for (size_t i = 0; i < id_to_feature.size(); i++) {
       graph_proto->add_node_types(id_to_feature[i]);
       auto feat_node = id_to_feature[i];
       ::paddle::distributed::GraphFeature* g_f = graph_proto->add_graph_feature();
-      for (int x = 0; x < table_feat_conf_feat_name[i].size(); x++) {
+      for (size_t x = 0; x < table_feat_conf_feat_name[i].size(); x++) {
         g_f->add_name(table_feat_conf_feat_name[i][x]);
         g_f->add_dtype(table_feat_conf_feat_dtype[i][x]);
         g_f->add_shape(table_feat_conf_feat_shape[i][x]);
diff --git a/paddle/fluid/distributed/ps/service/server.cc b/paddle/fluid/distributed/ps/service/server.cc
index a6e0f39474b..e7b3271171e 100644
--- a/paddle/fluid/distributed/ps/service/server.cc
+++ b/paddle/fluid/distributed/ps/service/server.cc
@@ -76,7 +76,7 @@ int32_t PSServer::Configure(
   uint32_t barrier_table = UINT32_MAX;
   uint32_t global_step_table = UINT32_MAX;

-  for (size_t i = 0; i < downpour_param.downpour_table_param_size(); ++i) {
+  for (int i = 0; i < downpour_param.downpour_table_param_size(); ++i) {
     auto *table = CREATE_PSCORE_CLASS(
         Table, downpour_param.downpour_table_param(i).table_class());

diff --git a/paddle/fluid/distributed/ps/table/common_graph_table.cc b/paddle/fluid/distributed/ps/table/common_graph_table.cc
index 55a9c794e8e..d3af468482b 100644
--- a/paddle/fluid/distributed/ps/table/common_graph_table.cc
+++ b/paddle/fluid/distributed/ps/table/common_graph_table.cc
@@ -1205,7 +1205,7 @@ uint32_t GraphTable::get_thread_pool_index_by_shard_index(int64_t shard_index) {

 int32_t GraphTable::clear_nodes(int type_id, int idx) {
   auto &search_shards = type_id == 0 ? edge_shards[idx] : feature_shards[idx];
-  for (int i = 0; i < search_shards.size(); i++) {
+  for (size_t i = 0; i < search_shards.size(); i++) {
     search_shards[i]->clear();
   }
   return 0;
@@ -1478,7 +1478,7 @@ std::vector<std::vector<int64_t>> GraphTable::get_all_id(int type_id, int idx,
   std::vector<std::vector<int64_t>> res(slice_num);
   auto &search_shards = type_id == 0 ? edge_shards[idx] : feature_shards[idx];
   std::vector<std::future<std::vector<int64_t>>> tasks;
-  for (int i = 0; i < search_shards.size(); i++) {
+  for (size_t i = 0; i < search_shards.size(); i++) {
     tasks.push_back(_shards_task_pool[i % task_pool_size_]->enqueue(
         [&search_shards, i]() -> std::vector<int64_t> {
           return search_shards[i]->get_all_id();
diff --git a/paddle/fluid/distributed/ps/table/memory_dense_table.cc b/paddle/fluid/distributed/ps/table/memory_dense_table.cc
index ab1361eba05..857850ce50b 100644
--- a/paddle/fluid/distributed/ps/table/memory_dense_table.cc
+++ b/paddle/fluid/distributed/ps/table/memory_dense_table.cc
@@ -81,8 +81,8 @@ int32_t MemoryDenseTable::InitializeValue() {
   fixed_len_params_dim_ = 0;

   for (int x = 0; x < size; ++x) {
-    int dim = common.dims()[x];
-    if (dim != param_dim_) {
+    auto& dim = common.dims()[x];
+    if (static_cast<int>(dim) != param_dim_) {
       fixed_len_params_dim_ += dim;
     } else {
       param_col_ids_.push_back(x);
diff --git a/paddle/fluid/distributed/ps/table/ssd_sparse_table.cc b/paddle/fluid/distributed/ps/table/ssd_sparse_table.cc
index 237d0c9424b..dc77a6c6c51 100644
--- a/paddle/fluid/distributed/ps/table/ssd_sparse_table.cc
+++ b/paddle/fluid/distributed/ps/table/ssd_sparse_table.cc
@@ -625,7 +625,7 @@ int32_t SSDSparseTable::Load(const std::string& path,
 }

 //加载path目录下数据[start_idx, end_idx)
-int32_t SSDSparseTable::Load(size_t start_idx, int end_idx,
+int32_t SSDSparseTable::Load(size_t start_idx, size_t end_idx,
                              const std::vector<std::string>& file_list,
                              const std::string& param) {
   if (start_idx >= file_list.size()) {
@@ -699,7 +699,8 @@ int32_t SSDSparseTable::Load(size_t start_idx, int end_idx,
             ssd_values.emplace_back(std::make_pair((char*)data_buffer_ptr,
                                                    value_size * sizeof(float)));
             data_buffer_ptr += feature_value_size;
-            if (ssd_keys.size() == FLAGS_pserver_load_batch_size) {
+            if (static_cast<int>(ssd_keys.size()) ==
+                FLAGS_pserver_load_batch_size) {
               _db->put_batch(local_shard_id, ssd_keys, ssd_values,
                              ssd_keys.size());
               ssd_keys.clear();
diff --git a/paddle/fluid/distributed/ps/table/ssd_sparse_table.h b/paddle/fluid/distributed/ps/table/ssd_sparse_table.h
index e6be77a4ba9..3e4d3afe59c 100644
--- a/paddle/fluid/distributed/ps/table/ssd_sparse_table.h
+++ b/paddle/fluid/distributed/ps/table/ssd_sparse_table.h
@@ -79,7 +79,7 @@ class SSDSparseTable : public MemorySparseTable {
   virtual int32_t Load(const std::string& path,
                        const std::string& param) override;
   //加载path目录下数据[start_idx, end_idx)
-  virtual int32_t Load(size_t start_idx, int end_idx,
+  virtual int32_t Load(size_t start_idx, size_t end_idx,
                        const std::vector<std::string>& file_list,
                        const std::string& param);
   int64_t LocalSize();
diff --git a/paddle/fluid/distributed/ps/wrapper/fleet.cc b/paddle/fluid/distributed/ps/wrapper/fleet.cc
index 8d6276733e0..bddda8f8fff 100644
--- a/paddle/fluid/distributed/ps/wrapper/fleet.cc
+++ b/paddle/fluid/distributed/ps/wrapper/fleet.cc
@@ -536,7 +536,7 @@ void FleetWrapper::PushSparseFromTensorAsync(
     output_len = 0;

     if (tensor->lod().size() > 0) {
-      for (int i = 0; i < tensor->lod()[0].size() - 1; ++i) {
+      for (size_t i = 0; i < tensor->lod()[0].size() - 1; ++i) {
         for (size_t j = tensor->lod()[0][i]; j < tensor->lod()[0][i + 1];
              ++j, output_len += fea_dim) {
           uint64_t real_id = static_cast<uint64_t>(ids[j]);
@@ -566,7 +566,7 @@ void FleetWrapper::PushSparseFromTensorAsync(
         }
       }
     } else {
-      for (int i = 0; i < len; ++i, output_len += fea_dim) {
+      for (size_t i = 0; i < len; ++i, output_len += fea_dim) {
         uint64_t real_id = static_cast<uint64_t>(ids[i]);
         if (real_id == padding_id) {
           continue;
@@ -592,7 +592,7 @@ void FleetWrapper::PushSparseFromTensorAsync(
         ++input_idx;
       }
     }
-    CHECK(output_len == g_tensor->numel());
+    CHECK(static_cast<int64_t>(output_len) == g_tensor->numel());
   }

   std::vector<float*> push_g_vec(input_idx, nullptr);
diff --git a/paddle/fluid/distributed/test/brpc_service_sparse_sgd_test.cc b/paddle/fluid/distributed/test/brpc_service_sparse_sgd_test.cc
index bade56f239f..7173c762870 100644
--- a/paddle/fluid/distributed/test/brpc_service_sparse_sgd_test.cc
+++ b/paddle/fluid/distributed/test/brpc_service_sparse_sgd_test.cc
@@ -295,7 +295,7 @@ void RunBrpcPushSparse() {
       fea_temp_value_ptr.data(), 0, fea_keys.data(), fea_keys.size(), true);
   pull_update_status.wait();

-  for (size_t idx = 0; idx < tensor->numel(); ++idx) {
+  for (int64_t idx = 0; idx < tensor->numel(); ++idx) {
     EXPECT_FLOAT_EQ(fea_temp_values[idx], fea_values[idx] - 1.0);
   }

diff --git a/paddle/fluid/distributed/test/ctr_accessor_test.cc b/paddle/fluid/distributed/test/ctr_accessor_test.cc
index 51254391a42..bb25fd69916 100644
--- a/paddle/fluid/distributed/test/ctr_accessor_test.cc
+++ b/paddle/fluid/distributed/test/ctr_accessor_test.cc
@@ -222,15 +222,15 @@ TEST(downpour_feature_value_accessor_test, test_update) {
     v.embed_w = value[i][5];

     int idx = 6;
-    for (auto j = 0u; j < acc->common_feature_value.embed_sgd_dim; ++j) {
+    for (int j = 0; j < acc->common_feature_value.embed_sgd_dim; ++j) {
       v.embed_g2sum.push_back(value[i][idx + j]);
     }
     idx += acc->common_feature_value.embed_sgd_dim;
-    for (auto j = 0u; j < acc->common_feature_value.embedx_dim; ++j) {
+    for (int j = 0; j < acc->common_feature_value.embedx_dim; ++j) {
       v.embedx_w.push_back(value[i][idx + j]);
     }
     idx += acc->common_feature_value.embedx_dim;
-    for (auto j = 0u; j < acc->common_feature_value.embedx_sgd_dim; ++j) {
+    for (int j = 0; j < acc->common_feature_value.embedx_sgd_dim; ++j) {
       v.embedx_g2sum.push_back(value[i][idx + j]);
     }

@@ -239,7 +239,7 @@ TEST(downpour_feature_value_accessor_test, test_update) {
     push_v.show = grad[i][1];
     push_v.click = grad[i][2];
     push_v.embed_g = grad[i][3];
-    for (auto j = 0; j < parameter.embedx_dim(); ++j) {
+    for (int j = 0; j < parameter.embedx_dim(); ++j) {
       push_v.embedx_g.push_back(grad[i][4 + j]);
     }

diff --git a/paddle/fluid/distributed/test/memory_sparse_table_test.cc b/paddle/fluid/distributed/test/memory_sparse_table_test.cc
index 1689b7716bb..485d81a7d68 100644
--- a/paddle/fluid/distributed/test/memory_sparse_table_test.cc
+++ b/paddle/fluid/distributed/test/memory_sparse_table_test.cc
@@ -142,7 +142,7 @@ TEST(MemorySparseTable, SGD) {

   // table->PullSparse(pull_values.data(), value);
   for (size_t i = 0; i < init_keys.size(); ++i) {
-    for (size_t j = 2; j < emb_dim + 3; ++j) {
+    for (int j = 2; j < emb_dim + 3; ++j) {
       auto update_val = init_values[i * (emb_dim + 1) + j] -
                         0.1 * total_gradients[3 + i * (emb_dim + 4) + j];
       VLOG(3) << total_gradients[i * (emb_dim + 4) + j + 3] << ":"
diff --git a/paddle/fluid/pybind/fleet_py.cc b/paddle/fluid/pybind/fleet_py.cc
index 25f2c910028..ea404b4f51e 100644
--- a/paddle/fluid/pybind/fleet_py.cc
+++ b/paddle/fluid/pybind/fleet_py.cc
@@ -221,8 +221,8 @@ void BindGraphPyClient(py::module* m) {
              auto feats =
                  self.get_node_feat(node_type, node_ids, feature_names);
              std::vector<std::vector<py::bytes>> bytes_feats(feats.size());
-             for (int i = 0; i < feats.size(); ++i) {
-               for (int j = 0; j < feats[i].size(); ++j) {
+             for (size_t i = 0; i < feats.size(); ++i) {
+               for (size_t j = 0; j < feats[i].size(); ++j) {
                 bytes_feats[i].push_back(py::bytes(feats[i][j]));
               }
             }
@@ -234,8 +234,8 @@ void BindGraphPyClient(py::module* m) {
              std::vector<std::string> feature_names,
              std::vector<std::vector<py::bytes>> bytes_feats) {
              std::vector<std::vector<std::string>> feats(bytes_feats.size());
-             for (int i = 0; i < bytes_feats.size(); ++i) {
-               for (int j = 0; j < bytes_feats[i].size(); ++j) {
+             for (size_t i = 0; i < bytes_feats.size(); ++i) {
+               for (size_t j = 0; j < bytes_feats[i].size(); ++j) {
                 feats[i].push_back(std::string(bytes_feats[i][j]));
               }
             }
-- 
GitLab
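
Note on the recurring pattern: -Wsign-compare fires whenever a signed and an
unsigned integer meet in a comparison, e.g. an int index tested against the
size_t returned by std::vector::size(). The hunks above apply the two standard
fixes: give the index the same type as its bound, or cast one side explicitly
when the two types legitimately differ. A minimal sketch of both fixes follows;
the function and variable names here are illustrative only, not taken from the
patch:

    #include <cstdint>
    #include <vector>

    void demo(const std::vector<float>& values, int64_t expected_len) {
      // Would warn under -Wsign-compare: int index vs. size_t bound.
      //   for (int i = 0; i < values.size(); ++i) {}

      // Fix 1: match the index type to the bound (size_t here).
      for (size_t i = 0; i < values.size(); ++i) {
        (void)values[i];  // placeholder use of the element
      }

      // Fix 2: cast one side explicitly, as in the CHECK() hunks where a
      // size_t running length is compared against the int64_t returned by
      // Tensor::numel().
      size_t output_len = values.size();
      if (static_cast<int64_t>(output_len) == expected_len) {
        // lengths agree
      }
    }

Loops bounded by protobuf repeated-field sizes (params_size() and the like)
move in the opposite direction, from size_t to int, because those accessors
return a signed int, so a signed index avoids the warning without a cast.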