diff --git a/paddle/fluid/distributed/ps/service/brpc_ps_server.cc b/paddle/fluid/distributed/ps/service/brpc_ps_server.cc index 4ca5f9c8207fe5b06fe2dc51f4c791a996e31b3a..129e78d677d5adc75fe6c828c343dbe32f188633 100644 --- a/paddle/fluid/distributed/ps/service/brpc_ps_server.cc +++ b/paddle/fluid/distributed/ps/service/brpc_ps_server.cc @@ -136,7 +136,7 @@ std::future<int32_t> BrpcPsServer::SendPServer2PServerMsg( int msg_type, int to_pserver_id, const std::string &msg) { auto promise = std::make_shared<std::promise<int32_t>>(); std::future<int> fut = promise->get_future(); - if (to_pserver_id >= _pserver_channels.size()) { + if (static_cast<size_t>(to_pserver_id) >= _pserver_channels.size()) { LOG(FATAL) << "to_pserver_id is out of range pservers, which size is " << _pserver_channels.size(); promise->set_value(-1); diff --git a/paddle/fluid/distributed/ps/service/communicator/communicator.cc b/paddle/fluid/distributed/ps/service/communicator/communicator.cc index edbfd06d55a5435f3416dd977c4f3aa8bfc01bf7..cf8d289d4c876fad4561ba9a4a039089918abb74 100644 --- a/paddle/fluid/distributed/ps/service/communicator/communicator.cc +++ b/paddle/fluid/distributed/ps/service/communicator/communicator.cc @@ -727,7 +727,7 @@ void AsyncCommunicator::PushSparseFromTensorAsync( ++input_idx; } } - CHECK(static_cast<size_t>(output_len) == g_tensor->numel()); + CHECK(static_cast<int64_t>(output_len) == g_tensor->numel()); } std::vector<float *> push_g_vec(input_idx, nullptr); diff --git a/paddle/fluid/distributed/ps/table/barrier_table.cc b/paddle/fluid/distributed/ps/table/barrier_table.cc index b9d0345313cc3728f828ffecd439f3890a436cb8..f94c095d28d64e9cc52e8543be59495920a9feda 100644 --- a/paddle/fluid/distributed/ps/table/barrier_table.cc +++ b/paddle/fluid/distributed/ps/table/barrier_table.cc @@ -42,7 +42,7 @@ int32_t BarrierTable::Barrier(const uint32_t trainer_id, << " add trainer id: " << trainer_id; } - if (trainer_ids_.size() < trigger_.load()) { + if (static_cast<int>(trainer_ids_.size()) < trigger_.load()) { std::vector<uint32_t> diffs(trainer_all_.size()); 
auto iter = std::set_difference(trainer_all_.begin(), trainer_all_.end(), trainer_ids_.begin(), trainer_ids_.end(), diff --git a/paddle/fluid/distributed/ps/table/ctr_double_accessor.cc b/paddle/fluid/distributed/ps/table/ctr_double_accessor.cc index 44c672eff61c04511a0e7e4f965b5558a7fddd40..791d3f2f95c6e766d97316c897f1042990977e8a 100644 --- a/paddle/fluid/distributed/ps/table/ctr_double_accessor.cc +++ b/paddle/fluid/distributed/ps/table/ctr_double_accessor.cc @@ -234,7 +234,7 @@ int32_t CtrDoubleAccessor::Merge(float** update_values, update_value[i] += other_update_value[i]; }*/ for (size_t i = 0; i < total_dim; ++i) { - if (i != CtrDoublePushValue::SlotIndex()) { + if (static_cast<int>(i) != CtrDoublePushValue::SlotIndex()) { update_value[i] += other_update_value[i]; } } diff --git a/paddle/fluid/distributed/ps/table/memory_dense_table.cc b/paddle/fluid/distributed/ps/table/memory_dense_table.cc index 857850ce50b6a7e93fb83eed813a401c426eab59..3a581b671c4b845e51406711891b6afb6e7a9cd1 100644 --- a/paddle/fluid/distributed/ps/table/memory_dense_table.cc +++ b/paddle/fluid/distributed/ps/table/memory_dense_table.cc @@ -276,7 +276,7 @@ int32_t MemoryDenseTable::Load(const std::string& path, CHECK(str_len == param_col_ids_.size()) << "expect " << param_col_ids_.size() << " float, but got " << str_len; - for (int col_idx = 0; col_idx < str_len; ++col_idx) { + for (size_t col_idx = 0; col_idx < str_len; ++col_idx) { if (param_col_ids_[col_idx] < 0) { continue; } diff --git a/paddle/fluid/distributed/ps/table/memory_sparse_table.cc b/paddle/fluid/distributed/ps/table/memory_sparse_table.cc index 171853f96672d6eafd015eb9d98703e898bf2904..f6a2ad0e02e44bc2aab16343d00c6829036e9226 100644 --- a/paddle/fluid/distributed/ps/table/memory_sparse_table.cc +++ b/paddle/fluid/distributed/ps/table/memory_sparse_table.cc @@ -53,7 +53,8 @@ int32_t MemorySparseTable::InitializeValue() { _avg_local_shard_num = sparse_local_shard_num(_sparse_table_shard_num, _shard_num); _real_local_shard_num 
= _avg_local_shard_num; - if (_real_local_shard_num * (_shard_idx + 1) > _sparse_table_shard_num) { + if (static_cast<int>(_real_local_shard_num * (_shard_idx + 1)) > + _sparse_table_shard_num) { _real_local_shard_num = _sparse_table_shard_num - _real_local_shard_num * _shard_idx; _real_local_shard_num = diff --git a/paddle/fluid/distributed/ps/table/sparse_accessor.cc b/paddle/fluid/distributed/ps/table/sparse_accessor.cc index 081a77cedf792672df0ed554f90279f005ec3d02..a5435064c6bf91c52b6780484e34b5e964b4478e 100644 --- a/paddle/fluid/distributed/ps/table/sparse_accessor.cc +++ b/paddle/fluid/distributed/ps/table/sparse_accessor.cc @@ -202,7 +202,7 @@ int32_t SparseAccessor::Merge(float** update_values, float* update_value = update_values[value_item]; const float* other_update_value = other_update_values[value_item]; for (size_t i = 0; i < total_dim; ++i) { - if (i != SparsePushValue::SlotIndex()) { + if (static_cast<int>(i) != SparsePushValue::SlotIndex()) { update_value[i] += other_update_value[i]; } } diff --git a/paddle/fluid/distributed/ps/table/ssd_sparse_table.cc b/paddle/fluid/distributed/ps/table/ssd_sparse_table.cc index dc77a6c6c51e2f223f69789429952c6099fa96c9..76d00f2f017d9975067ac268177a1fa3c1639414 100644 --- a/paddle/fluid/distributed/ps/table/ssd_sparse_table.cc +++ b/paddle/fluid/distributed/ps/table/ssd_sparse_table.cc @@ -637,8 +637,9 @@ int32_t SSDSparseTable::Load(size_t start_idx, size_t end_idx, size_t mf_value_size = _value_accesor->GetAccessorInfo().mf_size / sizeof(float); - end_idx = - end_idx < _sparse_table_shard_num ? end_idx : _sparse_table_shard_num; + end_idx = static_cast<int>(end_idx) < _sparse_table_shard_num + ? end_idx + : _sparse_table_shard_num; int thread_num = (end_idx - start_idx) < 20 ? 
(end_idx - start_idx) : 20; omp_set_num_threads(thread_num); #pragma omp parallel for schedule(dynamic) diff --git a/paddle/fluid/distributed/ps/wrapper/fleet.cc b/paddle/fluid/distributed/ps/wrapper/fleet.cc index bddda8f8fff8acf16d69131162e4b2af23b2b351..d08e0e518e80894e4ca645f2c812778d9e0d5455 100644 --- a/paddle/fluid/distributed/ps/wrapper/fleet.cc +++ b/paddle/fluid/distributed/ps/wrapper/fleet.cc @@ -555,10 +555,12 @@ void FleetWrapper::PushSparseFromTensorAsync( // in // ctr_accessor.h push_values.back()[0] = 2; // TODO(zhaocaibei123): slot - push_values.back()[1] = - (i >= show_size ? 1 : static_cast<float>(show_tensor[i])); - push_values.back()[2] = - (i >= clk_size ? 0 : static_cast<float>(clk_tensor[i])); + push_values.back()[1] = (static_cast<int>(i) >= show_size + ? 1 + : static_cast<float>(show_tensor[i])); + push_values.back()[2] = (static_cast<int>(i) >= clk_size + ? 0 + : static_cast<float>(clk_tensor[i])); float* data = push_values.back().data() + 3; memcpy(data, g + output_len, sizeof(float) * fea_dim); } @@ -582,10 +584,12 @@ void FleetWrapper::PushSparseFromTensorAsync( // slot show clk grad... consistent with CtrCommonPushValue defined in // ctr_accessor.h push_values.back()[0] = 2; // TODO(zhaocaibei123): slot - push_values.back()[1] = - (i >= show_size ? 1 : static_cast<float>(show_tensor[i])); - push_values.back()[2] = - (i >= clk_size ? 0 : static_cast<float>(clk_tensor[i])); + push_values.back()[1] = (static_cast<int>(i) >= show_size + ? 1 + : static_cast<float>(show_tensor[i])); + push_values.back()[2] = (static_cast<int>(i) >= clk_size + ? 
0 + : static_cast<float>(clk_tensor[i])); float* data = push_values.back().data() + 3; memcpy(data, g + output_len, sizeof(float) * fea_dim); } diff --git a/paddle/fluid/distributed/test/ctr_accessor_test.cc b/paddle/fluid/distributed/test/ctr_accessor_test.cc index bb25fd699166575a15f9e901a2b33c19948b0d16..0e0acec164e47b4fa053f6863ee90f50e7be85fb 100644 --- a/paddle/fluid/distributed/test/ctr_accessor_test.cc +++ b/paddle/fluid/distributed/test/ctr_accessor_test.cc @@ -239,7 +239,7 @@ TEST(downpour_feature_value_accessor_test, test_update) { push_v.show = grad[i][1]; push_v.click = grad[i][2]; push_v.embed_g = grad[i][3]; - for (int j = 0; j < parameter.embedx_dim(); ++j) { + for (unsigned int j = 0; j < parameter.embedx_dim(); ++j) { push_v.embedx_g.push_back(grad[i][4 + j]); }