diff --git a/paddle/fluid/distributed/ps/service/brpc_ps_client.cc b/paddle/fluid/distributed/ps/service/brpc_ps_client.cc
index f43493b10fe996999929809df5779d69bf8d4dc2..b64601a96e47284dad55567bec36f7254eefb6a5 100644
--- a/paddle/fluid/distributed/ps/service/brpc_ps_client.cc
+++ b/paddle/fluid/distributed/ps/service/brpc_ps_client.cc
@@ -1494,8 +1494,6 @@ void BrpcPsClient::PushSparseTaskConsume() {
         merge_status.clear();
         std::vector<std::future<int>>().swap(merge_status);
         _push_sparse_merge_count_map[table_id] = 0;
-
-        auto queue_size = task_queue->Size();
       } else {  // 未达到阈值 只做多路归并
         std::vector<std::future<int>> merge_status(request_call_num);
         for (size_t shard_idx = 0; shard_idx < request_call_num; ++shard_idx) {
@@ -1542,7 +1540,6 @@ int BrpcPsClient::PushSparseAsyncShardMerge(
     std::vector<int> &request_kv_num, int table_id, int shard_idx,
     ValueAccessor *accessor) {
   size_t merged_kv_count = 0;
-  uint64_t min_key = UINT64_MAX;
   uint32_t value_size = accessor->GetAccessorInfo().update_size;
 
   thread_local std::vector<std::pair<uint64_t, const char *>> sorted_kv_list;
@@ -1771,8 +1768,6 @@ void BrpcPsClient::PushDenseTaskConsume() {
         accessor->Merge(&total_send_data, &merge_data, total_send_data_size);
 
 #pragma optimize("", off)
-        auto *debug_closure = closure;
-        auto *debug_task = async_task;
         delete async_task;
 #pragma optimize("", on)
         return 0;
diff --git a/paddle/fluid/distributed/ps/service/brpc_ps_server.cc b/paddle/fluid/distributed/ps/service/brpc_ps_server.cc
index 129e78d677d5adc75fe6c828c343dbe32f188633..3df4a5e7d7a2a1ff9691b36ba1de71d2df437267 100644
--- a/paddle/fluid/distributed/ps/service/brpc_ps_server.cc
+++ b/paddle/fluid/distributed/ps/service/brpc_ps_server.cc
@@ -648,7 +648,6 @@ int32_t BrpcPsService::SaveAllTable(Table *table,
                                     PsResponseMessage &response,
                                     brpc::Controller *cntl) {
   auto &table_map = *(_server->GetTable());
-  int32_t all_feasign_size = 0;
   int32_t feasign_size = 0;
 
   for (auto &itr : table_map) {
@@ -827,7 +826,6 @@ int32_t BrpcPsService::PushGlobalStep(Table *table,
     set_response_code(response, 0, "run_program data is empty");
     return 0;
   }
-  uint32_t num = *(const uint32_t *)(request.data().data());
   const int64_t *values =
       (const int64_t *)(request.data().data() + sizeof(uint32_t));
   auto trainer_id = request.client_id();
diff --git a/paddle/fluid/distributed/ps/service/communicator/communicator.cc b/paddle/fluid/distributed/ps/service/communicator/communicator.cc
index cf8d289d4c876fad4561ba9a4a039089918abb74..f40e999186c8d835d11f915964ed568cc06e307c 100644
--- a/paddle/fluid/distributed/ps/service/communicator/communicator.cc
+++ b/paddle/fluid/distributed/ps/service/communicator/communicator.cc
@@ -658,8 +658,6 @@ void AsyncCommunicator::PushSparseFromTensorAsync(
   // TODO(zhaocaibei123): check type of show/clk is int? float? uint64?
   // const long int* show_tensor = shows->data();
   // const long int* clk_tensor = clks->data();
-  const int64_t *show_tensor = shows->data<int64_t>();
-  const int64_t *clk_tensor = clks->data<int64_t>();
 
   for (size_t index = 0; index < inputs->size(); ++index) {
     framework::LoDTensor *g_tensor = outputs->at(index);
diff --git a/paddle/fluid/distributed/ps/service/graph_brpc_client.cc b/paddle/fluid/distributed/ps/service/graph_brpc_client.cc
index 65b3cc9d0f89288eb1682f5a77c934c49d7e555e..5b8f946b450a23b0a4a486d69f6ea6c98e1fafd5 100644
--- a/paddle/fluid/distributed/ps/service/graph_brpc_client.cc
+++ b/paddle/fluid/distributed/ps/service/graph_brpc_client.cc
@@ -491,7 +491,6 @@ std::future<int32_t> GraphBrpcClient::random_sample_nodes(
         butil::IOBufBytesIterator io_buffer_itr(res_io_buffer);
         size_t bytes_size = io_buffer_itr.bytes_left();
         char *buffer = new char[bytes_size];
-        auto size = io_buffer_itr.copy_and_forward((void *)(buffer), bytes_size);
         size_t index = 0;
         while (index < bytes_size) {
           ids.push_back(*(int64_t *)(buffer + index));
diff --git a/paddle/fluid/distributed/ps/service/graph_brpc_server.cc b/paddle/fluid/distributed/ps/service/graph_brpc_server.cc
index 8128f2b2adbd92c80e106e2d92f0b48a3d106586..1e9bd45f5963846d287968e541cc83cea4e716f1 100644
--- a/paddle/fluid/distributed/ps/service/graph_brpc_server.cc
+++ b/paddle/fluid/distributed/ps/service/graph_brpc_server.cc
@@ -498,8 +498,7 @@ int32_t GraphBrpcService::sample_neighbors_across_multi_servers(
   }
   int idx_ = *(int *)(request.params(0).c_str());
-  size_t node_num = request.params(1).size() / sizeof(int64_t),
-         size_of_size_t = sizeof(size_t);
+  size_t node_num = request.params(1).size() / sizeof(int64_t);
   int64_t *node_data = (int64_t *)(request.params(1).c_str());
   int sample_size = *(int64_t *)(request.params(2).c_str());
   bool need_weight = *(int64_t *)(request.params(3).c_str());
 
@@ -572,7 +571,6 @@ int32_t GraphBrpcService::sample_neighbors_across_multi_servers(
           failed[request2server[request_idx]] = true;
         } else {
           auto &res_io_buffer = closure->cntl(request_idx)->response_attachment();
-          size_t node_size;
           res[request_idx].reset(new butil::IOBufBytesIterator(res_io_buffer));
           size_t num;
           res[request_idx]->copy_and_forward(&num, sizeof(size_t));
diff --git a/paddle/fluid/distributed/ps/service/ps_local_client.cc b/paddle/fluid/distributed/ps/service/ps_local_client.cc
index a52ed1996fff7798a1ca3ad17f842dd8e3941ee1..5903e5783c02aec61dfef82a8f9b78ddb991bd6e 100644
--- a/paddle/fluid/distributed/ps/service/ps_local_client.cc
+++ b/paddle/fluid/distributed/ps/service/ps_local_client.cc
@@ -283,7 +283,6 @@ int32_t PsLocalClient::Initialize() {
     size_t table_id, const uint64_t* keys, const float** update_values,
     size_t num, void* callback) {
   PSClientClosure* closure = reinterpret_cast<PSClientClosure*>(callback);
-  auto* accessor = GetTableAccessor(table_id);
   auto* table_ptr = GetTable(table_id);
 
   TableContext table_context;
@@ -303,7 +302,6 @@ int32_t PsLocalClient::Initialize() {
                                            const uint64_t* keys,
                                            const float** update_values,
                                            size_t num) {
-  auto* accessor = GetTableAccessor(table_id);
   auto* table_ptr = GetTable(table_id);
 
   TableContext table_context;
diff --git a/paddle/fluid/distributed/ps/table/ctr_accessor.cc b/paddle/fluid/distributed/ps/table/ctr_accessor.cc
index 98c1baf6befaaf5acf9e59b6f4478031da842000..761b29e9a62bcbc9b23b5a92ba157ccf4d60669c 100644
--- a/paddle/fluid/distributed/ps/table/ctr_accessor.cc
+++ b/paddle/fluid/distributed/ps/table/ctr_accessor.cc
@@ -61,8 +61,6 @@ void CtrCommonAccessor::InitAccessorInfo() {
 }
 
 bool CtrCommonAccessor::Shrink(float* value) {
-  auto base_threshold = _config.ctr_accessor_param().base_threshold();
-  auto delta_threshold = _config.ctr_accessor_param().delta_threshold();
   auto delete_after_unseen_days =
       _config.ctr_accessor_param().delete_after_unseen_days();
   auto delete_threshold = _config.ctr_accessor_param().delete_threshold();
@@ -171,7 +169,6 @@ void CtrCommonAccessor::UpdateStatAfterSave(float* value, int param) {
 }
 
 int32_t CtrCommonAccessor::Create(float** values, size_t num) {
-  auto embedx_dim = _config.embedx_dim();
   for (size_t value_item = 0; value_item < num; ++value_item) {
     float* value = values[value_item];
     value[common_feature_value.UnseenDaysIndex()] = 0;
@@ -245,7 +242,6 @@ int32_t CtrCommonAccessor::Merge(float** update_values,
 // second dim: field num
 int32_t CtrCommonAccessor::Update(float** update_values,
                                   const float** push_values, size_t num) {
-  auto embedx_dim = _config.embedx_dim();
   for (size_t value_item = 0; value_item < num; ++value_item) {
     float* update_value = update_values[value_item];
     const float* push_value = push_values[value_item];
@@ -330,8 +326,6 @@ std::string CtrCommonAccessor::ParseToString(const float* v, int param) {
 }
 
 int CtrCommonAccessor::ParseFromString(const std::string& str, float* value) {
-  int embedx_dim = _config.embedx_dim();
-
   _embedx_sgd_rule->InitValue(value + common_feature_value.EmbedxWIndex(),
                               value + common_feature_value.EmbedxG2SumIndex());
   auto ret = paddle::string::str_to_float(str.data(), value);
diff --git a/paddle/fluid/distributed/ps/table/ctr_double_accessor.cc b/paddle/fluid/distributed/ps/table/ctr_double_accessor.cc
index 791d3f2f95c6e766d97316c897f1042990977e8a..032137dbc0a485b45eff03e035936df8efe58865 100644
--- a/paddle/fluid/distributed/ps/table/ctr_double_accessor.cc
+++ b/paddle/fluid/distributed/ps/table/ctr_double_accessor.cc
@@ -59,8 +59,6 @@ bool CtrDoubleAccessor::Shrink(float* value) {
   // auto base_threshold = _config.ctr_accessor_param().base_threshold();
   // auto delta_threshold = _config.ctr_accessor_param().delta_threshold();
   // auto delete_threshold = _config.ctr_accessor_param().delete_threshold();
-  auto base_threshold = _config.ctr_accessor_param().base_threshold();
-  auto delta_threshold = _config.ctr_accessor_param().delta_threshold();
   auto delete_after_unseen_days =
       _config.ctr_accessor_param().delete_after_unseen_days();
   auto delete_threshold = _config.ctr_accessor_param().delete_threshold();
@@ -170,7 +168,6 @@ void CtrDoubleAccessor::UpdateStatAfterSave(float* value, int param) {
 }
 
 int32_t CtrDoubleAccessor::Create(float** values, size_t num) {
-  auto embedx_dim = _config.embedx_dim();
   for (size_t value_item = 0; value_item < num; ++value_item) {
     float* value = values[value_item];
     value[CtrDoubleFeatureValue::UnseenDaysIndex()] = 0;
@@ -246,7 +243,6 @@ int32_t CtrDoubleAccessor::Merge(float** update_values,
 // second dim: field num
 int32_t CtrDoubleAccessor::Update(float** update_values,
                                   const float** push_values, size_t num) {
-  auto embedx_dim = _config.embedx_dim();
   for (size_t value_item = 0; value_item < num; ++value_item) {
     float* update_value = update_values[value_item];
     const float* push_value = push_values[value_item];
diff --git a/paddle/fluid/distributed/ps/table/ctr_dymf_accessor.cc b/paddle/fluid/distributed/ps/table/ctr_dymf_accessor.cc
index ad95b75aa019d91c646a30063a00b834eee1e621..186c12e9dd4f728ebb9916e40517aee5964d5ef0 100644
--- a/paddle/fluid/distributed/ps/table/ctr_dymf_accessor.cc
+++ b/paddle/fluid/distributed/ps/table/ctr_dymf_accessor.cc
@@ -62,8 +62,6 @@ void CtrDymfAccessor::InitAccessorInfo() {
 }
 
 bool CtrDymfAccessor::Shrink(float* value) {
-  auto base_threshold = _config.ctr_accessor_param().base_threshold();
-  auto delta_threshold = _config.ctr_accessor_param().delta_threshold();
   auto delete_after_unseen_days =
       _config.ctr_accessor_param().delete_after_unseen_days();
   auto delete_threshold = _config.ctr_accessor_param().delete_threshold();
@@ -172,7 +170,6 @@ void CtrDymfAccessor::UpdateStatAfterSave(float* value, int param) {
 }
 
 int32_t CtrDymfAccessor::Create(float** values, size_t num) {
-  auto embedx_dim = _config.embedx_dim();
   for (size_t value_item = 0; value_item < num; ++value_item) {
     float* value = values[value_item];
     value[common_feature_value.UnseenDaysIndex()] = 0;
diff --git a/paddle/fluid/distributed/ps/table/memory_dense_table.cc b/paddle/fluid/distributed/ps/table/memory_dense_table.cc
index 3a581b671c4b845e51406711891b6afb6e7a9cd1..6c6563d9ef32e4c3a723e041fa6bdc8fe7a48d83 100644
--- a/paddle/fluid/distributed/ps/table/memory_dense_table.cc
+++ b/paddle/fluid/distributed/ps/table/memory_dense_table.cc
@@ -249,7 +249,6 @@ int32_t MemoryDenseTable::Load(const std::string& path,
   float data_buffer[5];
   float* data_buff_ptr = data_buffer;
   std::string line_data;
-  int size = static_cast<int>(values_.size());
   auto common = _config.common();
 
   for (size_t i = start_file_idx; i < end_file_idx + 1; ++i) {
@@ -354,7 +353,6 @@ int32_t MemoryDenseTable::Save(const std::string& path,
       } else {
         std::ostringstream os;
         for (int x = 0; x < size; ++x) {
-          auto& varname = common.params()[x];
           int dim = common.dims()[x];
           VLOG(3) << "MemoryDenseTable::save dim " << x << " size: " << dim;
           for (int y = 0; y < dim; ++y) {
diff --git a/paddle/fluid/distributed/ps/table/memory_sparse_table.cc b/paddle/fluid/distributed/ps/table/memory_sparse_table.cc
index f6a2ad0e02e44bc2aab16343d00c6829036e9226..c4b8b6fc200d317afa6f04b65e27500ba1095347 100644
--- a/paddle/fluid/distributed/ps/table/memory_sparse_table.cc
+++ b/paddle/fluid/distributed/ps/table/memory_sparse_table.cc
@@ -163,8 +163,6 @@ int32_t MemorySparseTable::LoadLocalFS(const std::string& path,
                                        const std::string& param) {
   std::string table_path = TableDir(path);
   auto file_list = paddle::framework::localfs_list(table_path);
-
-  int load_param = atoi(param.c_str());
   size_t expect_shard_num = _sparse_table_shard_num;
   if (file_list.size() != expect_shard_num) {
     LOG(WARNING) << "MemorySparseTable file_size:" << file_list.size()
diff --git a/paddle/fluid/distributed/ps/table/sparse_accessor.cc b/paddle/fluid/distributed/ps/table/sparse_accessor.cc
index a5435064c6bf91c52b6780484e34b5e964b4478e..8d38daaf9704a0dc6b0ee8e46261be488e37b75c 100644
--- a/paddle/fluid/distributed/ps/table/sparse_accessor.cc
+++ b/paddle/fluid/distributed/ps/table/sparse_accessor.cc
@@ -55,8 +55,6 @@ void SparseAccessor::InitAccessorInfo() {
 }
 
 bool SparseAccessor::Shrink(float* value) {
-  auto base_threshold = _config.ctr_accessor_param().base_threshold();
-  auto delta_threshold = _config.ctr_accessor_param().delta_threshold();
   auto delete_after_unseen_days =
       _config.ctr_accessor_param().delete_after_unseen_days();
   auto delete_threshold = _config.ctr_accessor_param().delete_threshold();
@@ -146,7 +144,6 @@ void SparseAccessor::UpdateStatAfterSave(float* value, int param) {
 }
 
 int32_t SparseAccessor::Create(float** values, size_t num) {
-  auto embedx_dim = _config.embedx_dim();
   for (size_t value_item = 0; value_item < num; ++value_item) {
     float* value = values[value_item];
     value[sparse_feature_value.UnseenDaysIndex()] = 0;
@@ -215,7 +212,6 @@ int32_t SparseAccessor::Merge(float** update_values,
 // second dim: field num
 int32_t SparseAccessor::Update(float** update_values,
                                const float** push_values, size_t num) {
-  auto embedx_dim = _config.embedx_dim();
   for (size_t value_item = 0; value_item < num; ++value_item) {
     float* update_value = update_values[value_item];
     const float* push_value = push_values[value_item];
@@ -294,8 +290,6 @@ std::string SparseAccessor::ParseToString(const float* v, int param) {
 }
 
 int SparseAccessor::ParseFromString(const std::string& str, float* value) {
-  int embedx_dim = _config.embedx_dim();
-
   _embedx_sgd_rule->InitValue(value + sparse_feature_value.EmbedxWIndex(),
                               value + sparse_feature_value.EmbedxG2SumIndex());
   auto ret = paddle::string::str_to_float(str.data(), value);
diff --git a/paddle/fluid/distributed/ps/table/ssd_sparse_table.cc b/paddle/fluid/distributed/ps/table/ssd_sparse_table.cc
index 76d00f2f017d9975067ac268177a1fa3c1639414..7d8920e7f5c285951454bd22f59e4969d00e1c3a 100644
--- a/paddle/fluid/distributed/ps/table/ssd_sparse_table.cc
+++ b/paddle/fluid/distributed/ps/table/ssd_sparse_table.cc
@@ -290,9 +290,6 @@ int64_t SSDSparseTable::LocalSize() {
     local_size += _local_shards[i].size();
   }
   // TODO rocksdb size
-  uint64_t ssd_size = 0;
-  // _db->get_estimate_key_num(ssd_size);
-  // return local_size + ssd_size;
   return local_size;
 }
 
@@ -473,7 +470,6 @@ int64_t SSDSparseTable::CacheShuffle(
   }
 
   int shuffle_node_num = _config.sparse_table_cache_file_num();
   LOG(INFO) << "Table>> shuffle node num is: " << shuffle_node_num;
-  size_t file_start_idx = _avg_local_shard_num * _shard_idx;
   int thread_num = _real_local_shard_num < 20 ? _real_local_shard_num : 20;
   std::vector<
@@ -578,7 +574,6 @@ int32_t SSDSparseTable::SaveCache(
     return 0;
   }
   int save_param = atoi(param.c_str());  // batch_model:0  xbox:1
-  size_t file_start_idx = _avg_local_shard_num * _shard_idx;
   std::string table_path = paddle::string::format_string(
       "%s/%03d_cache/", path.c_str(), _config.table_id());
   _afs_client.remove(paddle::string::format_string(
diff --git a/paddle/fluid/distributed/test/barrier_table_test.cc b/paddle/fluid/distributed/test/barrier_table_test.cc
index f540939c6fd8f1b9bbe4d63a9103bcc8e2a37bc1..12c389e9766b5c81f4f3d71242e86c2745456454 100644
--- a/paddle/fluid/distributed/test/barrier_table_test.cc
+++ b/paddle/fluid/distributed/test/barrier_table_test.cc
@@ -26,7 +26,6 @@ namespace paddle {
 namespace distributed {
 
 TEST(BarrierTable, Barrier) {
-  int emb_dim = 10;
   int trainers = 2;
   bool sync = true;
 