diff --git a/paddle/fluid/distributed/service/graph_py_service.h b/paddle/fluid/distributed/service/graph_py_service.h
index e185f23e3d240ff59d75a921dd5112f3b8a7851c..c6657be96ba446d2f7538943aab43dd47e1868fb 100644
--- a/paddle/fluid/distributed/service/graph_py_service.h
+++ b/paddle/fluid/distributed/service/graph_py_service.h
@@ -54,19 +54,7 @@ class GraphPyService {
   std::vector<std::string> table_feat_conf_feat_dtype;
   std::vector<int32_t> table_feat_conf_feat_shape;
 
-  // std::thread *server_thread, *client_thread;
-
-  // std::shared_ptr pserver_ptr;
-
-  // std::shared_ptr worker_ptr;
-
  public:
-  // std::shared_ptr get_ps_server() {
-  //   return pserver_ptr;
-  // }
-  // std::shared_ptr get_ps_client() {
-  //   return worker_ptr;
-  // }
   int get_shard_num() { return shard_num; }
   void set_shard_num(int shard_num) { this->shard_num = shard_num; }
   void GetDownpourSparseTableProto(
diff --git a/paddle/fluid/distributed/table/common_graph_table.cc b/paddle/fluid/distributed/table/common_graph_table.cc
index 020bcdcc52ef4b023fbb7b263517f67ef4abaf0b..0dc99de1bfe82a691fdacb834acd1ad606dcb04b 100644
--- a/paddle/fluid/distributed/table/common_graph_table.cc
+++ b/paddle/fluid/distributed/table/common_graph_table.cc
@@ -171,7 +171,7 @@ int32_t GraphTable::load_nodes(const std::string &path, std::string node_type) {
 
 int32_t GraphTable::load_edges(const std::string &path, bool reverse_edge) {
   auto paths = paddle::string::split_string<std::string>(path, ";");
-  int count = 0;
+  int64_t count = 0;
   std::string sample_type = "random";
   bool is_weighted = false;
   int valid_count = 0;
diff --git a/paddle/fluid/distributed/table/common_graph_table.h b/paddle/fluid/distributed/table/common_graph_table.h
index 8ddf3c8f904a6cab0e5826118ce9650bf8f6e2af..b18da82abe61c9695712f542e187ac48fd5edc9d 100644
--- a/paddle/fluid/distributed/table/common_graph_table.h
+++ b/paddle/fluid/distributed/table/common_graph_table.h
@@ -33,26 +33,11 @@ namespace paddle {
 namespace distributed {
 class GraphShard {
  public:
-  // static int bucket_low_bound;
-  // static int gcd(int s, int t) {
-  //   if (s % t == 0) return t;
-  //   return gcd(t, s % t);
-  // }
   size_t get_size();
   GraphShard() {}
-  GraphShard(int shard_num) {
-    this->shard_num = shard_num;
-    // bucket_size = init_bucket_size(shard_num);
-    // bucket.resize(bucket_size);
-  }
+  GraphShard(int shard_num) { this->shard_num = shard_num; }
   std::vector<Node *> &get_bucket() { return bucket; }
   std::vector<Node *> get_batch(int start, int end, int step);
-  // int init_bucket_size(int shard_num) {
-  //   for (int i = bucket_low_bound;; i++) {
-  //     if (gcd(i, shard_num) == 1) return i;
-  //   }
-  //   return -1;
-  // }
   std::vector<uint64_t> get_ids_by_range(int start, int end) {
     std::vector<uint64_t> res;
     for (int i = start; i < end && i < bucket.size(); i++) {
@@ -64,7 +49,6 @@ class GraphShard {
   FeatureNode *add_feature_node(uint64_t id);
   Node *find_node(uint64_t id);
   void add_neighboor(uint64_t id, uint64_t dst_id, float weight);
-  // std::unordered_map::iterator>
   std::unordered_map<uint64_t, int> get_node_location() {
     return node_location;
   }
@@ -131,7 +115,7 @@ class GraphTable : public SparseTable {
  protected:
   std::vector<GraphShard> shards;
   size_t shard_start, shard_end, server_num, shard_num_per_table, shard_num;
-  const int task_pool_size_ = 11;
+  const int task_pool_size_ = 24;
   const int random_sample_nodes_ranges = 3;
 
   std::vector<std::string> feat_name;