From d5c5bbc33b1733e6569f0ae8e7d792cbcfed47bd Mon Sep 17 00:00:00 2001
From: wangzhen38 <41941775+wangzhen38@users.noreply.github.com>
Date: Fri, 13 Jan 2023 11:19:53 +0800
Subject: [PATCH] [cpplint fix] under ps (#49759)

* [cpplint fix] under ps
---
 paddle/fluid/distributed/common/registerer.h  |  7 +-
 .../ps/service/ps_service/graph_py_service.cc | 16 ++---
 .../ps/service/ps_service/graph_py_service.h  | 12 ++--
 .../distributed/ps/table/graph/graph_node.cc  |  2 +-
 .../ps/table/graph/graph_weighted_sampler.cc  |  3 +-
 .../framework/fleet/heter_ps/graph_sampler.h  |  2 -
 .../fleet/heter_ps/graph_sampler_inl.h        |  6 +-
 .../framework/fleet/heter_ps/test_comm.cu     |  2 +-
 .../fleet/heter_ps/test_cpu_graph_sample.cu   |  7 +-
 .../fleet/heter_ps/test_cpu_query.cu          | 68 +++++++++----------
 .../framework/fleet/heter_ps/test_graph.cu    |  7 +-
 .../fleet/heter_ps/test_sample_rate.cu        | 35 ++++++----
 paddle/fluid/framework/fleet/heter_wrapper.cc | 11 +--
 paddle/fluid/framework/fleet/heter_wrapper.h  |  6 +-
 .../cpu/graph_sample_neighbors_kernel.cc      |  8 +--
 15 files changed, 101 insertions(+), 91 deletions(-)

diff --git a/paddle/fluid/distributed/common/registerer.h b/paddle/fluid/distributed/common/registerer.h
index f4938c0f93f..663119a8e56 100644
--- a/paddle/fluid/distributed/common/registerer.h
+++ b/paddle/fluid/distributed/common/registerer.h
@@ -29,7 +29,8 @@ class Any {
   Any() : content_(NULL) {}
 
   template <class ValueType>
-  Any(const ValueType &value) : content_(new Holder<ValueType>(value)) {}
+  explicit Any(const ValueType &value)
+      : content_(new Holder<ValueType>(value)) {}
 
   Any(const Any &other)
       : content_(other.content_ ? other.content_->clone() : NULL) {}
@@ -38,7 +39,9 @@ class Any {
 
   template <class ValueType>
   ValueType *any_cast() {
-    return content_ ? &static_cast<Holder<ValueType> *>(content_)->held_ : NULL;
+    return content_
+               ? &static_cast<Holder<ValueType> *>(content_)->held_  // NOLINT
+               : NULL;
   }
 
  private:
diff --git a/paddle/fluid/distributed/ps/service/ps_service/graph_py_service.cc b/paddle/fluid/distributed/ps/service/ps_service/graph_py_service.cc
index 83a21ccf7b0..47a8e2e18d7 100644
--- a/paddle/fluid/distributed/ps/service/ps_service/graph_py_service.cc
+++ b/paddle/fluid/distributed/ps/service/ps_service/graph_py_service.cc
@@ -23,7 +23,7 @@
 #include "paddle/fluid/platform/profiler/event_tracing.h"
 namespace paddle {
 namespace distributed {
-std::vector<std::string> GraphPyService::split(std::string& str,
+std::vector<std::string> GraphPyService::split(const std::string& str,
                                                const char pattern) {
   std::vector<std::string> res;
   std::stringstream input(str);
@@ -44,7 +44,7 @@ void GraphPyService::add_table_feat_conf(std::string table_name,
   if (table_feat_mapping[idx].find(feat_name) ==
       table_feat_mapping[idx].end()) {
     VLOG(0) << "for table name not found,make a new one";
-    int res = (int)table_feat_mapping[idx].size();
+    int res = static_cast<int>(table_feat_mapping[idx].size());
     table_feat_mapping[idx][feat_name] = res;
     VLOG(0) << "seq id = " << table_feat_mapping[idx][feat_name];
   }
@@ -72,8 +72,8 @@ void add_graph_node(std::string name,
 void remove_graph_node(std::string name, std::vector<int64_t> node_ids) {}
 void GraphPyService::set_up(std::string ips_str,
                             int shard_num,
-                            std::vector<std::string> node_types,
-                            std::vector<std::string> edge_types) {
+                            const std::vector<std::string> node_types,
+                            const std::vector<std::string> edge_types) {
   set_shard_num(shard_num);
   set_num_node_types(node_types.size());
   /*
@@ -86,12 +86,12 @@ void GraphPyService::set_up(std::string ips_str,
   */
   id_to_edge = edge_types;
   for (size_t table_id = 0; table_id < edge_types.size(); table_id++) {
-    int res = (int)edge_to_id.size();
+    int res = static_cast<int>(edge_to_id.size());
     edge_to_id[edge_types[table_id]] = res;
   }
   id_to_feature = node_types;
   for (size_t table_id = 0; table_id < node_types.size(); table_id++) {
-    int res = (int)feature_to_id.size();
+    int res = static_cast<int>(feature_to_id.size());
     feature_to_id[node_types[table_id]] = res;
   }
   table_feat_mapping.resize(node_types.size());
@@ -312,8 +312,8 @@ void GraphPyClient::clear_nodes(std::string name) {
 }
 void GraphPyClient::add_graph_node(std::string name,
-                                   std::vector<int64_t>& node_ids,
-                                   std::vector<float>& weight_list) {
+                                   std::vector<int64_t>& node_ids,     // NOLINT
+                                   std::vector<float>& weight_list) {  // NOLINT
   // if (this->table_id_map.count(name)) {
   //   uint32_t table_id = this->table_id_map[name];
   //   auto status =
diff --git a/paddle/fluid/distributed/ps/service/ps_service/graph_py_service.h b/paddle/fluid/distributed/ps/service/ps_service/graph_py_service.h
index 4e915ab50fe..33490281981 100644
--- a/paddle/fluid/distributed/ps/service/ps_service/graph_py_service.h
+++ b/paddle/fluid/distributed/ps/service/ps_service/graph_py_service.h
@@ -116,7 +116,7 @@ class GraphPyService {
     this->num_node_types = num_node_types;
   }
   int get_server_size(int server_size) { return server_size; }
-  std::vector<std::string> split(std::string& str, const char pattern);
+  std::vector<std::string> split(const std::string& str, const char pattern);
   void set_up(std::string ips_str,
               int shard_num,
               std::vector<std::string> node_types,
               std::vector<std::string> edge_types);
@@ -165,7 +165,8 @@ class GraphPyClient : public GraphPyService {
   std::shared_ptr<paddle::distributed::GraphBrpcClient> get_ps_client() {
     return worker_ptr;
   }
-  void bind_local_server(int local_channel_index, GraphPyServer& server) {
+  void bind_local_server(int local_channel_index,
+                         GraphPyServer& server) {  // NOLINT
     worker_ptr->set_local_channel(local_channel_index);
     worker_ptr->set_local_graph_service(
         (paddle::distributed::GraphBrpcService*)server.get_ps_server()
@@ -177,9 +178,10 @@ class GraphPyClient : public GraphPyService {
   void load_node_file(std::string name, std::string filepath);
   void clear_nodes(std::string name);
   void add_graph_node(std::string name,
-                      std::vector<int64_t>& node_ids,
-                      std::vector<float>& weight_list);
-  void remove_graph_node(std::string name, std::vector<int64_t>& node_ids);
+                      std::vector<int64_t>& node_ids,    // NOLINT
+                      std::vector<float>& weight_list);  // NOLINT
+  void remove_graph_node(std::string name,
+                         std::vector<int64_t>& node_ids);  // NOLINT
   int get_client_id() { return client_id; }
   void set_client_id(int client_id) { this->client_id = client_id; }
   void start_client();
diff --git a/paddle/fluid/distributed/ps/table/graph/graph_node.cc b/paddle/fluid/distributed/ps/table/graph/graph_node.cc
index d966bd69653..c505d49ab06 100644
--- a/paddle/fluid/distributed/ps/table/graph/graph_node.cc
+++ b/paddle/fluid/distributed/ps/table/graph/graph_node.cc
@@ -110,7 +110,7 @@ void FeatureNode::recover_from_buffer(char* buffer) {
     memcpy(&feat_len, buffer, sizeof(int));
     buffer += sizeof(int);
 
-    char str[feat_len + 1];
+    char str[feat_len + 1];  // NOLINT
     memcpy(str, buffer, feat_len);
     buffer += feat_len;
     str[feat_len] = '\0';
diff --git a/paddle/fluid/distributed/ps/table/graph/graph_weighted_sampler.cc b/paddle/fluid/distributed/ps/table/graph/graph_weighted_sampler.cc
index 0f3bbd4e346..8f249447747 100644
--- a/paddle/fluid/distributed/ps/table/graph/graph_weighted_sampler.cc
+++ b/paddle/fluid/distributed/ps/table/graph/graph_weighted_sampler.cc
@@ -84,7 +84,8 @@ void WeightedSampler::build(GraphEdgeBlob *edges) {
     delete right;
     right = nullptr;
   }
-  return build_one((WeightedGraphEdgeBlob *)edges, 0, edges->size());
+  return build_one(
+      reinterpret_cast<WeightedGraphEdgeBlob *>(edges), 0, edges->size());
 }
 
 void WeightedSampler::build_one(WeightedGraphEdgeBlob *edges,
diff --git a/paddle/fluid/framework/fleet/heter_ps/graph_sampler.h b/paddle/fluid/framework/fleet/heter_ps/graph_sampler.h
index fdde8eb064b..6e7d0ba9ca7 100644
--- a/paddle/fluid/framework/fleet/heter_ps/graph_sampler.h
+++ b/paddle/fluid/framework/fleet/heter_ps/graph_sampler.h
@@ -63,9 +63,7 @@ class GraphSampler {
   }
   ~GraphSampler() { end_graph_sampling(); }
   virtual int load_from_ssd(std::string path) = 0;
-  ;
   virtual int run_graph_sampling() = 0;
-  ;
   virtual void init(GpuPsGraphTable *gpu_table,
                     std::vector<std::string> args_) = 0;
   std::shared_ptr<::ThreadPool> thread_pool;
diff --git a/paddle/fluid/framework/fleet/heter_ps/graph_sampler_inl.h b/paddle/fluid/framework/fleet/heter_ps/graph_sampler_inl.h
index 9ad5898757c..2656e0a2b00 100644
--- a/paddle/fluid/framework/fleet/heter_ps/graph_sampler_inl.h
+++ b/paddle/fluid/framework/fleet/heter_ps/graph_sampler_inl.h
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#ifdef PADDLE_WITH_HETERPS
+#pragma once
 namespace paddle {
 namespace framework {
 int CommonGraphSampler::load_from_ssd(std::string path) {
@@ -30,9 +30,9 @@ int CommonGraphSampler::load_from_ssd(std::string path) {
     }
     auto src_id = std::stoll(values[0]);
     _db->put(0,
-             (char *)&src_id,
+             reinterpret_cast<char *>(&src_id),
              sizeof(uint64_t),
-             (char *)neighbor_data.data(),
+             reinterpret_cast<char *>(neighbor_data.data()),
              sizeof(uint64_t) * neighbor_data.size());
     int gpu_shard = src_id % gpu_num;
     if (gpu_edges_count[gpu_shard] + neighbor_data.size() <=
diff --git a/paddle/fluid/framework/fleet/heter_ps/test_comm.cu b/paddle/fluid/framework/fleet/heter_ps/test_comm.cu
index 72fa0282066..ea553f40ebd 100644
--- a/paddle/fluid/framework/fleet/heter_ps/test_comm.cu
+++ b/paddle/fluid/framework/fleet/heter_ps/test_comm.cu
@@ -22,7 +22,7 @@ limitations under the License.
 */
 #include "paddle/fluid/framework/fleet/heter_ps/optimizer.cuh.h"
 #include "paddle/fluid/platform/cuda_device_guard.h"
-using namespace paddle::framework;
+using namespace paddle::framework;  // NOLINT
 
 TEST(TEST_FLEET, heter_comm) {
   int gpu_count = 3;
diff --git a/paddle/fluid/framework/fleet/heter_ps/test_cpu_graph_sample.cu b/paddle/fluid/framework/fleet/heter_ps/test_cpu_graph_sample.cu
index 868cb6937f5..fa3d0bd244a 100644
--- a/paddle/fluid/framework/fleet/heter_ps/test_cpu_graph_sample.cu
+++ b/paddle/fluid/framework/fleet/heter_ps/test_cpu_graph_sample.cu
@@ -24,7 +24,7 @@
 #include "paddle/fluid/framework/fleet/heter_ps/optimizer.cuh.h"
 #include "paddle/fluid/platform/cuda_device_guard.h"
-using namespace paddle::framework;
+using namespace paddle::framework;  // NOLINT
 void prepare_file(char file_name[], std::vector<std::string> data) {
   std::ofstream ofile;
   ofile.open(file_name);
@@ -91,9 +91,10 @@ TEST(TEST_FLEET, graph_sample) {
   */
   int64_t cpu_key[3] = {7, 0, 6};
   void *key;
-  cudaMalloc((void **)&key, 3 * sizeof(int64_t));
+  cudaMalloc(reinterpret_cast<void **>(&key), 3 * sizeof(int64_t));
   cudaMemcpy(key, cpu_key, 3 * sizeof(int64_t), cudaMemcpyHostToDevice);
-  auto neighbor_sample_res = g.graph_neighbor_sample(0, (int64_t *)key, 3, 3);
+  auto neighbor_sample_res =
+      g.graph_neighbor_sample(0, reinterpret_cast<int64_t *>(key), 3, 3);
   int64_t *res = new int64_t[7];
   /*
   cudaMemcpy(res, neighbor_sample_res->val, 56, cudaMemcpyDeviceToHost);
diff --git a/paddle/fluid/framework/fleet/heter_ps/test_cpu_query.cu b/paddle/fluid/framework/fleet/heter_ps/test_cpu_query.cu
index 78297ce292c..37c6a8af5f7 100644
--- a/paddle/fluid/framework/fleet/heter_ps/test_cpu_query.cu
+++ b/paddle/fluid/framework/fleet/heter_ps/test_cpu_query.cu
@@ -28,44 +28,42 @@
 using namespace paddle::framework;  // NOLINT
 namespace platform = paddle::platform;
 
-std::string edges[] = {
-    // NOLINT
-    std::string("0\t1"),
-    std::string("0\t9"),
-    std::string("1\t2"),
-    std::string("1\t0"),
-    std::string("2\t1"),
-    std::string("2\t3"),
-    std::string("3\t2"),
-    std::string("3\t4"),
-    std::string("4\t3"),
-    std::string("4\t5"),
-    std::string("5\t4"),
-    std::string("5\t6"),
-    std::string("6\t5"),
-    std::string("6\t7"),
-    std::string("7\t6"),
-    std::string("7\t8"),
-};
+const char *edges[] = {
+    "0\t1",
+    "0\t9",
+    "1\t2",
+    "1\t0",
+    "2\t1",
+    "2\t3",
+    "3\t2",
+    "3\t4",
+    "4\t3",
+    "4\t5",
+    "5\t4",
+    "5\t6",
+    "6\t5",
+    "6\t7",
+    "7\t6",
+    "7\t8",
+};
 char edge_file_name[] = "edges1.txt";
 
-std::string nodes[] = {  // NOLINT
-    std::string("user\t37\ta 0.34\tb 13 14\tc hello\td abc"),
-    std::string("user\t96\ta 0.31\tb 15 10\tc 96hello\td abcd"),
-    std::string("user\t59\ta 0.11\tb 11 14"),
-    std::string("user\t97\ta 0.11\tb 12 11"),
-    std::string("item\t45\ta 0.21"),
-    std::string("item\t145\ta 0.21"),
-    std::string("item\t112\ta 0.21"),
-    std::string("item\t48\ta 0.21"),
-    std::string("item\t247\ta 0.21"),
-    std::string("item\t111\ta 0.21"),
-    std::string("item\t46\ta 0.21"),
-    std::string("item\t146\ta 0.21"),
-    std::string("item\t122\ta 0.21"),
-    std::string("item\t49\ta 0.21"),
-    std::string("item\t248\ta 0.21"),
-    std::string("item\t113\ta 0.21")};
+const char *nodes[] = {"user\t37\ta 0.34\tb 13 14\tc hello\td abc",
+                       "user\t96\ta 0.31\tb 15 10\tc 96hello\td abcd",
+                       "user\t59\ta 0.11\tb 11 14",
+                       "user\t97\ta 0.11\tb 12 11",
+                       "item\t45\ta 0.21",
+                       "item\t145\ta 0.21",
+                       "item\t112\ta 0.21",
+                       "item\t48\ta 0.21",
+                       "item\t247\ta 0.21",
+                       "item\t111\ta 0.21",
+                       "item\t46\ta 0.21",
+                       "item\t146\ta 0.21",
+                       "item\t122\ta 0.21",
+                       "item\t49\ta 0.21",
+                       "item\t248\ta 0.21",
+                       "item\t113\ta 0.21"};
 char node_file_name[] = "nodes.txt";
 std::vector<std::string> user_feature_name = {"a", "b", "c", "d"};
 std::vector<std::string> item_feature_name = {"a"};
diff --git a/paddle/fluid/framework/fleet/heter_ps/test_graph.cu b/paddle/fluid/framework/fleet/heter_ps/test_graph.cu
index 788cf932737..576fc911ee1 100644
--- a/paddle/fluid/framework/fleet/heter_ps/test_graph.cu
+++ b/paddle/fluid/framework/fleet/heter_ps/test_graph.cu
@@ -23,7 +23,7 @@ limitations under the License.
 */
 #include "paddle/fluid/framework/fleet/heter_ps/optimizer.cuh.h"
 #include "paddle/fluid/platform/cuda_device_guard.h"
-using namespace paddle::framework;
+using namespace paddle::framework;  // NOLINT
 TEST(TEST_FLEET, graph_comm) {
   int gpu_count = 3;
   std::vector<int> dev_ids;
@@ -100,9 +100,10 @@ TEST(TEST_FLEET, graph_comm) {
 
   int64_t cpu_key[3] = {7, 0, 6};
   void *key;
-  cudaMalloc((void **)&key, 3 * sizeof(int64_t));
+  cudaMalloc(reinterpret_cast<void **>(&key), 3 * sizeof(int64_t));
   cudaMemcpy(key, cpu_key, 3 * sizeof(int64_t), cudaMemcpyHostToDevice);
-  auto neighbor_sample_res = g.graph_neighbor_sample(0, (int64_t *)key, 3, 3);
+  auto neighbor_sample_res =
+      g.graph_neighbor_sample(0, reinterpret_cast<int64_t *>(key), 3, 3);
   res = new int64_t[7];
   cudaMemcpy(res, neighbor_sample_res->val, 56, cudaMemcpyDeviceToHost);
   int *actual_sample_size = new int[3];
diff --git a/paddle/fluid/framework/fleet/heter_ps/test_sample_rate.cu b/paddle/fluid/framework/fleet/heter_ps/test_sample_rate.cu
index 5fc0625992c..be4ea813719 100644
--- a/paddle/fluid/framework/fleet/heter_ps/test_sample_rate.cu
+++ b/paddle/fluid/framework/fleet/heter_ps/test_sample_rate.cu
@@ -46,19 +46,19 @@
 #include "paddle/fluid/string/printf.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
 
-using namespace paddle::framework;
+using namespace paddle::framework;  // NOLINT
 namespace platform = paddle::platform;
 namespace operators = paddle::operators;
 namespace memory = paddle::memory;
 namespace distributed = paddle::distributed;
 
-std::string input_file;
+const char *input_file;
 int exe_count = 100;
 int use_nv = 1;
 int fixed_key_size = 50000, sample_size = 32,
     bfs_sample_nodes_in_each_shard = 10000, init_search_size = 1,
     bfs_sample_edges = 20, gpu_num1 = 8, gpu_num = 8;
-std::string gpu_str = "0,1,2,3,4,5,6,7";
+const char *gpu_str = "0,1,2,3,4,5,6,7";
 int64_t *key[8];
 std::vector<std::string> edges = {std::string("37\t45\t0.34"),
                                   std::string("37\t145\t0.31"),
@@ -115,8 +115,8 @@ void testSampleRate() {
       index += node.get_size(false);
       // res.push_back(node);
       ids.push_back(node.get_id());
-      int swap_pos = rand() % ids.size();
-      std::swap(ids[swap_pos], ids[(int)ids.size() - 1]);
+      int swap_pos = rand() % ids.size();  // NOLINT
+      std::swap(ids[swap_pos], ids[static_cast<int>(ids.size()) - 1]);
     }
     cur = ids.size();
     // if (sample_actual_size == 0) break;
@@ -161,8 +161,8 @@ void testSampleRate() {
         actual_size[i].push_back(ac[j - s] / sizeof(int64_t));
         int ss = ac[j - s] / sizeof(int64_t);
         for (int k = 0; k < ss; k++) {
-          sample_neighbors[i].push_back(
-              *((int64_t *)(buffers[j - s].get() + k * sizeof(int64_t))));
+          sample_neighbors[i].push_back(*(reinterpret_cast<int64_t *>(
+              buffers[j - s].get() + k * sizeof(int64_t))));
         }
       }
     }
@@ -252,7 +252,8 @@ void testSampleRate() {
   */
   for (int i = 0; i < gpu_num1; i++) {
     platform::CUDADeviceGuard guard(device_id_mapping[i]);
-    cudaMalloc((void **)&key[i], ids.size() * sizeof(int64_t));
+    cudaMalloc(reinterpret_cast<void **>(&key[i]),
+               ids.size() * sizeof(int64_t));
     cudaMemcpy(key[i],
                ids.data(),
                ids.size() * sizeof(int64_t),
@@ -285,16 +286,16 @@ void testSampleRate() {
     for (int k = 0; k < exe_count; k++) {
       st = 0;
       while (st < size) {
-        int len = std::min(fixed_key_size, (int)ids.size() - st);
+        int len = std::min(fixed_key_size, static_cast<int>(ids.size()) - st);
         auto r = g.graph_neighbor_sample(
-            i, (int64_t *)(key[i] + st), sample_size, len);
+            i, reinterpret_cast<int64_t *>(key[i] + st), sample_size, len);
         st += len;
         delete r;
       }
     }
   };
   auto start1 = std::chrono::steady_clock::now();
-  std::thread thr[gpu_num1];
+  std::thread thr[gpu_num1];  // NOLINT
   for (int i = 0; i < gpu_num1; i++) {
     thr[i] = std::thread(func, i);
   }
@@ -313,16 +314,20 @@ void testSampleRate() {
    for (int k = 0; k < exe_count; k++) {
      st = 0;
      while (st < size) {
-        int len = std::min(fixed_key_size, (int)ids.size() - st);
-        auto r = g.graph_neighbor_sample_v2(
-            i, (int64_t *)(key[i] + st), sample_size, len, false);
+        int len = std::min(fixed_key_size, static_cast<int>(ids.size()) - st);
+        auto r =
+            g.graph_neighbor_sample_v2(i,
+                                       reinterpret_cast<int64_t *>(key[i] + st),
+                                       sample_size,
+                                       len,
+                                       false);
         st += len;
         delete r;
       }
     }
   };
   auto start2 = std::chrono::steady_clock::now();
-  std::thread thr2[gpu_num1];
+  std::thread thr2[gpu_num1];  // NOLINT
   for (int i = 0; i < gpu_num1; i++) {
     thr2[i] = std::thread(func2, i);
   }
diff --git a/paddle/fluid/framework/fleet/heter_wrapper.cc b/paddle/fluid/framework/fleet/heter_wrapper.cc
index 51f9a445f78..9d83d519d09 100644
--- a/paddle/fluid/framework/fleet/heter_wrapper.cc
+++ b/paddle/fluid/framework/fleet/heter_wrapper.cc
@@ -286,7 +286,7 @@ void HeterWrapper::EndPass(Scope* scope, int num) {
 void HeterWrapper::CallRemoteXpu(std::shared_ptr<HeterTask> task,
                                  HeterCpuWorker* worker,
                                  int mpi_rank,
-                                 std::vector<std::string>& send_vars) {
+                                 const std::vector<std::string>& send_vars) {
   HeterRequest request;
   request.set_cmd(0);
   request.set_cur_batch(task->cur_batch_);
@@ -329,10 +329,11 @@ void HeterWrapper::CallRemoteXpu(std::shared_ptr<HeterTask> task,
   stub.service(&done->cntl, &request, &done->response, done);
 }
 
-void HeterWrapper::CallRemoteXpuSync(std::shared_ptr<HeterTask> task,
-                                     HeterCpuWorker* worker,
-                                     int mpi_rank,
-                                     std::vector<std::string>& send_vars) {
+void HeterWrapper::CallRemoteXpuSync(
+    std::shared_ptr<HeterTask> task,
+    HeterCpuWorker* worker,
+    int mpi_rank,
+    const std::vector<std::string>& send_vars) {
   HeterRequest request;
   HeterResponse response;
   brpc::Controller cntl;
diff --git a/paddle/fluid/framework/fleet/heter_wrapper.h b/paddle/fluid/framework/fleet/heter_wrapper.h
index 12f80cc4de9..77838fbec6d 100644
--- a/paddle/fluid/framework/fleet/heter_wrapper.h
+++ b/paddle/fluid/framework/fleet/heter_wrapper.h
@@ -40,7 +40,7 @@ typedef std::function<void(void*)> HeterRpcCallbackFunc;
 
 class OnHeterRpcDone : public google::protobuf::Closure {
  public:
-  OnHeterRpcDone(HeterRpcCallbackFunc func) : handler_(func) {}
+  explicit OnHeterRpcDone(HeterRpcCallbackFunc func) : handler_(func) {}
   virtual ~OnHeterRpcDone() {}
   void Run() {
     std::unique_ptr<OnHeterRpcDone> self_guard(this);
@@ -75,12 +75,12 @@ class HeterWrapper {
   void CallRemoteXpu(std::shared_ptr<HeterTask> task,
                      HeterCpuWorker* worker,
                      int mpi_rank,
-                     std::vector<std::string>& send_vars);
+                     const std::vector<std::string>& send_vars);
 
   void CallRemoteXpuSync(std::shared_ptr<HeterTask> task,
                          HeterCpuWorker* worker,
                          int mpi_rank,
-                         std::vector<std::string>& send_vars);
+                         const std::vector<std::string>& send_vars);
 
   void StopXpuService(int num);
diff --git a/paddle/phi/kernels/cpu/graph_sample_neighbors_kernel.cc b/paddle/phi/kernels/cpu/graph_sample_neighbors_kernel.cc
index 1ef5373d631..f8fefa3450c 100644
--- a/paddle/phi/kernels/cpu/graph_sample_neighbors_kernel.cc
+++ b/paddle/phi/kernels/cpu/graph_sample_neighbors_kernel.cc
@@ -26,8 +26,8 @@ void SampleUniqueNeighbors(
     bidiiter begin,
     bidiiter end,
     int num_samples,
-    std::mt19937& rng,
-    std::uniform_int_distribution<int>& dice_distribution) {
+    std::mt19937& rng,  // NOLINT
+    std::uniform_int_distribution<int>& dice_distribution) {  // NOLINT
   int left_num = std::distance(begin, end);
   for (int i = 0; i < num_samples; i++) {
     bidiiter r = begin;
@@ -46,8 +46,8 @@ void SampleUniqueNeighborsWithEids(
     bidiiter eid_begin,
     bidiiter eid_end,
     int num_samples,
-    std::mt19937& rng,
-    std::uniform_int_distribution<int>& dice_distribution) {
+    std::mt19937& rng,  // NOLINT
+    std::uniform_int_distribution<int>& dice_distribution) {  // NOLINT
   int left_num = std::distance(src_begin, src_end);
   for (int i = 0; i < num_samples; i++) {
     bidiiter r1 = src_begin, r2 = eid_begin;
-- 
GitLab
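The first hunk of this patch touches Paddle's small type-erasure helper in registerer.h; the only semantic change there is marking the single-argument converting constructor `explicit`, which is what cpplint's `runtime/explicit` check asks for (the remaining edits in the patch are cast style, parameter constness, and targeted `// NOLINT` suppressions). For readers unfamiliar with the idiom, below is a minimal, self-contained sketch of the same `Any`/`Holder` pattern. It is not Paddle's actual header: copying is simply deleted here for brevity, whereas the original supports `clone()`-based copying, and `nullptr` stands in for the original's `NULL`.

```cpp
#include <iostream>
#include <string>

// Type-erasure sketch in the spirit of registerer.h's Any/Holder.
// A non-template base class (PlaceHolder) lets Any hold a pointer to
// any Holder<T>; any_cast() recovers the value when the caller names
// the right T.
class Any {
 public:
  Any() : content_(nullptr) {}

  // 'explicit' blocks implicit conversions to Any -- the change the
  // patch makes to satisfy cpplint's runtime/explicit rule.
  template <class ValueType>
  explicit Any(const ValueType &value)
      : content_(new Holder<ValueType>(value)) {}

  // Copying is disabled in this sketch; the original implements it
  // via a virtual clone() on the holder instead.
  Any(const Any &) = delete;
  Any &operator=(const Any &) = delete;

  ~Any() { delete content_; }

  // Unchecked downcast, matching the original: asking for the wrong
  // ValueType is undefined behavior, so callers must know the stored type.
  template <class ValueType>
  ValueType *any_cast() {
    return content_ ? &static_cast<Holder<ValueType> *>(content_)->held_
                    : nullptr;
  }

 private:
  class PlaceHolder {
   public:
    virtual ~PlaceHolder() {}
  };

  template <class ValueType>
  class Holder : public PlaceHolder {
   public:
    explicit Holder(const ValueType &value) : held_(value) {}
    ValueType held_;
  };

  PlaceHolder *content_;
};

int main() {
  Any a(std::string("ps_graph"));
  std::cout << *a.any_cast<std::string>() << std::endl;  // prints ps_graph
  return 0;
}
```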