Unverified commit d5c5bbc3, authored by W wangzhen38, committed by GitHub

[cpplint fix] under ps (#49759)

* [cpplint fix] under ps
Parent 8447f876
......@@ -29,7 +29,8 @@ class Any {
Any() : content_(NULL) {}
template <typename ValueType>
Any(const ValueType &value) : content_(new Holder<ValueType>(value)) {}
explicit Any(const ValueType &value)
: content_(new Holder<ValueType>(value)) {}
Any(const Any &other)
: content_(other.content_ ? other.content_->clone() : NULL) {}
......@@ -38,7 +39,9 @@ class Any {
template <typename ValueType>
ValueType *any_cast() {
return content_ ? &static_cast<Holder<ValueType> *>(content_)->held_ : NULL;
return content_
? &static_cast<Holder<ValueType> *>(content_)->held_ // NOLINT
: NULL;
}
private:
......
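
The hunk above addresses cpplint's runtime/explicit rule: a constructor callable with one argument doubles as an implicit conversion unless it is marked explicit. A minimal sketch of the hazard and the fix, with illustrative names not taken from the patch:

class Example {
 public:
  explicit Example(int value) : value_(value) {}

 private:
  int value_;
};

void Consume(const Example& e);
// Consume(42);           // error now: no silent int -> Example conversion
// Consume(Example(42));  // OK: the conversion is spelled out
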
......@@ -23,7 +23,7 @@
#include "paddle/fluid/platform/profiler/event_tracing.h"
namespace paddle {
namespace distributed {
std::vector<std::string> GraphPyService::split(std::string& str,
std::vector<std::string> GraphPyService::split(const std::string& str,
const char pattern) {
std::vector<std::string> res;
std::stringstream input(str);
......@@ -44,7 +44,7 @@ void GraphPyService::add_table_feat_conf(std::string table_name,
if (table_feat_mapping[idx].find(feat_name) ==
table_feat_mapping[idx].end()) {
VLOG(0) << "for table name not found,make a new one";
int res = (int)table_feat_mapping[idx].size();
int res = static_cast<int>(table_feat_mapping[idx].size());
table_feat_mapping[idx][feat_name] = res;
VLOG(0) << "seq id = " << table_feat_mapping[idx][feat_name];
}
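
The (int) cast replaced above falls under cpplint's readability/casting check: a C-style cast can silently degrade into a reinterpret_cast or const_cast, while static_cast performs only the checked conversion you asked for. A small sketch of the same size_t-to-int pattern, assuming a stand-in map:

#include <map>
#include <string>

int NextFeatureId(const std::map<std::string, int>& mapping) {
  // size() returns size_t; static_cast documents the narrowing that
  // (int)mapping.size() performed implicitly.
  return static_cast<int>(mapping.size());
}
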
......@@ -72,8 +72,8 @@ void add_graph_node(std::string name,
void remove_graph_node(std::string name, std::vector<int64_t> node_ids) {}
void GraphPyService::set_up(std::string ips_str,
int shard_num,
std::vector<std::string> node_types,
std::vector<std::string> edge_types) {
const std::vector<std::string> node_types,
const std::vector<std::string> edge_types) {
set_shard_num(shard_num);
set_num_node_types(node_types.size());
/*
......@@ -86,12 +86,12 @@ void GraphPyService::set_up(std::string ips_str,
*/
id_to_edge = edge_types;
for (size_t table_id = 0; table_id < edge_types.size(); table_id++) {
int res = (int)edge_to_id.size();
int res = static_cast<int>(edge_to_id.size());
edge_to_id[edge_types[table_id]] = res;
}
id_to_feature = node_types;
for (size_t table_id = 0; table_id < node_types.size(); table_id++) {
int res = (int)feature_to_id.size();
int res = static_cast<int>(feature_to_id.size());
feature_to_id[node_types[table_id]] = res;
}
table_feat_mapping.resize(node_types.size());
......@@ -312,8 +312,8 @@ void GraphPyClient::clear_nodes(std::string name) {
}
void GraphPyClient::add_graph_node(std::string name,
std::vector<int64_t>& node_ids,
std::vector<bool>& weight_list) {
std::vector<int64_t>& node_ids, // NOLINT
std::vector<bool>& weight_list) { // NOLINT
// if (this->table_id_map.count(name)) {
// uint32_t table_id = this->table_id_map[name];
// auto status =
......
......@@ -116,7 +116,7 @@ class GraphPyService {
this->num_node_types = num_node_types;
}
int get_server_size(int server_size) { return server_size; }
std::vector<std::string> split(std::string& str, const char pattern);
std::vector<std::string> split(const std::string& str, const char pattern);
void set_up(std::string ips_str,
int shard_num,
std::vector<std::string> node_types,
......@@ -165,7 +165,8 @@ class GraphPyClient : public GraphPyService {
std::shared_ptr<paddle::distributed::GraphBrpcClient> get_ps_client() {
return worker_ptr;
}
void bind_local_server(int local_channel_index, GraphPyServer& server) {
void bind_local_server(int local_channel_index,
GraphPyServer& server) { // NOLINT
worker_ptr->set_local_channel(local_channel_index);
worker_ptr->set_local_graph_service(
(paddle::distributed::GraphBrpcService*)server.get_ps_server()
......@@ -177,9 +178,10 @@ class GraphPyClient : public GraphPyService {
void load_node_file(std::string name, std::string filepath);
void clear_nodes(std::string name);
void add_graph_node(std::string name,
std::vector<int64_t>& node_ids,
std::vector<bool>& weight_list);
void remove_graph_node(std::string name, std::vector<int64_t>& node_ids);
std::vector<int64_t>& node_ids, // NOLINT
std::vector<bool>& weight_list); // NOLINT
void remove_graph_node(std::string name,
std::vector<int64_t>& node_ids); // NOLINT
int get_client_id() { return client_id; }
void set_client_id(int client_id) { this->client_id = client_id; }
void start_client();
......
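
The signature changes in this header all follow cpplint's runtime/references rule: parameters that are only read become const references, and parameters the callee genuinely mutates keep the non-const reference with a // NOLINT suppression on that line. A minimal sketch with hypothetical names:

#include <cstdint>
#include <string>
#include <vector>

// Read-only input: a const reference satisfies the checker outright.
std::vector<std::string> Split(const std::string& str, char pattern);

// True output parameter: keep the mutable reference and silence the
// checker only where the mutation is intentional.
void FillNodeIds(std::vector<int64_t>& node_ids);  // NOLINT
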
......@@ -110,7 +110,7 @@ void FeatureNode::recover_from_buffer(char* buffer) {
memcpy(&feat_len, buffer, sizeof(int));
buffer += sizeof(int);
char str[feat_len + 1];
char str[feat_len + 1]; // NOLINT
memcpy(str, buffer, feat_len);
buffer += feat_len;
str[feat_len] = '\0';
......
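
char str[feat_len + 1] is a variable-length array, a compiler extension rather than standard C++, which cpplint reports under runtime/arrays. The patch keeps the VLA and suppresses the warning; a portable alternative, sketched under the assumption that the bytes are only needed as a string, would be:

#include <string>

std::string RecoverFeature(const char* buffer, int feat_len) {
  // std::string copies exactly feat_len bytes and manages the
  // terminator itself: no VLA and no manual '\0' bookkeeping.
  return std::string(buffer, feat_len);
}
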
......@@ -84,7 +84,8 @@ void WeightedSampler::build(GraphEdgeBlob *edges) {
delete right;
right = nullptr;
}
return build_one((WeightedGraphEdgeBlob *)edges, 0, edges->size());
return build_one(
reinterpret_cast<WeightedGraphEdgeBlob *>(edges), 0, edges->size());
}
void WeightedSampler::build_one(WeightedGraphEdgeBlob *edges,
......
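
Pointer reinterpretations such as (WeightedGraphEdgeBlob *)edges are rewritten as reinterpret_cast throughout the patch. The behavior here is identical; the named cast makes the unchecked conversion visible and greppable. The same pattern recurs below for cudaMalloc, sketched with an assumed allocation helper:

#include <cuda_runtime.h>
#include <cstddef>
#include <cstdint>

int64_t* AllocDeviceKeys(size_t n) {
  int64_t* dev_keys = nullptr;
  // cudaMalloc expects void**; reinterpret_cast states the pointer
  // reinterpretation that (void **)&dev_keys performed silently.
  cudaMalloc(reinterpret_cast<void**>(&dev_keys), n * sizeof(int64_t));
  return dev_keys;
}
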
......@@ -63,9 +63,7 @@ class GraphSampler {
}
~GraphSampler() { end_graph_sampling(); }
virtual int load_from_ssd(std::string path) = 0;
;
virtual int run_graph_sampling() = 0;
;
virtual void init(GpuPsGraphTable *gpu_table,
std::vector<std::string> args_) = 0;
std::shared_ptr<::ThreadPool> thread_pool;
......
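
The two deleted lines were stray ; tokens after the pure-virtual declarations, which cpplint flags as empty statements. After the fix the class body reads simply, sketched with a stand-in class:

#include <string>

class GraphSamplerSketch {
 public:
  virtual ~GraphSamplerSketch() = default;
  virtual int load_from_ssd(std::string path) = 0;  // declaration ends here,
  virtual int run_graph_sampling() = 0;             // no bare ';' line follows
};
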
......@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#ifdef PADDLE_WITH_HETERPS
#pragma once
namespace paddle {
namespace framework {
int CommonGraphSampler::load_from_ssd(std::string path) {
......@@ -30,9 +30,9 @@ int CommonGraphSampler::load_from_ssd(std::string path) {
}
auto src_id = std::stoll(values[0]);
_db->put(0,
(char *)&src_id,
reinterpret_cast<char *>(&src_id),
sizeof(uint64_t),
(char *)neighbor_data.data(),
reinterpret_cast<char *>(neighbor_data.data()),
sizeof(uint64_t) * neighbor_data.size());
int gpu_shard = src_id % gpu_num;
if (gpu_edges_count[gpu_shard] + neighbor_data.size() <=
......
......@@ -22,7 +22,7 @@ limitations under the License. */
#include "paddle/fluid/framework/fleet/heter_ps/optimizer.cuh.h"
#include "paddle/fluid/platform/cuda_device_guard.h"
using namespace paddle::framework;
using paddle::framework;
TEST(TEST_FLEET, heter_comm) {
int gpu_count = 3;
......
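
cpplint's build/namespaces check rejects using namespace directives in any scope. Note that the replacement shown, using paddle::framework;, is a using-declaration that names a namespace and is ill-formed C++; the conventional fixes are a namespace alias or per-name using-declarations, sketched here with a forward declaration standing in for the real headers:

namespace paddle {
namespace framework {
class Scope;
}  // namespace framework
}  // namespace paddle

// Alias: call sites stay short, nothing is imported wholesale.
namespace framework = paddle::framework;

// Or pull in only the specific names a test actually touches.
using paddle::framework::Scope;
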
......@@ -24,7 +24,7 @@
#include "paddle/fluid/framework/fleet/heter_ps/optimizer.cuh.h"
#include "paddle/fluid/platform/cuda_device_guard.h"
using namespace paddle::framework;
using paddle::framework;
void prepare_file(char file_name[], std::vector<std::string> data) {
std::ofstream ofile;
ofile.open(file_name);
......@@ -91,9 +91,10 @@ TEST(TEST_FLEET, graph_sample) {
*/
int64_t cpu_key[3] = {7, 0, 6};
void *key;
cudaMalloc((void **)&key, 3 * sizeof(int64_t));
cudaMalloc(reinterpret_cast<void **>(&key), 3 * sizeof(int64_t));
cudaMemcpy(key, cpu_key, 3 * sizeof(int64_t), cudaMemcpyHostToDevice);
auto neighbor_sample_res = g.graph_neighbor_sample(0, (int64_t *)key, 3, 3);
auto neighbor_sample_res =
g.graph_neighbor_sample(0, reinterpret_cast<int64_t **>(key), 3, 3);
int64_t *res = new int64_t[7];
/*
cudaMemcpy(res, neighbor_sample_res->val, 56, cudaMemcpyDeviceToHost);
......
......@@ -28,44 +28,42 @@
using namespace paddle::framework; // NOLINT
namespace platform = paddle::platform;
std::string edges[] = {
// NOLINT
std::string("0\t1"),
std::string("0\t9"),
std::string("1\t2"),
std::string("1\t0"),
std::string("2\t1"),
std::string("2\t3"),
std::string("3\t2"),
std::string("3\t4"),
std::string("4\t3"),
std::string("4\t5"),
std::string("5\t4"),
std::string("5\t6"),
std::string("6\t5"),
std::string("6\t7"),
std::string("7\t6"),
std::string("7\t8"),
const char *edges[] = {
"0\t1",
"0\t9",
"1\t2",
"1\t0",
"2\t1",
"2\t3",
"3\t2",
"3\t4",
"4\t3",
"4\t5",
"5\t4",
"5\t6",
"6\t5",
"6\t7",
"7\t6",
"7\t8",
};
char edge_file_name[] = "edges1.txt";
std::string nodes[] = { // NOLINT
std::string("user\t37\ta 0.34\tb 13 14\tc hello\td abc"),
std::string("user\t96\ta 0.31\tb 15 10\tc 96hello\td abcd"),
std::string("user\t59\ta 0.11\tb 11 14"),
std::string("user\t97\ta 0.11\tb 12 11"),
std::string("item\t45\ta 0.21"),
std::string("item\t145\ta 0.21"),
std::string("item\t112\ta 0.21"),
std::string("item\t48\ta 0.21"),
std::string("item\t247\ta 0.21"),
std::string("item\t111\ta 0.21"),
std::string("item\t46\ta 0.21"),
std::string("item\t146\ta 0.21"),
std::string("item\t122\ta 0.21"),
std::string("item\t49\ta 0.21"),
std::string("item\t248\ta 0.21"),
std::string("item\t113\ta 0.21")};
const char *nodes[] = {"user\t37\ta 0.34\tb 13 14\tc hello\td abc",
"user\t96\ta 0.31\tb 15 10\tc 96hello\td abcd",
"user\t59\ta 0.11\tb 11 14",
"user\t97\ta 0.11\tb 12 11",
"item\t45\ta 0.21",
"item\t145\ta 0.21",
"item\t112\ta 0.21",
"item\t48\ta 0.21",
"item\t247\ta 0.21",
"item\t111\ta 0.21",
"item\t46\ta 0.21",
"item\t146\ta 0.21",
"item\t122\ta 0.21",
"item\t49\ta 0.21",
"item\t248\ta 0.21",
"item\t113\ta 0.21"};
char node_file_name[] = "nodes.txt";
std::vector<std::string> user_feature_name = {"a", "b", "c", "d"};
std::vector<std::string> item_feature_name = {"a"};
......
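
Rewriting the global std::string arrays as const char * arrays addresses cpplint's runtime/string check: global objects of class type run constructors before main() and destructors after it, with unspecified ordering across translation units. String literals have static storage and need no construction. The pattern, trimmed to two entries:

// Flagged: constructed before main(), destroyed after it.
// std::string edges_old[] = {std::string("0\t1"), std::string("0\t9")};

// Preferred: pointers to string literals, zero construction cost.
const char* edges_new[] = {"0\t1", "0\t9"};
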
......@@ -23,7 +23,7 @@ limitations under the License. */
#include "paddle/fluid/framework/fleet/heter_ps/optimizer.cuh.h"
#include "paddle/fluid/platform/cuda_device_guard.h"
using namespace paddle::framework;
using paddle::framework;
TEST(TEST_FLEET, graph_comm) {
int gpu_count = 3;
std::vector<int> dev_ids;
......@@ -100,9 +100,10 @@ TEST(TEST_FLEET, graph_comm) {
int64_t cpu_key[3] = {7, 0, 6};
void *key;
cudaMalloc((void **)&key, 3 * sizeof(int64_t));
cudaMalloc(reinterpret_cast<void **>(&key), 3 * sizeof(int64_t));
cudaMemcpy(key, cpu_key, 3 * sizeof(int64_t), cudaMemcpyHostToDevice);
auto neighbor_sample_res = g.graph_neighbor_sample(0, (int64_t *)key, 3, 3);
auto neighbor_sample_res =
g.graph_neighbor_sample(0, reinterpret_cast<int64_t *>(key), 3, 3);
res = new int64_t[7];
cudaMemcpy(res, neighbor_sample_res->val, 56, cudaMemcpyDeviceToHost);
int *actual_sample_size = new int[3];
......
......@@ -46,19 +46,19 @@
#include "paddle/fluid/string/printf.h"
#include "paddle/phi/kernels/funcs/math_function.h"
using namespace paddle::framework;
using paddle::framework;
namespace platform = paddle::platform;
namespace operators = paddle::operators;
namespace memory = paddle::memory;
namespace distributed = paddle::distributed;
std::string input_file;
const char *input_file;
int exe_count = 100;
int use_nv = 1;
int fixed_key_size = 50000, sample_size = 32,
bfs_sample_nodes_in_each_shard = 10000, init_search_size = 1,
bfs_sample_edges = 20, gpu_num1 = 8, gpu_num = 8;
std::string gpu_str = "0,1,2,3,4,5,6,7";
const char *gpu_str = "0,1,2,3,4,5,6,7";
int64_t *key[8];
std::vector<std::string> edges = {std::string("37\t45\t0.34"),
std::string("37\t145\t0.31"),
......@@ -115,8 +115,8 @@ void testSampleRate() {
index += node.get_size(false);
// res.push_back(node);
ids.push_back(node.get_id());
int swap_pos = rand() % ids.size();
std::swap(ids[swap_pos], ids[(int)ids.size() - 1]);
int swap_pos = rand_r() % ids.size();
std::swap(ids[swap_pos], ids[static_cast<int>(ids.size()) - 1]);
}
cur = ids.size();
// if (sample_actual_size == 0) break;
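
rand() draws from hidden global state and is not thread-safe, which is what cpplint's runtime/threadsafe_fn check targets. POSIX rand_r() takes a pointer to a caller-owned seed, so the zero-argument rand_r() shown above will not compile as written. A minimal sketch of the thread-safe form, assuming a per-thread seed:

#include <cstdint>
#include <cstdlib>
#include <vector>

int PickSwapPos(const std::vector<int64_t>& ids, unsigned int* seed) {
  // rand_r keeps its state in *seed rather than a hidden global,
  // so each thread owns an independent stream of draws.
  return rand_r(seed) % static_cast<int>(ids.size());
}
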
......@@ -161,8 +161,8 @@ void testSampleRate() {
actual_size[i].push_back(ac[j - s] / sizeof(int64_t));
int ss = ac[j - s] / sizeof(int64_t);
for (int k = 0; k < ss; k++) {
sample_neighbors[i].push_back(
*((int64_t *)(buffers[j - s].get() + k * sizeof(int64_t))));
sample_neighbors[i].push_back(*(reinterpret_cast<int64_t *>(
buffers[j - s].get() + k * sizeof(int64_t))));
}
}
}
......@@ -252,7 +252,8 @@ void testSampleRate() {
*/
for (int i = 0; i < gpu_num1; i++) {
platform::CUDADeviceGuard guard(device_id_mapping[i]);
cudaMalloc((void **)&key[i], ids.size() * sizeof(int64_t));
cudaMalloc(reinterpret_cast<void **>(&key[i]),
ids.size() * sizeof(int64_t));
cudaMemcpy(key[i],
ids.data(),
ids.size() * sizeof(int64_t),
......@@ -285,16 +286,16 @@ void testSampleRate() {
for (int k = 0; k < exe_count; k++) {
st = 0;
while (st < size) {
int len = std::min(fixed_key_size, (int)ids.size() - st);
int len = std::min(fixed_key_size, static_cast<int>(ids.size()) - st);
auto r = g.graph_neighbor_sample(
i, (int64_t *)(key[i] + st), sample_size, len);
i, reinterpret_cast<int64_t *>(key[i] + st), sample_size, len);
st += len;
delete r;
}
}
};
auto start1 = std::chrono::steady_clock::now();
std::thread thr[gpu_num1];
std::thread thr[gpu_num1]; // NOLINT
for (int i = 0; i < gpu_num1; i++) {
thr[i] = std::thread(func, i);
}
......@@ -313,16 +314,20 @@ void testSampleRate() {
for (int k = 0; k < exe_count; k++) {
st = 0;
while (st < size) {
int len = std::min(fixed_key_size, (int)ids.size() - st);
auto r = g.graph_neighbor_sample_v2(
i, (int64_t *)(key[i] + st), sample_size, len, false);
int len = std::min(fixed_key_size, static_cast<int>(ids.size()) - st);
auto r =
g.graph_neighbor_sample_v2(i,
reinterpret_cast<int64_t *>(key[i] + st),
sample_size,
len,
false);
st += len;
delete r;
}
}
};
auto start2 = std::chrono::steady_clock::now();
std::thread thr2[gpu_num1];
std::thread thr2[gpu_num1]; // NOLINT
for (int i = 0; i < gpu_num1; i++) {
thr2[i] = std::thread(func2, i);
}
......
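
std::thread thr[gpu_num1] is another runtime-sized array, suppressed with // NOLINT because gpu_num1 is not a compile-time constant. A std::vector avoids the extension entirely; a hedged sketch, with the worker passed as a plain function pointer instead of the test's lambda:

#include <thread>
#include <vector>

void RunPerGpu(int gpu_count, void (*worker)(int)) {
  std::vector<std::thread> threads;
  threads.reserve(gpu_count);
  for (int i = 0; i < gpu_count; ++i) threads.emplace_back(worker, i);
  for (auto& t : threads) t.join();  // same join pattern, no VLA
}
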
......@@ -286,7 +286,7 @@ void HeterWrapper::EndPass(Scope* scope, int num) {
void HeterWrapper::CallRemoteXpu(std::shared_ptr<HeterTask> task,
HeterCpuWorker* worker,
int mpi_rank,
std::vector<std::string>& send_vars) {
const std::vector<std::string>& send_vars) {
HeterRequest request;
request.set_cmd(0);
request.set_cur_batch(task->cur_batch_);
......@@ -329,10 +329,11 @@ void HeterWrapper::CallRemoteXpu(std::shared_ptr<HeterTask> task,
stub.service(&done->cntl, &request, &done->response, done);
}
void HeterWrapper::CallRemoteXpuSync(std::shared_ptr<HeterTask> task,
HeterCpuWorker* worker,
int mpi_rank,
std::vector<std::string>& send_vars) {
void HeterWrapper::CallRemoteXpuSync(
std::shared_ptr<HeterTask> task,
HeterCpuWorker* worker,
int mpi_rank,
const std::vector<std::string>& send_vars) {
HeterRequest request;
HeterResponse response;
brpc::Controller cntl;
......
......@@ -40,7 +40,7 @@ typedef std::function<void(void*)> HeterRpcCallbackFunc;
class OnHeterRpcDone : public google::protobuf::Closure {
public:
OnHeterRpcDone(HeterRpcCallbackFunc func) : handler_(func) {}
explicit OnHeterRpcDone(HeterRpcCallbackFunc func) : handler_(func) {}
virtual ~OnHeterRpcDone() {}
void Run() {
std::unique_ptr<OnHeterRpcDone> self_guard(this);
......@@ -75,12 +75,12 @@ class HeterWrapper {
void CallRemoteXpu(std::shared_ptr<HeterTask> task,
HeterCpuWorker* worker,
int mpi_rank,
std::vector<std::string>& send_vars);
const std::vector<std::string>& send_vars);
void CallRemoteXpuSync(std::shared_ptr<HeterTask> task,
HeterCpuWorker* worker,
int mpi_rank,
std::vector<std::string>& send_vars);
const std::vector<std::string>& send_vars);
void StopXpuService(int num);
......
......@@ -26,8 +26,8 @@ void SampleUniqueNeighbors(
bidiiter begin,
bidiiter end,
int num_samples,
std::mt19937& rng,
std::uniform_int_distribution<int>& dice_distribution) {
std::mt19937& rng, // NOLINT
std::uniform_int_distribution<int>& dice_distribution) { // NOLINT
int left_num = std::distance(begin, end);
for (int i = 0; i < num_samples; i++) {
bidiiter r = begin;
......@@ -46,8 +46,8 @@ void SampleUniqueNeighborsWithEids(
bidiiter eid_begin,
bidiiter eid_end,
int num_samples,
std::mt19937& rng,
std::uniform_int_distribution<int>& dice_distribution) {
std::mt19937& rng, // NOLINT
std::uniform_int_distribution<int>& dice_distribution) { // NOLINT
int left_num = std::distance(src_begin, src_end);
for (int i = 0; i < num_samples; i++) {
bidiiter r1 = src_begin, r2 = eid_begin;
......
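
The std::mt19937 engine and the distribution stay as non-const references on purpose: drawing from an engine mutates its state, and if it were taken by value each call would replay the same sequence without advancing the caller's generator. The // NOLINT markers acknowledge runtime/references while preserving those semantics. A compact sketch:

#include <random>

int DrawIndex(std::mt19937& rng, int n) {  // NOLINT: engine state must advance
  std::uniform_int_distribution<int> dist(0, n - 1);
  return dist(rng);
}
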