Commit 209d534d (unverified), authored on Feb 08, 2023 by wangxiaoning, committed via GitHub on Feb 08, 2023.
[code style]fix cpplint codestyle (#50314)
* fix codestyle
* fix std

Parent commit: fa284076
Showing 3 changed files with 33 additions and 21 deletions (+33 −21):
paddle/fluid/distributed/ps/service/graph_brpc_server.cc  +15 −11
paddle/fluid/distributed/ps/table/common_graph_table.cc  +2 −2
paddle/fluid/framework/fleet/heter_ps/graph_gpu_ps_table_inl.cu  +16 −8
paddle/fluid/distributed/ps/service/graph_brpc_server.cc

@@ -126,9 +126,10 @@ int32_t GraphBrpcService::clear_nodes(Table *table,
                                       const PsRequestMessage &request,
                                       PsResponseMessage &response,
                                       brpc::Controller *cntl) {
-  GraphTableType type_id = *(GraphTableType *)(request.params(0).c_str());
-  int idx_ = *(int *)(request.params(1).c_str());
-  ((GraphTable *)table)->clear_nodes(type_id, idx_);
+  GraphTableType type_id =
+      *(reinterpret_cast<const GraphTableType *>(request.params(0).c_str()));
+  int idx_ = *(reinterpret_cast<const int *>(request.params(1).c_str()));
+  (reinterpret_cast<GraphTable *>(table))->clear_nodes(type_id, idx_);
   return 0;
 }

@@ -380,11 +381,12 @@ int32_t GraphBrpcService::pull_graph_list(Table *table,
         response, -1, "pull_graph_list request requires at least 5 arguments");
     return 0;
   }
-  GraphTableType type_id = *(GraphTableType *)(request.params(0).c_str());
-  int idx = *(int *)(request.params(1).c_str());
-  int start = *(int *)(request.params(2).c_str());
-  int size = *(int *)(request.params(3).c_str());
-  int step = *(int *)(request.params(4).c_str());
+  GraphTableType type_id =
+      *(reinterpret_cast<const GraphTableType *>(request.params(0).c_str()));
+  int idx = *(reinterpret_cast<const int *>(request.params(1).c_str()));
+  int start = *(reinterpret_cast<const int *>(request.params(2).c_str()));
+  int size = *(reinterpret_cast<const int *>(request.params(3).c_str()));
+  int step = *(reinterpret_cast<const int *>(request.params(4).c_str()));
   std::unique_ptr<char[]> buffer;
   int actual_size;
   (reinterpret_cast<GraphTable *>(table))

@@ -432,9 +434,11 @@ int32_t GraphBrpcService::graph_random_sample_nodes(
     const PsRequestMessage &request,
     PsResponseMessage &response,
     brpc::Controller *cntl) {
-  GraphTableType type_id = *(GraphTableType *)(request.params(0).c_str());
-  int idx_ = *(int *)(request.params(1).c_str());
-  size_t size = *(uint64_t *)(request.params(2).c_str());
+  GraphTableType type_id =
+      *(reinterpret_cast<const GraphTableType *>(request.params(0).c_str()));
+  int idx_ = *(reinterpret_cast<const int *>(request.params(1).c_str()));
+  size_t size =
+      *(reinterpret_cast<const uint64_t *>(request.params(2).c_str()));
   // size_t size = *(int64_t *)(request.params(0).c_str());
   std::unique_ptr<char[]> buffer;
   int actual_size;
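The graph_brpc_server.cc edits all apply the same cpplint fix: a C-style cast on the raw bytes of request.params(i).c_str() becomes an explicit reinterpret_cast<const T *>. Below is a minimal standalone sketch of that pattern; the decode_param helper and the fake parameter buffer are illustrative only and not part of the Paddle API.

#include <cstdint>
#include <iostream>
#include <string>

// Hypothetical helper: reinterpret the bytes of a serialized parameter as a T.
// Old style (flagged by cpplint's readability/casting check):
//   int idx = *(int *)(param.c_str());
// New style used by this commit:
//   int idx = *(reinterpret_cast<const int *>(param.c_str()));
template <typename T>
T decode_param(const std::string &param) {
  return *(reinterpret_cast<const T *>(param.c_str()));
}

int main() {
  // Build a fake serialized parameter holding an int, the way a brpc request would.
  int original = 42;
  std::string param(reinterpret_cast<const char *>(&original), sizeof(int));

  int decoded = decode_param<int>(param);
  std::cout << decoded << std::endl;  // prints 42
  return 0;
}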
paddle/fluid/distributed/ps/table/common_graph_table.cc

@@ -1917,7 +1917,7 @@ int32_t GraphTable::random_sample_nodes(GraphTableType table_type,
   int total_size = 0;
   auto &shards = table_type == GraphTableType::EDGE_TABLE ? edge_shards[idx]
                                                           : feature_shards[idx];
-  for (int i = 0; i < (int)shards.size(); i++) {
+  for (int i = 0; i < static_cast<int>(shards.size()); i++) {
     total_size += shards[i]->get_size();
   }
   if (sample_size > total_size) sample_size = total_size;

@@ -2429,7 +2429,7 @@ int GraphTable::get_all_feature_ids(
 int GraphTable::get_node_embedding_ids(
     int slice_num, std::vector<std::vector<uint64_t>> *output) {
-  if (is_load_reverse_edge and !FLAGS_graph_get_neighbor_id) {
+  if (is_load_reverse_edge && !FLAGS_graph_get_neighbor_id) {
     return get_all_id(GraphTableType::EDGE_TABLE, slice_num, output);
   } else {
     get_all_id(GraphTableType::EDGE_TABLE, slice_num, output);
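Both common_graph_table.cc edits are the same kind of style cleanup: the alternative operator token `and` becomes the conventional `&&`, and a C-style (int) cast on a size_t loop bound becomes static_cast<int>. A self-contained sketch of the two patterns follows; the variable names here are illustrative only.

#include <iostream>
#include <vector>

int main() {
  std::vector<int> shards = {3, 1, 4, 1, 5};
  bool is_load_reverse_edge = true;
  bool get_neighbor_id = false;

  // `and` is a valid ISO C++ alternative token, but the codebase style
  // (enforced by cpplint) prefers the usual `&&` spelling.
  if (is_load_reverse_edge && !get_neighbor_id) {
    std::cout << "taking the edge-table path" << std::endl;
  }

  // size() returns size_t; narrowing it with static_cast<int> makes the
  // conversion explicit instead of the old C-style `(int)shards.size()`.
  int total_size = 0;
  for (int i = 0; i < static_cast<int>(shards.size()); i++) {
    total_size += shards[i];
  }
  std::cout << "total_size = " << total_size << std::endl;
  return 0;
}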
paddle/fluid/framework/fleet/heter_ps/graph_gpu_ps_table_inl.cu

@@ -466,13 +466,13 @@ void GpuPsGraphTable::move_result_to_source_gpu(int start_index,
 void GpuPsGraphTable::move_degree_to_source_gpu(
     int start_index, int gpu_num, int *h_left, int *h_right, int *node_degree) {
-  int shard_len[gpu_num];
+  std::vector<int> shard_len(gpu_num, 0);
   for (int i = 0; i < gpu_num; i++) {
     if (h_left[i] == -1 || h_right[i] == -1) {
       continue;
     }
     shard_len[i] = h_right[i] - h_left[i] + 1;
-    int cur_step = (int)path_[start_index][i].nodes_.size() - 1;
+    int cur_step = static_cast<int>(path_[start_index][i].nodes_.size()) - 1;
     for (int j = cur_step; j > 0; j--) {
       CUDA_CHECK(
           cudaMemcpyAsync(path_[start_index][i].nodes_[j - 1].val_storage,

@@ -1566,8 +1566,12 @@ void GpuPsGraphTable::get_node_degree(
       len * sizeof(int),
       phi::Stream(reinterpret_cast<phi::StreamId>(stream)));
   int *d_shard_degree_ptr = reinterpret_cast<int *>(d_shard_degree->ptr());
-  split_input_to_shard(
-      (uint64_t *)(key), d_idx_ptr, len, d_left_ptr, d_right_ptr, gpu_id);
+  split_input_to_shard(reinterpret_cast<uint64_t *>(key),
+                       d_idx_ptr,
+                       len,
+                       d_left_ptr,
+                       d_right_ptr,
+                       gpu_id);
   heter_comm_kernel_->fill_shard_key(
       d_shard_keys_ptr, key, d_idx_ptr, len, stream);
   CUDA_CHECK(cudaStreamSynchronize(stream));

@@ -1594,8 +1598,12 @@ void GpuPsGraphTable::get_node_degree(
           shard_len * sizeof(uint64_t),
           shard_len * sizeof(uint64_t) + sizeof(int) * shard_len + shard_len % 2);
   }
-  walk_to_dest(
-      gpu_id, total_gpu, h_left, h_right, (uint64_t *)(d_shard_keys_ptr), NULL);
+  walk_to_dest(gpu_id,
+               total_gpu,
+               h_left,
+               h_right,
+               reinterpret_cast<uint64_t *>(d_shard_keys_ptr),
+               NULL);
   for (int i = 0; i < total_gpu; ++i) {
     if (h_left[i] == -1) {
       continue;

@@ -1610,11 +1618,11 @@ void GpuPsGraphTable::get_node_degree(
         get_table_offset(i, GraphTableType::EDGE_TABLE, edge_idx);
     tables_[table_offset]->get(reinterpret_cast<uint64_t *>(node.key_storage),
                                reinterpret_cast<uint64_t *>(node.val_storage),
-                               (size_t)(h_right[i] - h_left[i] + 1),
+                               static_cast<size_t>(h_right[i] - h_left[i] + 1),
                                resource_->remote_stream(i, gpu_id));
     GpuPsNodeInfo *node_info_list =
         reinterpret_cast<GpuPsNodeInfo *>(node.val_storage);
-    int *node_degree_array = (int *)(node_info_list + shard_len);
+    int *node_degree_array = reinterpret_cast<int *>(node_info_list + shard_len);
     int grid_size_ = (shard_len - 1) / block_size_ + 1;
     get_node_degree_kernel<<<grid_size_,
                              block_size_,
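Besides the cast replacements, the .cu file drops a variable-length array (int shard_len[gpu_num], a compiler extension rather than standard C++) in favor of std::vector<int>. Below is a minimal host-side sketch of that change under the assumption of plain C++; the GPU-specific copies and kernel launches are omitted, and compute_shard_lengths is a hypothetical helper, not a Paddle function.

#include <iostream>
#include <vector>

// Old (non-standard VLA, size known only at run time):
//   int shard_len[gpu_num];
// New, as in this commit: a zero-initialized std::vector of the same size.
void compute_shard_lengths(int gpu_num, const int *h_left, const int *h_right) {
  std::vector<int> shard_len(gpu_num, 0);
  for (int i = 0; i < gpu_num; i++) {
    if (h_left[i] == -1 || h_right[i] == -1) {
      continue;  // this GPU holds no keys for the current shard
    }
    shard_len[i] = h_right[i] - h_left[i] + 1;
  }
  for (int i = 0; i < gpu_num; i++) {
    std::cout << "gpu " << i << ": " << shard_len[i] << " keys" << std::endl;
  }
}

int main() {
  const int h_left[] = {0, -1, 10};
  const int h_right[] = {4, -1, 12};
  compute_shard_lengths(3, h_left, h_right);  // prints 5, 0, 3
  return 0;
}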