机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit ce704ee9 (unverified)
Authored on Jun 17, 2022 by zhangchunle; committed by GitHub on Jun 17, 2022

fix sign-compare warning (#43401)
Parent: 31b73346

Showing 18 changed files with 39 additions and 36 deletions (+39 -36)
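The warning being fixed, -Wsign-compare, fires when a signed and an unsigned integer meet in a comparison: the signed operand is implicitly converted to unsigned, so negative values wrap around and range checks can pass by accident. The hunks below apply the same two fixes throughout: give the loop index the same type as its bound, or static_cast one side at the comparison. A minimal sketch of the warning and both fixes (names here are illustrative, not taken from the patch):

    #include <cstddef>
    #include <vector>

    void demo(const std::vector<float>& values, int signed_bound) {
      // Would warn under -Wsign-compare: int index vs. size_t bound.
      // for (int i = 0; i < values.size(); ++i) {}

      // Fix 1: match the index type to the bound.
      for (size_t i = 0; i < values.size(); ++i) { /* ... */ }

      // Fix 2: cast one side explicitly when the types must stay different.
      if (static_cast<int>(values.size()) == signed_bound) { /* ... */ }
    }

    int main() { demo({1.0f, 2.0f}, 2); }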
paddle/fluid/distributed/ps/service/brpc_ps_client.cc  +4 -3
paddle/fluid/distributed/ps/service/brpc_ps_server.cc  +1 -1
paddle/fluid/distributed/ps/service/communicator/communicator.cc  +2 -2
paddle/fluid/distributed/ps/service/graph_brpc_server.cc  +4 -3
paddle/fluid/distributed/ps/service/ps_client.cc  +1 -1
paddle/fluid/distributed/ps/service/ps_local_client.cc  +1 -1
paddle/fluid/distributed/ps/service/ps_service/graph_py_service.cc  +1 -1
paddle/fluid/distributed/ps/service/ps_service/graph_py_service.h  +3 -3
paddle/fluid/distributed/ps/service/server.cc  +1 -1
paddle/fluid/distributed/ps/table/common_graph_table.cc  +2 -2
paddle/fluid/distributed/ps/table/memory_dense_table.cc  +2 -2
paddle/fluid/distributed/ps/table/ssd_sparse_table.cc  +3 -2
paddle/fluid/distributed/ps/table/ssd_sparse_table.h  +1 -1
paddle/fluid/distributed/ps/wrapper/fleet.cc  +3 -3
paddle/fluid/distributed/test/brpc_service_sparse_sgd_test.cc  +1 -1
paddle/fluid/distributed/test/ctr_accessor_test.cc  +4 -4
paddle/fluid/distributed/test/memory_sparse_table_test.cc  +1 -1
paddle/fluid/pybind/fleet_py.cc  +4 -4
paddle/fluid/distributed/ps/service/brpc_ps_client.cc

@@ -136,7 +136,7 @@ int32_t BrpcPsClient::CreateClient2ClientConnection(
     server_ip_port.append(":");
     server_ip_port.append(std::to_string(client_list[i].port));
     _client_channels[i].reset(new brpc::Channel());
-    if (_client_channels[i]->Init(server_ip_port.c_str(), "", &options) != 0) {
+    if (_client_channels[i]->Init(server_ip_port.c_str(), "", &options)) {
       VLOG(0) << "BrpcPSClient connect to Client:" << server_ip_port
               << " Failed! Try again.";
       std::string int_ip_port =
@@ -1195,7 +1195,8 @@ std::future<int32_t> BrpcPsClient::SendClient2ClientMsg(
     int msg_type, int to_client_id, const std::string &msg) {
   auto promise = std::make_shared<std::promise<int32_t>>();
   std::future<int> fut = promise->get_future();
-  if (to_client_id >= _client_channels.size()) {
+  if (to_client_id >= 0 &&
+      static_cast<size_t>(to_client_id) >= _client_channels.size()) {
     VLOG(0) << "to_client_id is out of range clients, which size is "
             << _client_channels.size();
     promise->set_value(-1);
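To see why the original comparison was risky as well as noisy (a toy repro, not code from the patch): a negative int converted to size_t becomes a huge value, so the old check was true for negative IDs only by accident of wraparound; the rewritten check makes the sign handling explicit before casting.

    #include <cstddef>
    #include <iostream>

    int main() {
      int to_client_id = -1;
      size_t size = 8;
      // Implicit conversion: -1 wraps to SIZE_MAX, so this prints 1.
      std::cout << (to_client_id >= size) << "\n";
      // Explicit guard + cast, as in the hunk above: prints 0.
      std::cout << (to_client_id >= 0 &&
                    static_cast<size_t>(to_client_id) >= size)
                << "\n";
    }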
@@ -1778,7 +1779,7 @@ void BrpcPsClient::PushDenseTaskConsume() {
     });
     ++merge_count;
   }
-  for (uint32_t i = 0; i < merge_count; ++i) {
+  for (size_t i = 0; i < merge_count; ++i) {
     merge_status[i].wait();
   }
paddle/fluid/distributed/ps/service/brpc_ps_server.cc

@@ -713,7 +713,7 @@ int32_t BrpcPsService::CacheShuffle(Table *table,
   };
   std::vector<Table *> table_ptrs;
-  for (size_t i = 3; i < request.params_size(); ++i) {
+  for (int i = 3; i < request.params_size(); ++i) {
     int table_id = std::stoi(request.params(i));
     Table *table_ptr = _server->GetTable(table_id);
     table_ptrs.push_back(table_ptr);
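This hunk and several below fix the warning in the opposite direction: protobuf's generated repeated-field size accessors (params_size(), downpour_table_param_size()) return int, so the cheapest fix is a signed index rather than casting the bound on every iteration. A compilable sketch with a hand-rolled stand-in for the generated message (Request here is hypothetical, not the real proto class):

    #include <string>
    #include <vector>

    // Stand-in for a generated protobuf message: *_size() returns int.
    struct Request {
      std::vector<std::string> params_;
      int params_size() const { return static_cast<int>(params_.size()); }
      const std::string& params(int i) const { return params_[i]; }
    };

    void consume(const Request& request) {
      // A signed index matches the accessor's return type, so no warning
      // and no cast inside the loop.
      for (int i = 3; i < request.params_size(); ++i) {
        (void)request.params(i);
      }
    }

    int main() { consume(Request{{"a", "b", "c", "d", "e"}}); }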
paddle/fluid/distributed/ps/service/communicator/communicator.cc

@@ -681,7 +681,7 @@ void AsyncCommunicator::PushSparseFromTensorAsync(
   if (tensor->lod().size() > 0) {
     for (size_t i = 0; i < tensor->lod()[0].size() - 1; ++i) {
-      for (int j = tensor->lod()[0][i]; j < tensor->lod()[0][i + 1];
+      for (size_t j = tensor->lod()[0][i]; j < tensor->lod()[0][i + 1];
            ++j, output_len += fea_dim) {
         uint64_t real_id = static_cast<uint64_t>(ids[j]);
         if (real_id == padding_id) {
@@ -727,7 +727,7 @@ void AsyncCommunicator::PushSparseFromTensorAsync(
         ++input_idx;
       }
     }
-    CHECK(output_len == g_tensor->numel());
+    CHECK(static_cast<size_t>(output_len) == g_tensor->numel());
   }
   std::vector<float *> push_g_vec(input_idx, nullptr);
paddle/fluid/distributed/ps/service/graph_brpc_server.cc

@@ -547,7 +547,8 @@ int32_t GraphBrpcService::sample_neighbors_across_multi_servers(
     seq.push_back(request_idx);
   }
   size_t remote_call_num = request_call_num;
-  if (request2server.size() != 0 && request2server.back() == rank) {
+  if (request2server.size() != 0 &&
+      static_cast<size_t>(request2server.back()) == rank) {
     remote_call_num--;
     local_buffers.resize(node_id_buckets.back().size());
     local_actual_sizes.resize(node_id_buckets.back().size());
@@ -582,7 +583,7 @@ int32_t GraphBrpcService::sample_neighbors_across_multi_servers(
   for (size_t i = 0; i < node_num; i++) {
     if (fail_num > 0 && failed[seq[i]]) {
       size = 0;
-    } else if (request2server[seq[i]] != rank) {
+    } else if (static_cast<size_t>(request2server[seq[i]]) != rank) {
       res[seq[i]]->copy_and_forward(&size, sizeof(int));
     } else {
       size = local_actual_sizes[local_index++];
@@ -596,7 +597,7 @@ int32_t GraphBrpcService::sample_neighbors_across_multi_servers(
   for (size_t i = 0; i < node_num; i++) {
     if (fail_num > 0 && failed[seq[i]]) {
       continue;
-    } else if (request2server[seq[i]] != rank) {
+    } else if (static_cast<size_t>(request2server[seq[i]]) != rank) {
       char temp[actual_size[i] + 1];
       res[seq[i]]->copy_and_forward(temp, actual_size[i]);
       cntl->response_attachment().append(temp, actual_size[i]);
paddle/fluid/distributed/ps/service/ps_client.cc

@@ -43,7 +43,7 @@ int32_t PSClient::Configure(
   const auto &work_param = _config.worker_param().downpour_worker_param();
-  for (size_t i = 0; i < work_param.downpour_table_param_size(); ++i) {
+  for (int i = 0; i < work_param.downpour_table_param_size(); ++i) {
     auto *accessor = CREATE_PSCORE_CLASS(
         ValueAccessor,
         work_param.downpour_table_param(i).accessor().accessor_class());
paddle/fluid/distributed/ps/service/ps_local_client.cc

@@ -23,7 +23,7 @@ namespace distributed {
 int32_t PsLocalClient::Initialize() {
   const auto &downpour_param = _config.server_param().downpour_server_param();
   TableManager::Instance().Initialize();
-  for (size_t i = 0; i < downpour_param.downpour_table_param_size(); ++i) {
+  for (int i = 0; i < downpour_param.downpour_table_param_size(); ++i) {
     auto *table = CREATE_PSCORE_CLASS(
         Table, downpour_param.downpour_table_param(i).table_class());
     table->SetShard(0, 1);
paddle/fluid/distributed/ps/service/ps_service/graph_py_service.cc

@@ -51,7 +51,7 @@ void GraphPyService::add_table_feat_conf(std::string table_name,
   int feat_idx = table_feat_mapping[idx][feat_name];
   VLOG(0) << "table_name " << table_name << " mapping id " << idx;
   VLOG(0) << " feat name " << feat_name << " feat id" << feat_idx;
-  if (feat_idx < table_feat_conf_feat_name[idx].size()) {
+  if (static_cast<size_t>(feat_idx) < table_feat_conf_feat_name[idx].size()) {
     // overide
     table_feat_conf_feat_name[idx][feat_idx] = feat_name;
     table_feat_conf_feat_dtype[idx][feat_idx] = feat_dtype;
paddle/fluid/distributed/ps/service/ps_service/graph_py_service.h

@@ -81,14 +81,14 @@ class GraphPyService {
     graph_proto->set_table_name("cpu_graph_table");
     graph_proto->set_use_cache(false);
-    for (int i = 0; i < id_to_edge.size(); i++)
+    for (size_t i = 0; i < id_to_edge.size(); i++)
       graph_proto->add_edge_types(id_to_edge[i]);
-    for (int i = 0; i < id_to_feature.size(); i++) {
+    for (size_t i = 0; i < id_to_feature.size(); i++) {
       graph_proto->add_node_types(id_to_feature[i]);
       auto feat_node = id_to_feature[i];
       ::paddle::distributed::GraphFeature *g_f =
           graph_proto->add_graph_feature();
-      for (int x = 0; x < table_feat_conf_feat_name[i].size(); x++) {
+      for (size_t x = 0; x < table_feat_conf_feat_name[i].size(); x++) {
         g_f->add_name(table_feat_conf_feat_name[i][x]);
         g_f->add_dtype(table_feat_conf_feat_dtype[i][x]);
         g_f->add_shape(table_feat_conf_feat_shape[i][x]);
paddle/fluid/distributed/ps/service/server.cc

@@ -76,7 +76,7 @@ int32_t PSServer::Configure(
   uint32_t barrier_table = UINT32_MAX;
   uint32_t global_step_table = UINT32_MAX;
-  for (size_t i = 0; i < downpour_param.downpour_table_param_size(); ++i) {
+  for (int i = 0; i < downpour_param.downpour_table_param_size(); ++i) {
     auto *table = CREATE_PSCORE_CLASS(
         Table, downpour_param.downpour_table_param(i).table_class());
paddle/fluid/distributed/ps/table/common_graph_table.cc

@@ -1205,7 +1205,7 @@ uint32_t GraphTable::get_thread_pool_index_by_shard_index(int64_t shard_index) {
 int32_t GraphTable::clear_nodes(int type_id, int idx) {
   auto &search_shards = type_id == 0 ? edge_shards[idx] : feature_shards[idx];
-  for (int i = 0; i < search_shards.size(); i++) {
+  for (size_t i = 0; i < search_shards.size(); i++) {
     search_shards[i]->clear();
   }
   return 0;
@@ -1478,7 +1478,7 @@ std::vector<std::vector<int64_t>> GraphTable::get_all_id(int type_id, int idx,
   std::vector<std::vector<int64_t>> res(slice_num);
   auto &search_shards = type_id == 0 ? edge_shards[idx] : feature_shards[idx];
   std::vector<std::future<std::vector<int64_t>>> tasks;
-  for (int i = 0; i < search_shards.size(); i++) {
+  for (size_t i = 0; i < search_shards.size(); i++) {
     tasks.push_back(_shards_task_pool[i % task_pool_size_]->enqueue(
         [&search_shards, i]() -> std::vector<int64_t> {
           return search_shards[i]->get_all_id();
paddle/fluid/distributed/ps/table/memory_dense_table.cc

@@ -81,8 +81,8 @@ int32_t MemoryDenseTable::InitializeValue() {
   fixed_len_params_dim_ = 0;
   for (int x = 0; x < size; ++x) {
-    int dim = common.dims()[x];
-    if (dim != param_dim_) {
+    auto &dim = common.dims()[x];
+    if (static_cast<int>(dim) != param_dim_) {
       fixed_len_params_dim_ += dim;
     } else {
       param_col_ids_.push_back(x);
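When the mismatched value feeds several expressions, this hunk shows a third variant: keep the element in its native (unsigned) type via auto& and cast only at the one signed comparison, instead of forcing the whole value into int up front. A small hedged sketch (dims and param_dim are stand-ins for common.dims() and param_dim_):

    #include <cstdint>
    #include <vector>

    int main() {
      std::vector<uint64_t> dims = {8, 16, 8};  // stand-in for common.dims()
      int param_dim = 8;                        // stand-in for param_dim_
      int fixed_len = 0;
      for (int x = 0; x < static_cast<int>(dims.size()); ++x) {
        auto& dim = dims[x];  // element keeps its unsigned type
        if (static_cast<int>(dim) != param_dim) {  // cast at the comparison
          fixed_len += static_cast<int>(dim);
        }
      }
      return fixed_len == 16 ? 0 : 1;
    }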
paddle/fluid/distributed/ps/table/ssd_sparse_table.cc

@@ -625,7 +625,7 @@ int32_t SSDSparseTable::Load(const std::string& path,
 }
 //加载path目录下数据[start_idx, end_idx)
-int32_t SSDSparseTable::Load(size_t start_idx, int end_idx,
+int32_t SSDSparseTable::Load(size_t start_idx, size_t end_idx,
                              const std::vector<std::string>& file_list,
                              const std::string& param) {
   if (start_idx >= file_list.size()) {
@@ -699,7 +699,8 @@ int32_t SSDSparseTable::Load(size_t start_idx, int end_idx,
         ssd_values.emplace_back(
             std::make_pair((char*)data_buffer_ptr, value_size * sizeof(float)));
         data_buffer_ptr += feature_value_size;
-        if (ssd_keys.size() == FLAGS_pserver_load_batch_size) {
+        if (static_cast<int>(ssd_keys.size()) ==
+            FLAGS_pserver_load_batch_size) {
           _db->put_batch(local_shard_id, ssd_keys, ssd_values, ssd_keys.size());
           ssd_keys.clear();
paddle/fluid/distributed/ps/table/ssd_sparse_table.h

@@ -79,7 +79,7 @@ class SSDSparseTable : public MemorySparseTable {
   virtual int32_t Load(const std::string& path,
                        const std::string& param) override;
   //加载path目录下数据[start_idx, end_idx)
-  virtual int32_t Load(size_t start_idx, int end_idx,
+  virtual int32_t Load(size_t start_idx, size_t end_idx,
                        const std::vector<std::string>& file_list,
                        const std::string& param);
   int64_t LocalSize();
paddle/fluid/distributed/ps/wrapper/fleet.cc

@@ -536,7 +536,7 @@ void FleetWrapper::PushSparseFromTensorAsync(
   output_len = 0;
   if (tensor->lod().size() > 0) {
-    for (int i = 0; i < tensor->lod()[0].size() - 1; ++i) {
+    for (size_t i = 0; i < tensor->lod()[0].size() - 1; ++i) {
       for (size_t j = tensor->lod()[0][i]; j < tensor->lod()[0][i + 1];
            ++j, output_len += fea_dim) {
         uint64_t real_id = static_cast<uint64_t>(ids[j]);
@@ -566,7 +566,7 @@ void FleetWrapper::PushSparseFromTensorAsync(
       }
     }
   } else {
-    for (int i = 0; i < len; ++i, output_len += fea_dim) {
+    for (size_t i = 0; i < len; ++i, output_len += fea_dim) {
       uint64_t real_id = static_cast<uint64_t>(ids[i]);
       if (real_id == padding_id) {
         continue;
@@ -592,7 +592,7 @@ void FleetWrapper::PushSparseFromTensorAsync(
       ++input_idx;
     }
   }
-  CHECK(output_len == g_tensor->numel());
+  CHECK(static_cast<int64_t>(output_len) == g_tensor->numel());
   }
   std::vector<float*> push_g_vec(input_idx, nullptr);
paddle/fluid/distributed/test/brpc_service_sparse_sgd_test.cc

@@ -295,7 +295,7 @@ void RunBrpcPushSparse() {
       fea_temp_value_ptr.data(), 0, fea_keys.data(), fea_keys.size(), true);
   pull_update_status.wait();
-  for (size_t idx = 0; idx < tensor->numel(); ++idx) {
+  for (int64_t idx = 0; idx < tensor->numel(); ++idx) {
     EXPECT_FLOAT_EQ(fea_temp_values[idx], fea_values[idx] - 1.0);
   }
paddle/fluid/distributed/test/ctr_accessor_test.cc

@@ -222,15 +222,15 @@ TEST(downpour_feature_value_accessor_test, test_update) {
     v.embed_w = value[i][5];
     int idx = 6;
-    for (auto j = 0u; j < acc->common_feature_value.embed_sgd_dim; ++j) {
+    for (int j = 0; j < acc->common_feature_value.embed_sgd_dim; ++j) {
       v.embed_g2sum.push_back(value[i][idx + j]);
     }
     idx += acc->common_feature_value.embed_sgd_dim;
-    for (auto j = 0u; j < acc->common_feature_value.embedx_dim; ++j) {
+    for (int j = 0; j < acc->common_feature_value.embedx_dim; ++j) {
       v.embedx_w.push_back(value[i][idx + j]);
     }
     idx += acc->common_feature_value.embedx_dim;
-    for (auto j = 0u; j < acc->common_feature_value.embedx_sgd_dim; ++j) {
+    for (int j = 0; j < acc->common_feature_value.embedx_sgd_dim; ++j) {
      v.embedx_g2sum.push_back(value[i][idx + j]);
    }
@@ -239,7 +239,7 @@ TEST(downpour_feature_value_accessor_test, test_update) {
     push_v.show = grad[i][1];
     push_v.click = grad[i][2];
     push_v.embed_g = grad[i][3];
-    for (auto j = 0; j < parameter.embedx_dim(); ++j) {
+    for (int j = 0; j < parameter.embedx_dim(); ++j) {
       push_v.embedx_g.push_back(grad[i][4 + j]);
     }
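Most of the test fixes above remove the same subtle trap: auto j = 0u deduces unsigned int, so the loop index silently becomes unsigned and warns against signed bounds like the int accessor fields; spelling out int makes index and bound agree. A toy repro (CommonFeatureValue and embed_sgd_dim are hypothetical stand-ins):

    struct CommonFeatureValue {
      int embed_sgd_dim = 4;  // stand-in for the accessor's int field
    };

    int main() {
      CommonFeatureValue v;
      // Would warn: auto j = 0u is unsigned, the bound is int.
      // for (auto j = 0u; j < v.embed_sgd_dim; ++j) {}
      for (int j = 0; j < v.embed_sgd_dim; ++j) { /* ... */ }
    }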
paddle/fluid/distributed/test/memory_sparse_table_test.cc

@@ -142,7 +142,7 @@ TEST(MemorySparseTable, SGD) {
   // table->PullSparse(pull_values.data(), value);
   for (size_t i = 0; i < init_keys.size(); ++i) {
-    for (size_t j = 2; j < emb_dim + 3; ++j) {
+    for (int j = 2; j < emb_dim + 3; ++j) {
       auto update_val = init_values[i * (emb_dim + 1) + j] -
                         0.1 * total_gradients[3 + i * (emb_dim + 4) + j];
       VLOG(3) << total_gradients[i * (emb_dim + 4) + j + 3] << ":"
paddle/fluid/pybind/fleet_py.cc

@@ -221,8 +221,8 @@ void BindGraphPyClient(py::module* m) {
         auto feats = self.get_node_feat(node_type, node_ids, feature_names);
         std::vector<std::vector<py::bytes>> bytes_feats(feats.size());
-        for (int i = 0; i < feats.size(); ++i) {
-          for (int j = 0; j < feats[i].size(); ++j) {
+        for (size_t i = 0; i < feats.size(); ++i) {
+          for (size_t j = 0; j < feats[i].size(); ++j) {
            bytes_feats[i].push_back(py::bytes(feats[i][j]));
          }
        }
@@ -234,8 +234,8 @@ void BindGraphPyClient(py::module* m) {
          std::vector<std::string> feature_names,
          std::vector<std::vector<py::bytes>> bytes_feats) {
         std::vector<std::vector<std::string>> feats(bytes_feats.size());
-        for (int i = 0; i < bytes_feats.size(); ++i) {
-          for (int j = 0; j < bytes_feats[i].size(); ++j) {
+        for (size_t i = 0; i < bytes_feats.size(); ++i) {
+          for (size_t j = 0; j < bytes_feats[i].size(); ++j) {
            feats[i].push_back(std::string(bytes_feats[i][j]));
          }
        }