Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle; in sync with the upstream project)
Commit 2bef8a48 (unverified)
Authored on Jun 24, 2022 by zhangchunle; committed via GitHub on Jun 24, 2022
fix unused-variable warning (#43791)
Parent: b2704837
Showing 14 changed files with 1 addition and 44 deletions (+1 / -44)
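Every hunk below has the same shape: a local variable is initialized, usually from a config or accessor getter, and then never read, so builds with `-Wall` / `-Wunused-variable` flag it, and the fix is to delete the dead declaration. The sketch below is illustrative only; the names (`ComputeThreshold`, `SumBefore`, `SumAfter`) are hypothetical and do not come from this commit:

```cpp
#include <cstdio>
#include <vector>

static int ComputeThreshold() { return 42; }  // stand-in for a config getter

// Before: `threshold` is initialized but never read, so GCC/Clang warn
// "unused variable 'threshold'" under -Wall or -Wunused-variable.
int SumBefore(const std::vector<int>& xs) {
  auto threshold = ComputeThreshold();  // warning: unused variable
  int sum = 0;
  for (int x : xs) sum += x;
  return sum;
}

// After: the dead local is simply deleted, which is the pattern this commit
// applies. If the initializer had side effects worth keeping, the call could
// stay as a bare expression, or the variable could be tagged [[maybe_unused]].
int SumAfter(const std::vector<int>& xs) {
  int sum = 0;
  for (int x : xs) sum += x;
  return sum;
}

int main() {
  std::vector<int> xs = {1, 2, 3};
  std::printf("%d %d\n", SumBefore(xs), SumAfter(xs));
  return 0;
}
```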
Changed files:

paddle/fluid/distributed/ps/service/brpc_ps_client.cc             +0 -5
paddle/fluid/distributed/ps/service/brpc_ps_server.cc             +0 -2
paddle/fluid/distributed/ps/service/communicator/communicator.cc  +0 -2
paddle/fluid/distributed/ps/service/graph_brpc_client.cc          +0 -1
paddle/fluid/distributed/ps/service/graph_brpc_server.cc          +1 -3
paddle/fluid/distributed/ps/service/ps_local_client.cc            +0 -2
paddle/fluid/distributed/ps/table/ctr_accessor.cc                 +0 -6
paddle/fluid/distributed/ps/table/ctr_double_accessor.cc          +0 -4
paddle/fluid/distributed/ps/table/ctr_dymf_accessor.cc            +0 -3
paddle/fluid/distributed/ps/table/memory_dense_table.cc           +0 -2
paddle/fluid/distributed/ps/table/memory_sparse_table.cc          +0 -2
paddle/fluid/distributed/ps/table/sparse_accessor.cc              +0 -6
paddle/fluid/distributed/ps/table/ssd_sparse_table.cc             +0 -5
paddle/fluid/distributed/test/barrier_table_test.cc               +0 -1
paddle/fluid/distributed/ps/service/brpc_ps_client.cc

@@ -1494,8 +1494,6 @@ void BrpcPsClient::PushSparseTaskConsume() {
            merge_status.clear();
            std::vector<std::future<int>>().swap(merge_status);
            _push_sparse_merge_count_map[table_id] = 0;
            auto queue_size = task_queue->Size();
          } else {  // threshold not reached, only do multi-way merge
            std::vector<std::future<int>> merge_status(request_call_num);
            for (size_t shard_idx = 0; shard_idx < request_call_num; ++shard_idx) {

@@ -1542,7 +1540,6 @@ int BrpcPsClient::PushSparseAsyncShardMerge(
    std::vector<int>& request_kv_num, int table_id, int shard_idx,
    ValueAccessor* accessor) {
  size_t merged_kv_count = 0;
  uint64_t min_key = UINT64_MAX;
  uint32_t value_size = accessor->GetAccessorInfo().update_size;
  thread_local std::vector<std::pair<uint64_t, const float*>> sorted_kv_list;

@@ -1771,8 +1768,6 @@ void BrpcPsClient::PushDenseTaskConsume() {
        accessor->Merge(&total_send_data, &merge_data, total_send_data_size);
#pragma optimize("", off)
        auto* debug_closure = closure;
        auto* debug_task = async_task;
        delete async_task;
#pragma optimize("", on)
        return 0;
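The last hunk above keeps a pair of `debug_*` locals alive between MSVC-style `#pragma optimize("", off)` / `("", on)` directives, apparently so the values stay visible in a debugger. As a hedged aside (not code from Paddle): when such a variable has to remain, the portable C++17 way to keep it without tripping `-Wunused-variable` is the `[[maybe_unused]]` attribute:

```cpp
#include <cstdio>

struct Task {
  int id;
};

int Consume(Task* async_task_like) {
  // Kept only for inspection in a debugger; [[maybe_unused]] documents the
  // intent and silences -Wunused-variable without compiler-specific pragmas.
  [[maybe_unused]] auto* debug_task = async_task_like;
  delete async_task_like;
  return 0;
}

int main() {
  std::printf("%d\n", Consume(new Task{7}));
  return 0;
}
```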
paddle/fluid/distributed/ps/service/brpc_ps_server.cc

@@ -648,7 +648,6 @@ int32_t BrpcPsService::SaveAllTable(Table *table,
                                    PsResponseMessage &response,
                                    brpc::Controller *cntl) {
  auto &table_map = *(_server->GetTable());
  int32_t all_feasign_size = 0;
  int32_t feasign_size = 0;
  for (auto &itr : table_map) {

@@ -827,7 +826,6 @@ int32_t BrpcPsService::PushGlobalStep(Table *table,
    set_response_code(response, 0, "run_program data is empty");
    return 0;
  }
  uint32_t num = *(const uint32_t *)(request.data().data());
  const int64_t *values =
      (const int64_t *)(request.data().data() + sizeof(uint32_t));
  auto trainer_id = request.client_id();
paddle/fluid/distributed/ps/service/communicator/communicator.cc

@@ -658,8 +658,6 @@ void AsyncCommunicator::PushSparseFromTensorAsync(
  // TODO(zhaocaibei123): check type of show/clk is int? float? uint64?
  // const long int* show_tensor = shows->data<int64_t>();
  // const long int* clk_tensor = clks->data<int64_t>();
  const int64_t* show_tensor = shows->data<int64_t>();
  const int64_t* clk_tensor = clks->data<int64_t>();
  for (size_t index = 0; index < inputs->size(); ++index) {
    framework::LoDTensor* g_tensor = outputs->at(index);
paddle/fluid/distributed/ps/service/graph_brpc_client.cc

@@ -491,7 +491,6 @@ std::future<int32_t> GraphBrpcClient::random_sample_nodes(
        butil::IOBufBytesIterator io_buffer_itr(res_io_buffer);
        size_t bytes_size = io_buffer_itr.bytes_left();
        char* buffer = new char[bytes_size];
        auto size = io_buffer_itr.copy_and_forward((void*)(buffer), bytes_size);
        size_t index = 0;
        while (index < bytes_size) {
          ids.push_back(*(int64_t*)(buffer + index));
paddle/fluid/distributed/ps/service/graph_brpc_server.cc

@@ -498,8 +498,7 @@ int32_t GraphBrpcService::sample_neighbors_across_multi_servers(
   }
   int idx_ = *(int *)(request.params(0).c_str());
-  size_t node_num = request.params(1).size() / sizeof(int64_t),
-         size_of_size_t = sizeof(size_t);
+  size_t node_num = request.params(1).size() / sizeof(int64_t);
   int64_t *node_data = (int64_t *)(request.params(1).c_str());
   int sample_size = *(int64_t *)(request.params(2).c_str());
   bool need_weight = *(int64_t *)(request.params(3).c_str());

@@ -572,7 +571,6 @@ int32_t GraphBrpcService::sample_neighbors_across_multi_servers(
      failed[request2server[request_idx]] = true;
    } else {
      auto &res_io_buffer = closure->cntl(request_idx)->response_attachment();
      size_t node_size;
      res[request_idx].reset(new butil::IOBufBytesIterator(res_io_buffer));
      size_t num;
      res[request_idx]->copy_and_forward(&num, sizeof(size_t));
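This is the only file in the commit that adds a line: the comma-separated declarator list is rewritten so the unused `size_of_size_t` is dropped while `node_num` survives. A minimal sketch of the same kind of fix, using made-up names (`CountBefore`, `CountAfter`) rather than Paddle code:

```cpp
#include <cstddef>
#include <cstdio>

// Before: one declaration introduces two variables, but only `count` is read,
// so -Wunused-variable flags `elem_size`.
std::size_t CountBefore(std::size_t bytes) {
  std::size_t count = bytes / sizeof(long long), elem_size = sizeof(long long);
  return count;
}

// After: the unused declarator is removed; the declaration stays for the
// variable that is actually used (mirroring the node_num change above).
std::size_t CountAfter(std::size_t bytes) {
  std::size_t count = bytes / sizeof(long long);
  return count;
}

int main() {
  std::printf("%zu %zu\n", CountBefore(64), CountAfter(64));
  return 0;
}
```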
paddle/fluid/distributed/ps/service/ps_local_client.cc

@@ -283,7 +283,6 @@ int32_t PsLocalClient::Initialize() {
    size_t table_id, const uint64_t* keys, const float** update_values,
    size_t num, void* callback) {
  PSClientClosure* closure = reinterpret_cast<PSClientClosure*>(callback);
  auto* accessor = GetTableAccessor(table_id);
  auto* table_ptr = GetTable(table_id);
  TableContext table_context;

@@ -303,7 +302,6 @@ int32_t PsLocalClient::Initialize() {
    const uint64_t* keys, const float** update_values, size_t num) {
  auto* accessor = GetTableAccessor(table_id);
  auto* table_ptr = GetTable(table_id);
  TableContext table_context;
paddle/fluid/distributed/ps/table/ctr_accessor.cc

@@ -61,8 +61,6 @@ void CtrCommonAccessor::InitAccessorInfo() {
}
bool CtrCommonAccessor::Shrink(float* value) {
  auto base_threshold = _config.ctr_accessor_param().base_threshold();
  auto delta_threshold = _config.ctr_accessor_param().delta_threshold();
  auto delete_after_unseen_days =
      _config.ctr_accessor_param().delete_after_unseen_days();
  auto delete_threshold = _config.ctr_accessor_param().delete_threshold();

@@ -171,7 +169,6 @@ void CtrCommonAccessor::UpdateStatAfterSave(float* value, int param) {
}
int32_t CtrCommonAccessor::Create(float** values, size_t num) {
  auto embedx_dim = _config.embedx_dim();
  for (size_t value_item = 0; value_item < num; ++value_item) {
    float* value = values[value_item];
    value[common_feature_value.UnseenDaysIndex()] = 0;

@@ -245,7 +242,6 @@ int32_t CtrCommonAccessor::Merge(float** update_values,
// second dim: field num
int32_t CtrCommonAccessor::Update(float** update_values,
                                  const float** push_values,
                                  size_t num) {
  auto embedx_dim = _config.embedx_dim();
  for (size_t value_item = 0; value_item < num; ++value_item) {
    float* update_value = update_values[value_item];
    const float* push_value = push_values[value_item];

@@ -330,8 +326,6 @@ std::string CtrCommonAccessor::ParseToString(const float* v, int param) {
}
int CtrCommonAccessor::ParseFromString(const std::string& str, float* value) {
  int embedx_dim = _config.embedx_dim();
  _embedx_sgd_rule->InitValue(value + common_feature_value.EmbedxWIndex(),
                              value + common_feature_value.EmbedxG2SumIndex());
  auto ret = paddle::string::str_to_float(str.data(), value);
paddle/fluid/distributed/ps/table/ctr_double_accessor.cc

@@ -59,8 +59,6 @@ bool CtrDoubleAccessor::Shrink(float* value) {
  // auto base_threshold = _config.ctr_accessor_param().base_threshold();
  // auto delta_threshold = _config.ctr_accessor_param().delta_threshold();
  // auto delete_threshold = _config.ctr_accessor_param().delete_threshold();
  auto base_threshold = _config.ctr_accessor_param().base_threshold();
  auto delta_threshold = _config.ctr_accessor_param().delta_threshold();
  auto delete_after_unseen_days =
      _config.ctr_accessor_param().delete_after_unseen_days();
  auto delete_threshold = _config.ctr_accessor_param().delete_threshold();

@@ -170,7 +168,6 @@ void CtrDoubleAccessor::UpdateStatAfterSave(float* value, int param) {
}
int32_t CtrDoubleAccessor::Create(float** values, size_t num) {
  auto embedx_dim = _config.embedx_dim();
  for (size_t value_item = 0; value_item < num; ++value_item) {
    float* value = values[value_item];
    value[CtrDoubleFeatureValue::UnseenDaysIndex()] = 0;

@@ -246,7 +243,6 @@ int32_t CtrDoubleAccessor::Merge(float** update_values,
// second dim: field num
int32_t CtrDoubleAccessor::Update(float** update_values,
                                  const float** push_values,
                                  size_t num) {
  auto embedx_dim = _config.embedx_dim();
  for (size_t value_item = 0; value_item < num; ++value_item) {
    float* update_value = update_values[value_item];
    const float* push_value = push_values[value_item];
paddle/fluid/distributed/ps/table/ctr_dymf_accessor.cc

@@ -62,8 +62,6 @@ void CtrDymfAccessor::InitAccessorInfo() {
}
bool CtrDymfAccessor::Shrink(float* value) {
  auto base_threshold = _config.ctr_accessor_param().base_threshold();
  auto delta_threshold = _config.ctr_accessor_param().delta_threshold();
  auto delete_after_unseen_days =
      _config.ctr_accessor_param().delete_after_unseen_days();
  auto delete_threshold = _config.ctr_accessor_param().delete_threshold();

@@ -172,7 +170,6 @@ void CtrDymfAccessor::UpdateStatAfterSave(float* value, int param) {
}
int32_t CtrDymfAccessor::Create(float** values, size_t num) {
  auto embedx_dim = _config.embedx_dim();
  for (size_t value_item = 0; value_item < num; ++value_item) {
    float* value = values[value_item];
    value[common_feature_value.UnseenDaysIndex()] = 0;
paddle/fluid/distributed/ps/table/memory_dense_table.cc

@@ -249,7 +249,6 @@ int32_t MemoryDenseTable::Load(const std::string& path,
  float data_buffer[5];
  float* data_buff_ptr = data_buffer;
  std::string line_data;
  int size = static_cast<int>(values_.size());
  auto common = _config.common();
  for (size_t i = start_file_idx; i < end_file_idx + 1; ++i) {

@@ -354,7 +353,6 @@ int32_t MemoryDenseTable::Save(const std::string& path,
  } else {
    std::ostringstream os;
    for (int x = 0; x < size; ++x) {
      auto& varname = common.params()[x];
      int dim = common.dims()[x];
      VLOG(3) << "MemoryDenseTable::save dim " << x << " size: " << dim;
      for (int y = 0; y < dim; ++y) {
paddle/fluid/distributed/ps/table/memory_sparse_table.cc

@@ -163,8 +163,6 @@ int32_t MemorySparseTable::LoadLocalFS(const std::string& path,
                                       const std::string& param) {
  std::string table_path = TableDir(path);
  auto file_list = paddle::framework::localfs_list(table_path);
  int load_param = atoi(param.c_str());
  size_t expect_shard_num = _sparse_table_shard_num;
  if (file_list.size() != expect_shard_num) {
    LOG(WARNING) << "MemorySparseTable file_size:" << file_list.size()
paddle/fluid/distributed/ps/table/sparse_accessor.cc

@@ -55,8 +55,6 @@ void SparseAccessor::InitAccessorInfo() {
}
bool SparseAccessor::Shrink(float* value) {
  auto base_threshold = _config.ctr_accessor_param().base_threshold();
  auto delta_threshold = _config.ctr_accessor_param().delta_threshold();
  auto delete_after_unseen_days =
      _config.ctr_accessor_param().delete_after_unseen_days();
  auto delete_threshold = _config.ctr_accessor_param().delete_threshold();

@@ -146,7 +144,6 @@ void SparseAccessor::UpdateStatAfterSave(float* value, int param) {
}
int32_t SparseAccessor::Create(float** values, size_t num) {
  auto embedx_dim = _config.embedx_dim();
  for (size_t value_item = 0; value_item < num; ++value_item) {
    float* value = values[value_item];
    value[sparse_feature_value.UnseenDaysIndex()] = 0;

@@ -215,7 +212,6 @@ int32_t SparseAccessor::Merge(float** update_values,
// second dim: field num
int32_t SparseAccessor::Update(float** update_values,
                               const float** push_values,
                               size_t num) {
  auto embedx_dim = _config.embedx_dim();
  for (size_t value_item = 0; value_item < num; ++value_item) {
    float* update_value = update_values[value_item];
    const float* push_value = push_values[value_item];

@@ -294,8 +290,6 @@ std::string SparseAccessor::ParseToString(const float* v, int param) {
}
int SparseAccessor::ParseFromString(const std::string& str, float* value) {
  int embedx_dim = _config.embedx_dim();
  _embedx_sgd_rule->InitValue(value + sparse_feature_value.EmbedxWIndex(),
                              value + sparse_feature_value.EmbedxG2SumIndex());
  auto ret = paddle::string::str_to_float(str.data(), value);
paddle/fluid/distributed/ps/table/ssd_sparse_table.cc

@@ -290,9 +290,6 @@ int64_t SSDSparseTable::LocalSize() {
    local_size += _local_shards[i].size();
  }
  // TODO rocksdb size
  uint64_t ssd_size = 0;
  // _db->get_estimate_key_num(ssd_size);
  // return local_size + ssd_size;
  return local_size;
}

@@ -473,7 +470,6 @@ int64_t SSDSparseTable::CacheShuffle(
  }
  int shuffle_node_num = _config.sparse_table_cache_file_num();
  LOG(INFO) << "Table>> shuffle node num is: " << shuffle_node_num;
  size_t file_start_idx = _avg_local_shard_num * _shard_idx;
  int thread_num = _real_local_shard_num < 20 ? _real_local_shard_num : 20;
  std::vector<

@@ -578,7 +574,6 @@ int32_t SSDSparseTable::SaveCache(
    return 0;
  }
  int save_param = atoi(param.c_str());  // batch_model:0  xbox:1
  size_t file_start_idx = _avg_local_shard_num * _shard_idx;
  std::string table_path = paddle::string::format_string(
      "%s/%03d_cache/", path.c_str(), _config.table_id());
  _afs_client.remove(paddle::string::format_string(
paddle/fluid/distributed/test/barrier_table_test.cc

@@ -26,7 +26,6 @@ namespace paddle {
namespace distributed {
TEST(BarrierTable, Barrier) {
  int emb_dim = 10;
  int trainers = 2;
  bool sync = true;