机器未来 / Paddle (forked from PaddlePaddle/Paddle)

Commit 2a795dfa (unverified)
Authored by zhangchunle on Jun 21, 2022; committed by GitHub on Jun 21, 2022.

fix sign-compare warning4 (#43625)
Parent: 007f3614
Showing 10 changed files with 24 additions and 18 deletions (+24, -18):
paddle/fluid/distributed/ps/service/brpc_ps_server.cc (+1 -1)
paddle/fluid/distributed/ps/service/communicator/communicator.cc (+1 -1)
paddle/fluid/distributed/ps/table/barrier_table.cc (+1 -1)
paddle/fluid/distributed/ps/table/ctr_double_accessor.cc (+1 -1)
paddle/fluid/distributed/ps/table/memory_dense_table.cc (+1 -1)
paddle/fluid/distributed/ps/table/memory_sparse_table.cc (+2 -1)
paddle/fluid/distributed/ps/table/sparse_accessor.cc (+1 -1)
paddle/fluid/distributed/ps/table/ssd_sparse_table.cc (+3 -2)
paddle/fluid/distributed/ps/wrapper/fleet.cc (+12 -8)
paddle/fluid/distributed/test/ctr_accessor_test.cc (+1 -1)
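Every hunk below follows the same recipe: take the signed/unsigned integer conversion the compiler was already performing implicitly and make it explicit with a `static_cast`, so that GCC/Clang's `-Wsign-compare` diagnostic no longer fires. As a minimal, self-contained illustration of the warning and the fix pattern (all names here are illustrative, not taken from the Paddle sources):

```cpp
#include <cstdio>
#include <vector>

int main() {
  std::vector<float> channels(4);
  int to_id = -1;  // e.g. an id that was never assigned

  // Warns under -Wsign-compare: to_id is implicitly converted to size_t,
  // so -1 wraps to SIZE_MAX and the range check fires. That happens to be
  // the right outcome, but the conversion is invisible in the source.
  //   if (to_id >= channels.size()) { ... }

  // The commit's pattern: spell the conversion out. The behavior is the
  // same, the intent is visible, and the warning is gone.
  if (static_cast<size_t>(to_id) >= channels.size()) {
    std::printf("out of range\n");
  }
  return 0;
}
```

For in-range values these casts are behavior-preserving; they could change an outcome only in corner cases (negative counts, sizes above INT_MAX) that these code paths are not expected to hit.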
paddle/fluid/distributed/ps/service/brpc_ps_server.cc

```diff
@@ -136,7 +136,7 @@ std::future<int32_t> BrpcPsServer::SendPServer2PServerMsg(
     int msg_type, int to_pserver_id, const std::string& msg) {
   auto promise = std::make_shared<std::promise<int32_t>>();
   std::future<int> fut = promise->get_future();
-  if (to_pserver_id >= _pserver_channels.size()) {
+  if (static_cast<size_t>(to_pserver_id) >= _pserver_channels.size()) {
     LOG(FATAL) << "to_pserver_id is out of range pservers, which size is "
                << _pserver_channels.size();
     promise->set_value(-1);
```
paddle/fluid/distributed/ps/service/communicator/communicator.cc

```diff
@@ -727,7 +727,7 @@ void AsyncCommunicator::PushSparseFromTensorAsync(
         ++input_idx;
       }
     }
-    CHECK(static_cast<size_t>(output_len) == g_tensor->numel());
+    CHECK(static_cast<int64_t>(output_len) == g_tensor->numel());
   }

   std::vector<float*> push_g_vec(input_idx, nullptr);
```
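Worth noting in this hunk: the fix changes the cast's target type from `size_t` to `int64_t`. `numel()` on a Paddle tensor returns a signed 64-bit count, so casting `output_len` (presumably a `size_t`) to `int64_t` makes both operands the same signed type, whereas the old cast still left a `size_t` vs `int64_t` comparison. A minimal sketch with a stand-in `numel`:

```cpp
#include <cstdint>
#include <cstdio>

int64_t numel() { return 6; }  // stand-in for g_tensor->numel()

int main() {
  size_t output_len = 6;  // presumed type of output_len

  // Old: static_cast<size_t>(output_len) == numel() compared size_t with
  // int64_t, which still converts numel()'s result and still warns.
  // New: both sides are int64_t, so nothing is converted implicitly.
  bool ok = static_cast<int64_t>(output_len) == numel();
  std::printf("%s\n", ok ? "match" : "mismatch");
  return 0;
}
```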
paddle/fluid/distributed/ps/table/barrier_table.cc

```diff
@@ -42,7 +42,7 @@ int32_t BarrierTable::Barrier(const uint32_t trainer_id,
             << " add trainer id: " << trainer_id;
   }

-  if (trainer_ids_.size() < trigger_.load()) {
+  if (static_cast<int>(trainer_ids_.size()) < trigger_.load()) {
     std::vector<uint32_t> diffs(trainer_all_.size());
     auto iter = std::set_difference(trainer_all_.begin(), trainer_all_.end(),
                                     trainer_ids_.begin(), trainer_ids_.end(),
```
paddle/fluid/distributed/ps/table/ctr_double_accessor.cc

```diff
@@ -234,7 +234,7 @@ int32_t CtrDoubleAccessor::Merge(float** update_values,
     update_value[i] += other_update_value[i];
   }*/
   for (size_t i = 0; i < total_dim; ++i) {
-    if (i != CtrDoublePushValue::SlotIndex()) {
+    if (static_cast<int>(i) != CtrDoublePushValue::SlotIndex()) {
       update_value[i] += other_update_value[i];
     }
   }
```
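This hunk and the identical one in sparse_accessor.cc further down cast the `size_t` loop index on every iteration because `SlotIndex()` evidently returns a signed `int`. An equivalent, arguably cleaner variant hoists the converted value out of the loop; a sketch with a hypothetical `SlotIndex` (not Paddle's declaration):

```cpp
#include <cstddef>
#include <cstdio>

// Hypothetical stand-in for CtrDoublePushValue::SlotIndex().
constexpr int SlotIndex() { return 2; }

// Convert once, before the loop, so every comparison is size_t vs size_t.
void Merge(float* update_value, const float* other_update_value,
           size_t total_dim) {
  const size_t slot = static_cast<size_t>(SlotIndex());
  for (size_t i = 0; i < total_dim; ++i) {
    if (i != slot) {
      update_value[i] += other_update_value[i];
    }
  }
}

int main() {
  float acc[4] = {0, 0, 0, 0};
  const float grad[4] = {1, 2, 3, 4};
  Merge(acc, grad, 4);
  // Prints "1 2 0 4": the slot column (index 2) is skipped.
  std::printf("%.0f %.0f %.0f %.0f\n", acc[0], acc[1], acc[2], acc[3]);
  return 0;
}
```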
paddle/fluid/distributed/ps/table/memory_dense_table.cc

```diff
@@ -276,7 +276,7 @@ int32_t MemoryDenseTable::Load(const std::string& path,
       CHECK(str_len == param_col_ids_.size())
           << "expect " << param_col_ids_.size() << " float, but got "
           << str_len;
-      for (int col_idx = 0; col_idx < str_len; ++col_idx) {
+      for (size_t col_idx = 0; col_idx < str_len; ++col_idx) {
         if (param_col_ids_[col_idx] < 0) {
           continue;
         }
```
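Unlike most hunks in this commit, this one retypes the induction variable rather than casting in the condition, which is the tidier fix when the bound (`str_len`, presumably unsigned here) already has the wider unsigned type. One caveat when applying this pattern elsewhere, sketched below with illustrative values: subtraction on the retyped counter becomes unsigned too.

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  size_t str_len = 3;  // illustrative bound

  for (size_t col_idx = 0; col_idx < str_len; ++col_idx) {
    // Once the counter is size_t, any subtraction in the body is unsigned
    // as well: col_idx - 1 at col_idx == 0 wraps to SIZE_MAX instead of
    // yielding -1, so such expressions need an explicit guard.
    size_t prev = col_idx == 0 ? 0 : col_idx - 1;
    std::printf("col %zu, prev %zu\n", col_idx, prev);
  }
  return 0;
}
```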
paddle/fluid/distributed/ps/table/memory_sparse_table.cc

```diff
@@ -53,7 +53,8 @@ int32_t MemorySparseTable::InitializeValue() {
   _avg_local_shard_num =
       sparse_local_shard_num(_sparse_table_shard_num, _shard_num);
   _real_local_shard_num = _avg_local_shard_num;
-  if (_real_local_shard_num * (_shard_idx + 1) > _sparse_table_shard_num) {
+  if (static_cast<int>(_real_local_shard_num * (_shard_idx + 1)) >
+      _sparse_table_shard_num) {
     _real_local_shard_num =
         _sparse_table_shard_num - _real_local_shard_num * _shard_idx;
     _real_local_shard_num =
```
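Here the cast wraps the whole product. If the shard fields are unsigned, the multiplication itself still happens in unsigned arithmetic and only the compared result changes; the cast quietly assumes the product fits in `int`. A standalone demonstration of that assumption (illustrative values and types, not Paddle's):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // Illustrative stand-ins; the real fields live on MemorySparseTable.
  uint64_t real_local_shard_num = 10;
  uint64_t shard_idx = 3;
  int sparse_table_shard_num = 1000;

  // The product is computed in uint64_t; the cast narrows only the result
  // used in the comparison. That is sound while the product stays within
  // int's range, which shard counts are expected to do.
  if (static_cast<int>(real_local_shard_num * (shard_idx + 1)) >
      sparse_table_shard_num) {
    std::printf("last pserver gets a smaller shard slice\n");
  } else {
    std::printf("shards divide evenly\n");
  }
  return 0;
}
```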
paddle/fluid/distributed/ps/table/sparse_accessor.cc

```diff
@@ -202,7 +202,7 @@ int32_t SparseAccessor::Merge(float** update_values,
     float* update_value = update_values[value_item];
     const float* other_update_value = other_update_values[value_item];
     for (size_t i = 0; i < total_dim; ++i) {
-      if (i != SparsePushValue::SlotIndex()) {
+      if (static_cast<int>(i) != SparsePushValue::SlotIndex()) {
         update_value[i] += other_update_value[i];
       }
     }
```
paddle/fluid/distributed/ps/table/ssd_sparse_table.cc

```diff
@@ -637,8 +637,9 @@ int32_t SSDSparseTable::Load(size_t start_idx, size_t end_idx,
   size_t mf_value_size =
       _value_accesor->GetAccessorInfo().mf_size / sizeof(float);

-  end_idx =
-      end_idx < _sparse_table_shard_num ? end_idx : _sparse_table_shard_num;
+  end_idx = static_cast<int>(end_idx) < _sparse_table_shard_num
+                ? end_idx
+                : _sparse_table_shard_num;
   int thread_num = (end_idx - start_idx) < 20 ? (end_idx - start_idx) : 20;
   omp_set_num_threads(thread_num);
 #pragma omp parallel for schedule(dynamic)
```
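Note the direction of this cast: the unsigned `end_idx` is narrowed to `int`, the opposite of the brpc_ps_server.cc hunk above, which widened the signed side to `size_t`. Either direction silences the warning; which one preserves the intended semantics depends on the plausible value ranges of each operand. A compact sketch of the two directions (illustrative values):

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  size_t end_idx = 5;  // illustrative
  int shard_num = 8;

  // Narrow the unsigned side: correct as long as end_idx <= INT_MAX.
  bool narrowed = static_cast<int>(end_idx) < shard_num;

  // Widen the signed side: correct as long as shard_num >= 0.
  bool widened = end_idx < static_cast<size_t>(shard_num);

  std::printf("narrowed=%d widened=%d\n", narrowed, widened);  // both 1 here
  return 0;
}
```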
paddle/fluid/distributed/ps/wrapper/fleet.cc

```diff
@@ -555,10 +555,12 @@ void FleetWrapper::PushSparseFromTensorAsync(
       // in
       // ctr_accessor.h
       push_values.back()[0] = 2;  // TODO(zhaocaibei123): slot
-      push_values.back()[1] =
-          (i >= show_size ? 1 : static_cast<float>(show_tensor[i]));
-      push_values.back()[2] =
-          (i >= clk_size ? 0 : static_cast<float>(clk_tensor[i]));
+      push_values.back()[1] = (static_cast<int>(i) >= show_size
+                                   ? 1
+                                   : static_cast<float>(show_tensor[i]));
+      push_values.back()[2] = (static_cast<int>(i) >= clk_size
+                                   ? 0
+                                   : static_cast<float>(clk_tensor[i]));
       float* data = push_values.back().data() + 3;
       memcpy(data, g + output_len, sizeof(float) * fea_dim);
     }
@@ -582,10 +584,12 @@ void FleetWrapper::PushSparseFromTensorAsync(
       // slot show clk grad... consistent with CtrCommonPushValue defined in
       // ctr_accessor.h
       push_values.back()[0] = 2;  // TODO(zhaocaibei123): slot
-      push_values.back()[1] =
-          (i >= show_size ? 1 : static_cast<float>(show_tensor[i]));
-      push_values.back()[2] =
-          (i >= clk_size ? 0 : static_cast<float>(clk_tensor[i]));
+      push_values.back()[1] = (static_cast<int>(i) >= show_size
+                                   ? 1
+                                   : static_cast<float>(show_tensor[i]));
+      push_values.back()[2] = (static_cast<int>(i) >= clk_size
+                                   ? 0
+                                   : static_cast<float>(clk_tensor[i]));
       float* data = push_values.back().data() + 3;
       memcpy(data, g + output_len, sizeof(float) * fea_dim);
     }
```
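In these two hunks the loop index `i` is presumably unsigned while `show_size` and `clk_size` are signed. Since C++20, the safe integer-comparison helpers in `<utility>` express this with no cast at all; a hedged sketch assuming a C++20 toolchain (the commit itself sticks to casts, which also work on older standards):

```cpp
#include <cstdio>
#include <utility>
#include <vector>

int main() {
  std::vector<float> show_tensor = {0.5f, 1.5f};  // illustrative data
  int show_size = static_cast<int>(show_tensor.size());

  for (size_t i = 0; i < 4; ++i) {
    // std::cmp_greater_equal compares correctly across signedness, so no
    // cast and no -Wsign-compare; a negative size could never match i.
    float show = std::cmp_greater_equal(i, show_size)
                     ? 1.0f
                     : show_tensor[i];
    std::printf("show[%zu] = %.1f\n", i, show);
  }
  return 0;
}
```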
paddle/fluid/distributed/test/ctr_accessor_test.cc

```diff
@@ -239,7 +239,7 @@ TEST(downpour_feature_value_accessor_test, test_update) {
     push_v.show = grad[i][1];
     push_v.click = grad[i][2];
     push_v.embed_g = grad[i][3];
-    for (int j = 0; j < parameter.embedx_dim(); ++j) {
+    for (unsigned int j = 0; j < parameter.embedx_dim(); ++j) {
       push_v.embedx_g.push_back(grad[i][4 + j]);
     }
```
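The test fix switches the counter to `unsigned int`, matching the type `embedx_dim()` apparently returns. One caveat worth keeping in mind when making counters unsigned: the change is only safe for forward loops, since a naive countdown never terminates. An illustrative sketch:

```cpp
#include <cstdio>

int main() {
  const unsigned int dim = 3;  // stand-in for parameter.embedx_dim()

  // Forward loops are safe with unsigned counters.
  for (unsigned int j = 0; j < dim; ++j) {
    std::printf("forward j = %u\n", j);
  }

  // A naive countdown would spin forever, because j >= 0 is always true
  // for unsigned types:
  //   for (unsigned int j = dim - 1; j >= 0; --j) { ... }
  // A terminating form decrements before the body runs:
  for (unsigned int j = dim; j-- > 0;) {
    std::printf("backward j = %u\n", j);
  }
  return 0;
}
```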