s920243400 / PaddleDetection (forked from PaddlePaddle / PaddleDetection)
Commit 378037c5
Authored on Feb 02, 2019 by dongdaxiang

make s_instance_ private to ensure singleton

Parent: a446d26e
Showing 4 changed files with 9 additions and 13 deletions (+9 -13)
paddle/fluid/framework/device_worker.h        +1 -1
paddle/fluid/framework/downpour_worker.cc     +2 -0
paddle/fluid/framework/fleet/fleet_wrapper.h  +5 -2
paddle/fluid/framework/pull_dense_worker.cc   +1 -10
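The change in all four files is the same lazy-singleton pattern: the static s_instance_ pointer moves into a private section, so GetInstance() becomes the only way to obtain the object. Below is a minimal, standalone sketch of that pattern; the Worker class and the main() driver are illustrative stand-ins, not PaddlePaddle code.

#include <cassert>
#include <memory>

class Worker {
 public:
  // The only way to obtain an instance: lazily create it on first use.
  // Note: like the code in this commit, the lazy check is not synchronized.
  static std::shared_ptr<Worker> GetInstance() {
    if (s_instance_ == nullptr) {
      s_instance_.reset(new Worker());
    }
    return s_instance_;
  }

 private:
  // Both the instance and the constructor are private, so callers cannot
  // construct their own Worker or reassign the shared instance; GetInstance()
  // is the single entry point, which is what "ensure singleton" refers to.
  Worker() = default;
  static std::shared_ptr<Worker> s_instance_;
};

std::shared_ptr<Worker> Worker::s_instance_ = nullptr;

int main() {
  auto a = Worker::GetInstance();
  auto b = Worker::GetInstance();
  assert(a == b);  // both callers see the same underlying object
  return 0;
}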
paddle/fluid/framework/device_worker.h

@@ -47,7 +47,6 @@ class PullDenseWorker {
   void IncreaseThreadVersion(int thread_id, uint64_t table_id);
   void ResetThreadVersion(uint64_t table_id);
   void Wait(std::vector<::std::future<int32_t>>* status_vec);
-  static std::shared_ptr<PullDenseWorker> s_instance_;
   static std::shared_ptr<PullDenseWorker> GetInstance() {
     if (NULL == s_instance_) {
       s_instance_.reset(new paddle::framework::PullDenseWorker());
@@ -61,6 +60,7 @@ class PullDenseWorker {
   bool CheckUpdateParam(uint64_t table_id);

  private:
+  static std::shared_ptr<PullDenseWorker> s_instance_;
   std::shared_ptr<paddle::framework::FleetWrapper> fleet_ptr_;
   PullDenseWorkerParameter param_;
   Scope* root_scope_;
paddle/fluid/framework/downpour_worker.cc

@@ -58,6 +58,8 @@ void DownpourWorker::Initialize(const TrainerDesc& desc) {
     skip_ops_[i] = param_.skip_ops(i);
   }
   skip_ops_.resize(param_.skip_ops_size());
+
+  fleet_ptr_ = FleetWrapper::GetInstance();
 }

 void DownpourWorker::CollectLabelInfo(size_t table_idx) {
paddle/fluid/framework/fleet/fleet_wrapper.h

@@ -40,7 +40,8 @@ namespace framework {
 // Async: PullSparseVarsAsync(not implemented currently)
 // Push
 // Sync: PushSparseVarsSync
-// Async: PushSparseVarsAsync
+// Async: PushSparseVarsAsync(not implemented currently)
+// Async: PushSparseVarsWithLabelAsync(with special usage)
 // Push dense variables to server in Async mode
 // Param<in>: scope, table_id, var_names
 // Param<out>: push_sparse_status
@@ -109,7 +110,6 @@ class FleetWrapper {
   uint64_t RunServer();
   void GatherServers(const std::vector<uint64_t>& host_sign_list, int node_num);
-  static std::shared_ptr<FleetWrapper> s_instance_;
   static std::shared_ptr<FleetWrapper> GetInstance() {
     if (NULL == s_instance_) {
       s_instance_.reset(new paddle::framework::FleetWrapper());
@@ -121,6 +121,9 @@ class FleetWrapper {
   static std::shared_ptr<paddle::distributed::PSlib> pslib_ptr_;
 #endif

+ private:
+  static std::shared_ptr<FleetWrapper> s_instance_;
+
  private:
   FleetWrapper() {}
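With s_instance_ now private, a consumer can only reach the FleetWrapper through GetInstance(), which is what DownpourWorker::Initialize and PullDenseWorker::Initialize do in this commit. A caller-side sketch follows; it assumes compilation inside the Paddle source tree, and MyWorker is a hypothetical consumer, not part of PaddlePaddle.

#include <memory>

#include "paddle/fluid/framework/fleet/fleet_wrapper.h"

// MyWorker is an illustrative consumer of the FleetWrapper singleton.
class MyWorker {
 public:
  void Initialize() {
    // Mirrors DownpourWorker::Initialize / PullDenseWorker::Initialize:
    // fetch the shared singleton instead of constructing a FleetWrapper.
    fleet_ptr_ = paddle::framework::FleetWrapper::GetInstance();
  }

 private:
  std::shared_ptr<paddle::framework::FleetWrapper> fleet_ptr_;
};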
paddle/fluid/framework/pull_dense_worker.cc

@@ -20,31 +20,25 @@ namespace framework {
 std::shared_ptr<PullDenseWorker> PullDenseWorker::s_instance_ = NULL;

 void PullDenseWorker::Initialize(const TrainerDesc& param) {
-  LOG(WARNING) << "going to initialize pull dense worker";
   running_ = false;
   param_ = param.pull_dense_param();
   threshold_ = param_.threshold();
   thread_num_ = param_.device_num();
   sleep_time_ms_ = param_.sleep_time_ms();
-  LOG(WARNING) << "dense table size: " << param_.dense_table_size();
-  LOG(WARNING) << "thread num: " << thread_num_;
   for (size_t i = 0; i < param_.dense_table_size(); ++i) {
     // setup dense variables for each table
     int var_num = param_.dense_table(i).dense_value_name_size();
-    LOG(WARNING) << "var num: " << var_num;
     uint64_t tid = static_cast<uint64_t>(param_.dense_table(i).table_id());
     dense_value_names_[tid].resize(var_num);
     for (int j = 0; j < var_num; ++j) {
       dense_value_names_[tid][j] = param_.dense_table(i).dense_value_name(j);
-      LOG(WARNING) << "dense value names " << j << " "
-                   << dense_value_names_[tid][j];
     }
     // setup training version for each table
     training_versions_[tid].resize(thread_num_, 0);
     last_versions_[tid] = 0;
     current_version_[tid] = 0;
   }
-  LOG(WARNING) << "initialize pull dense worker done.";
+  fleet_ptr_ = FleetWrapper::GetInstance();
 }

 void PullDenseWorker::Wait(std::vector<::std::future<int32_t>>* status_vec) {
@@ -98,10 +92,7 @@ void PullDenseWorker::Run() {
 }

 void PullDenseWorker::IncreaseThreadVersion(int thread_id, uint64_t table_id) {
-  LOG(WARNING) << "increase thread version input: " << thread_id << " table id "
-               << table_id;
   std::lock_guard<std::mutex> lock(mutex_for_version_);
-  LOG(WARNING) << "going to increase";
   training_versions_[table_id][thread_id]++;
 }
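The IncreaseThreadVersion hunk keeps its std::lock_guard, so the per-table, per-thread version bump stays race-free after the debug logging is removed. Below is a self-contained sketch of that guarded bookkeeping; VersionTable and the main() driver are hypothetical stand-ins for the PullDenseWorker members, not PaddlePaddle code.

#include <cstdint>
#include <iostream>
#include <map>
#include <mutex>
#include <vector>

// VersionTable stands in for the training_versions_ / mutex_for_version_
// members shown in the hunk above.
class VersionTable {
 public:
  explicit VersionTable(int thread_num) : thread_num_(thread_num) {}

  // Mirrors the setup loop in PullDenseWorker::Initialize: one version slot
  // per worker thread for each dense table.
  void Register(uint64_t table_id) {
    training_versions_[table_id].resize(thread_num_, 0);
  }

  // Same shape as PullDenseWorker::IncreaseThreadVersion: take the lock,
  // then bump the per-table, per-thread counter.
  void IncreaseThreadVersion(int thread_id, uint64_t table_id) {
    std::lock_guard<std::mutex> lock(mutex_for_version_);
    training_versions_[table_id][thread_id]++;
  }

 private:
  int thread_num_;
  std::mutex mutex_for_version_;
  std::map<uint64_t, std::vector<uint64_t>> training_versions_;
};

int main() {
  VersionTable versions(/*thread_num=*/4);
  versions.Register(/*table_id=*/0);
  versions.IncreaseThreadVersion(/*thread_id=*/1, /*table_id=*/0);
  std::cout << "bumped version for thread 1, table 0" << std::endl;
  return 0;
}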