Unverified commit a50d1296, authored on Jun 16, 2021 by Thunderbrook and committed via GitHub on Jun 16, 2021.
add delta score, scale show (#33492)
Parent: 72d36970

Showing 7 changed files with 20 additions and 59 deletions (+20, -59).
paddle/fluid/framework/fleet/heter_ps/feature_value.h    +0   -19
paddle/fluid/framework/fleet/heter_ps/optimizer.cuh.h    +4   -27
paddle/fluid/framework/fleet/heter_ps/optimizer_conf.h   +3   -2
paddle/fluid/framework/io/fs.cc                          +8   -8
python/paddle/fluid/incubate/fleet/parameter_server/pslib/optimizer_factory.py   +3   -1
python/paddle/fluid/incubate/fleet/utils/fleet_util.py   +1   -1
python/paddle/fluid/log_helper.py                        +1   -1
paddle/fluid/framework/fleet/heter_ps/feature_value.h

@@ -52,25 +52,6 @@ struct FeaturePushValue {
   float lr_g;
   float mf_g[MF_DIM];
 };
 
-// class DownpourFixedFeatureValue {
-//  public:
-//   DownpourFixedFeatureValue() {}
-//   ~DownpourFixedFeatureValue() {}
-//   float* data() {
-//     return _data.data();
-//   }
-//   size_t size() {
-//     return _data.size();
-//   }
-//   void resize(size_t size) {
-//     _data.resize(size);
-//   }
-//   void shrink_to_fit() {
-//     _data.shrink_to_fit();
-//   }
-//  private:
-//   std::vector<float> _data;
-// };
 }  // end namespace framework
 }  // end namespace paddle
paddle/fluid/framework/fleet/heter_ps/optimizer.cuh.h

@@ -23,30 +23,6 @@ limitations under the License. */
 namespace paddle {
 namespace framework {
 
-__device__ double cuda_double_random(unsigned long long seed) {
-  // copy from MurmurHash3
-  seed ^= seed >> 33;
-  seed *= 0xff51afd7ed558ccd;
-  seed ^= seed >> 33;
-  seed *= 0xc4ceb9fe1a85ec53;
-  seed ^= seed >> 33;
-  return ((double)seed / 18446744073709551615.0);
-}
-
-__device__ float cuda_normal_random(unsigned long long idx) {
-  static double pi = 3.1415926897932384;
-  unsigned long long x = clock64() + idx;
-  double x1, x2, res;
-  while (1) {
-    x1 = cuda_double_random(x);
-    x2 = cuda_double_random(x + 33);
-    res = sqrt(-2.0 * log(x1)) * cos(2.0 * pi * x2);
-    if (-10 < res && res < 10) break;
-    x += 207;
-  }
-  return res;
-}
-
 template <typename ValType, typename GradType>
 class Optimizer {
  public:

@@ -95,11 +71,12 @@ class Optimizer {
   }
   __device__ void update_value(ValType& val, const GradType& grad) {
     val.slot = grad.slot;
-    ;
     val.show += grad.show;
     val.clk += grad.clk;
-    update_lr(val.lr, val.lr_g2sum, grad.lr_g, 1.0);
+    val.delta_score += optimizer_config::nonclk_coeff * (grad.show - grad.clk) +
+                       optimizer_config::clk_coeff * grad.clk;
+    update_lr(val.lr, val.lr_g2sum, grad.lr_g, grad.show);
     if (val.mf_size == 0) {
       if (optimizer_config::mf_create_thresholds <=

@@ -116,7 +93,7 @@ class Optimizer {
         }
       }
     } else {
-      update_mf(MF_DIM, &val.mf[1], val.mf[0], grad.mf_g, 1.0);
+      update_mf(MF_DIM, &val.mf[1], val.mf[0], grad.mf_g, grad.show);
     }
   }
 };
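For reference, the new accumulation rule is easy to check on the host: delta_score grows by nonclk_coeff * (show - clk) + clk_coeff * clk per pushed gradient, and the learning-rate update is now weighted by grad.show instead of a constant 1.0. The sketch below is a plain C++ illustration with simplified stand-in types (Val, Grad); it is not the actual Paddle structs or device code.

#include <cstdio>

// Simplified stand-ins for optimizer_config and the feature/gradient values;
// the real definitions live in optimizer_conf.h and feature_value.h.
namespace optimizer_config {
const float nonclk_coeff = 0.1f;
const float clk_coeff = 1.0f;
}  // namespace optimizer_config

struct Val {
  float delta_score = 0.0f, show = 0.0f, clk = 0.0f;
};
struct Grad {
  float show, clk;
};

// Mirrors the new update_value logic: accumulate show/clk and a
// coefficient-weighted delta score for each pushed gradient.
void update_value(Val& val, const Grad& grad) {
  val.show += grad.show;
  val.clk += grad.clk;
  val.delta_score += optimizer_config::nonclk_coeff * (grad.show - grad.clk) +
                     optimizer_config::clk_coeff * grad.clk;
  // update_lr(val.lr, val.lr_g2sum, grad.lr_g, grad.show) would follow here,
  // now scaled by grad.show rather than the previous constant 1.0.
}

int main() {
  Val v;
  Grad g{/*show=*/3.0f, /*clk=*/1.0f};
  update_value(v, g);
  std::printf("show=%.1f clk=%.1f delta_score=%.2f\n", v.show, v.clk, v.delta_score);
  return 0;
}

With the example push (show = 3, clk = 1) the delta score increases by 0.1 * 2 + 1 * 1 = 1.2.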
paddle/fluid/framework/fleet/heter_ps/optimizer_conf.h

@@ -16,15 +16,16 @@ limitations under the License. */
 namespace optimizer_config {
+__constant__ float mf_create_thresholds = 0;
+
 __constant__ float nonclk_coeff = 0.1;
 __constant__ float clk_coeff = 1;
 
 __constant__ float min_bound = -10;
 __constant__ float max_bound = 10;
 __constant__ float learning_rate = 0.05;
 __constant__ float initial_g2sum = 3.0;
-__constant__ float initial_range = 1e-4;
+__constant__ float initial_range = 0;
 
-__constant__ float mf_create_thresholds = 10;
 __constant__ float mf_learning_rate = 0.05;
 __constant__ float mf_initial_g2sum = 3.0;
 __constant__ float mf_initial_range = 1e-4;
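These values live in CUDA __constant__ memory, so the numbers above are only compiled-in defaults; host code can overwrite them before kernels run with cudaMemcpyToSymbol. Below is a minimal, generic sketch of that mechanism (a standalone .cu file built with nvcc); it assumes nothing about how Paddle actually configures these symbols.

#include <cuda_runtime.h>
#include <cstdio>

namespace optimizer_config {
// Same declaration style as optimizer_conf.h; the value here is just a default.
__constant__ float nonclk_coeff = 0.1f;
}  // namespace optimizer_config

int main() {
  // Overwrite the device-side constant from the host before any kernel launch.
  float host_value = 0.2f;
  cudaError_t err = cudaMemcpyToSymbol(optimizer_config::nonclk_coeff,
                                       &host_value, sizeof(float));
  if (err != cudaSuccess) {
    std::printf("cudaMemcpyToSymbol failed: %s\n", cudaGetErrorString(err));
    return 1;
  }
  std::printf("nonclk_coeff updated on device\n");
  return 0;
}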
paddle/fluid/framework/io/fs.cc

@@ -240,16 +240,16 @@ void set_download_command(const std::string& x) {
 std::shared_ptr<FILE> hdfs_open_read(std::string path, int* err_no,
                                      const std::string& converter) {
-  if (download_cmd() != "") {  // use customized download command
-    path = string::format_string("%s \"%s\"", download_cmd().c_str(),
-                                 path.c_str());
-  } else {
   if (fs_end_with_internal(path, ".gz")) {
     path = string::format_string("%s -text \"%s\"", hdfs_command().c_str(),
                                  path.c_str());
   } else {
+    const std::string file_path = path;
     path = string::format_string("%s -cat \"%s\"", hdfs_command().c_str(),
-                                 path.c_str());
+                                 file_path.c_str());
+    if (download_cmd() != "") {  // use customized download command
+      path = string::format_string("%s \"%s\"", download_cmd().c_str(),
+                                   file_path.c_str());
+    }
   }
-  }
 }
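The effect of the reordered branches is clearer with the resulting command strings spelled out. Below is a self-contained C++ sketch of the same selection logic, with std::string concatenation standing in for string::format_string and placeholder commands standing in for hdfs_command() and download_cmd().

#include <iostream>
#include <string>

// Standalone rendition of the new hdfs_open_read command selection.
// "hadoop fs" and "my_downloader" below are illustrative placeholders.
std::string build_read_command(const std::string& file_path,
                               const std::string& hdfs_cmd,
                               const std::string& download_cmd) {
  if (file_path.size() >= 3 &&
      file_path.compare(file_path.size() - 3, 3, ".gz") == 0) {
    // Compressed files are always streamed through "-text".
    return hdfs_cmd + " -text \"" + file_path + "\"";
  }
  std::string cmd = hdfs_cmd + " -cat \"" + file_path + "\"";
  if (!download_cmd.empty()) {
    // A customized download command now replaces only the plain "-cat" case
    // and always receives the original path.
    cmd = download_cmd + " \"" + file_path + "\"";
  }
  return cmd;
}

int main() {
  std::cout << build_read_command("part-00000.gz", "hadoop fs", "") << "\n";
  std::cout << build_read_command("part-00000", "hadoop fs", "") << "\n";
  std::cout << build_read_command("part-00000", "hadoop fs", "my_downloader") << "\n";
  return 0;
}

Run as-is it prints hadoop fs -text "part-00000.gz", hadoop fs -cat "part-00000", and my_downloader "part-00000", matching the three paths through the reconstructed diff.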
...
python/paddle/fluid/incubate/fleet/parameter_server/pslib/optimizer_factory.py
浏览文件 @
a50d1296
...
@@ -24,6 +24,7 @@ from collections import OrderedDict
...
@@ -24,6 +24,7 @@ from collections import OrderedDict
import
copy
import
copy
from
.node
import
DownpourWorker
,
DownpourServer
from
.node
import
DownpourWorker
,
DownpourServer
from
.
import
ps_pb2
as
pslib
from
.
import
ps_pb2
as
pslib
import
os
OpRole
=
core
.
op_proto_and_checker_maker
.
OpRole
OpRole
=
core
.
op_proto_and_checker_maker
.
OpRole
# this dict is for store info about pull/push sparse ops.
# this dict is for store info about pull/push sparse ops.
...
@@ -765,7 +766,8 @@ class DistributedAdam(DistributedOptimizerImplBase):
...
@@ -765,7 +766,8 @@ class DistributedAdam(DistributedOptimizerImplBase):
"user_define_dump_filename"
,
""
)
"user_define_dump_filename"
,
""
)
opt_info
[
"dump_fields_path"
]
=
strategy
.
get
(
"dump_fields_path"
,
""
)
opt_info
[
"dump_fields_path"
]
=
strategy
.
get
(
"dump_fields_path"
,
""
)
opt_info
[
"dump_param"
]
=
strategy
.
get
(
"dump_param"
,
[])
opt_info
[
"dump_param"
]
=
strategy
.
get
(
"dump_param"
,
[])
opt_info
[
"worker_places"
]
=
strategy
.
get
(
"worker_places"
,
[])
gpus_env
=
os
.
getenv
(
"FLAGS_selected_gpus"
)
opt_info
[
"worker_places"
]
=
[
int
(
s
)
for
s
in
gpus_env
.
split
(
","
)]
opt_info
[
"use_ps_gpu"
]
=
strategy
.
get
(
"use_ps_gpu"
,
False
)
opt_info
[
"use_ps_gpu"
]
=
strategy
.
get
(
"use_ps_gpu"
,
False
)
if
server
.
_server
.
downpour_server_param
.
downpour_table_param
[
if
server
.
_server
.
downpour_server_param
.
downpour_table_param
[
0
].
accessor
.
accessor_class
in
[
0
].
accessor
.
accessor_class
in
[
...
...
python/paddle/fluid/incubate/fleet/utils/fleet_util.py

@@ -32,7 +32,7 @@ OpRole = core.op_proto_and_checker_maker.OpRole
 __all__ = ["FleetUtil"]
 
 _logger = get_logger(
-    __name__, logging.INFO, fmt='%(asctime)s - %(levelname)s: %(message)s')
+    __name__, logging.INFO, fmt='%(asctime)s %(levelname)s: %(message)s')
 fleet = None
python/paddle/fluid/log_helper.py

@@ -45,7 +45,7 @@ def get_logger(name, level, fmt=None):
     handler = logging.StreamHandler()
 
     if fmt:
-        formatter = logging.Formatter(fmt=fmt)
+        formatter = logging.Formatter(fmt=fmt, datefmt='%a %b %d %H:%M:%S')
         handler.setFormatter(formatter)
 
     logger.addHandler(handler)