Commit e492ee24 (unverified)
Authored on Mar 28, 2023 by Nyakku Shigure; committed via GitHub on Mar 28, 2023.

fix a typo, `sheduler` -> `scheduler` (#52149)

Parent: e57051b4
Showing 13 changed files with 90 additions and 82 deletions (+90, -82).
python/paddle/distributed/passes/ps_server_pass.py  +14 -14
python/paddle/distributed/passes/ps_trainer_pass.py  +1 -1
python/paddle/fluid/compiler.py  +6 -6
python/paddle/fluid/executor.py  +26 -26
python/paddle/fluid/framework.py  +2 -2
python/paddle/fluid/optimizer.py  +1 -1
python/paddle/fluid/tests/unittests/ipu/test_lr_sheduler_ipu.py  +2 -2
python/paddle/fluid/tests/unittests/test_dist_base.py  +8 -8
python/paddle/fluid/tests/unittests/test_newprofiler.py  +4 -4
python/paddle/incubate/distributed/fleet/parameter_server/ir/public.py  +20 -14
python/paddle/incubate/distributed/fleet/parameter_server/ir/trainer_pass.py  +1 -1
python/paddle/jit/dy2static/partial_program.py  +4 -2
python/paddle/optimizer/optimizer.py  +1 -1
python/paddle/distributed/passes/ps_server_pass.py

@@ -69,7 +69,7 @@ class AddLrDecayTablePass(PassBase):
         ] = tensor_table_class
         attrs['tensor_table'] = tensor_table_dict
 
-    def _get_lr_sheduler_program(self, lr_sheduler, lr_decay_steps):
+    def _get_lr_scheduler_program(self, lr_scheduler, lr_decay_steps):
         schedler_decay = [
             'NoamDecay',
             'NaturalExpDecay',
@@ -81,12 +81,12 @@ class AddLrDecayTablePass(PassBase):
         decay_startup_program = paddle.static.Program()
         lr_name = ""
 
-        if isinstance(lr_sheduler, ExponentialDecay):
+        if isinstance(lr_scheduler, ExponentialDecay):
             with paddle.static.program_guard(
                 decay_main_program, decay_startup_program
             ):
                 lr = exponential_decay(
-                    1.0, lr_decay_steps, lr_sheduler.gamma, True
+                    1.0, lr_decay_steps, lr_scheduler.gamma, True
                 )
                 lr_name = lr.name
                 logging.warn(
@@ -96,24 +96,24 @@ class AddLrDecayTablePass(PassBase):
                     "\t strategy.a_sync_configs= { 'lr_decay_steps' : YOUR_DECAY_STEP }\n"
                     % lr_decay_steps
                 )
-        elif isinstance(lr_sheduler, NoamDecay):
+        elif isinstance(lr_scheduler, NoamDecay):
             with paddle.static.program_guard(
                 decay_main_program, decay_startup_program
             ):
                 lr = noam_decay(
-                    lr_sheduler.d_model, lr_sheduler.warmup_steps, 1.0
+                    lr_scheduler.d_model, lr_scheduler.warmup_steps, 1.0
                 )
                 lr_name = lr.name
                 logging.warn(
                     "NoamDecay is set, warmup steps is [ %d ]"
-                    % lr_sheduler.warmup_steps
+                    % lr_scheduler.warmup_steps
                 )
-        elif isinstance(lr_sheduler, NaturalExpDecay):
+        elif isinstance(lr_scheduler, NaturalExpDecay):
             with paddle.static.program_guard(
                 decay_main_program, decay_startup_program
             ):
                 lr = natural_exp_decay(
-                    1.0, lr_decay_steps, lr_sheduler.gamma, True
+                    1.0, lr_decay_steps, lr_scheduler.gamma, True
                 )
                 lr_name = lr.name
                 logging.warn(
@@ -123,12 +123,12 @@ class AddLrDecayTablePass(PassBase):
                     "\t strategy.a_sync_configs= { 'lr_decay_steps' : YOUR_DECAY_STEP }\n"
                     % lr_decay_steps
                 )
-        elif isinstance(lr_sheduler, InverseTimeDecay):
+        elif isinstance(lr_scheduler, InverseTimeDecay):
             with paddle.static.program_guard(
                 decay_main_program, decay_startup_program
             ):
                 lr = inverse_time_decay(
-                    1.0, lr_decay_steps, lr_sheduler.gamma, True
+                    1.0, lr_decay_steps, lr_scheduler.gamma, True
                 )
                 lr_name = lr.name
                 logging.warn(
@@ -149,11 +149,11 @@ class AddLrDecayTablePass(PassBase):
     def _apply_single_impl(self, main_program, startup_program, pass_ctx):
         attrs = pass_ctx._attrs
-        if not hasattr(attrs['origin_main_program'], 'lr_sheduler'):
+        if not hasattr(attrs['origin_main_program'], 'lr_scheduler'):
             return
 
         assert isinstance(
-            attrs['origin_main_program'].lr_sheduler, LRScheduler
+            attrs['origin_main_program'].lr_scheduler, LRScheduler
        ), "must be LRScheduler"
 
        ops = get_optimize_ops(attrs['origin_main_program'])
@@ -161,8 +161,8 @@ class AddLrDecayTablePass(PassBase):
            lr_decay_main_program,
            lr_decay_startup_program,
            lr_name,
-        ) = self._get_lr_sheduler_program(
-            attrs['origin_main_program'].lr_sheduler, attrs['lr_decay_steps']
+        ) = self._get_lr_scheduler_program(
+            attrs['origin_main_program'].lr_scheduler, attrs['lr_decay_steps']
        )
 
        self._add_tensor_table(
            attrs,
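For readers skimming the pass, the renamed method lowers a `paddle.optimizer.lr.LRScheduler` instance into the matching fluid decay op inside a fresh pair of static programs, and the tuple it returns is unpacked in `_apply_single_impl` above. A condensed, illustrative paraphrase of that dispatch (it assumes the imports already present in the file and drops the warning messages and per-branch program guards of the real code):

    # Condensed paraphrase of _get_lr_scheduler_program above; not the literal pass code.
    def _get_lr_scheduler_program(lr_scheduler, lr_decay_steps):
        decay_main_program = paddle.static.Program()
        decay_startup_program = paddle.static.Program()
        lr_name = ""
        with paddle.static.program_guard(decay_main_program, decay_startup_program):
            if isinstance(lr_scheduler, ExponentialDecay):
                lr_name = exponential_decay(1.0, lr_decay_steps, lr_scheduler.gamma, True).name
            elif isinstance(lr_scheduler, NoamDecay):
                lr_name = noam_decay(lr_scheduler.d_model, lr_scheduler.warmup_steps, 1.0).name
            elif isinstance(lr_scheduler, NaturalExpDecay):
                lr_name = natural_exp_decay(1.0, lr_decay_steps, lr_scheduler.gamma, True).name
            elif isinstance(lr_scheduler, InverseTimeDecay):
                lr_name = inverse_time_decay(1.0, lr_decay_steps, lr_scheduler.gamma, True).name
        return decay_main_program, decay_startup_program, lr_name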
python/paddle/distributed/passes/ps_trainer_pass.py

@@ -612,7 +612,7 @@ class DeleteOptimizesPass(PassBase):
             main_program, remote_optimize_ops, local_optimize_ops
         )
 
-        if hasattr(attrs['origin_main_program'], 'lr_sheduler'):
+        if hasattr(attrs['origin_main_program'], 'lr_scheduler'):
             self._add_lr_var(main_program, attrs)
python/paddle/fluid/compiler.py

@@ -1235,15 +1235,15 @@ class IpuCompiledProgram:
         convert_pass.apply(self._graph)
 
         program = framework.Program._construct_from_desc(desc)
 
-        if hasattr(self._program, 'lr_sheduler'):
+        if hasattr(self._program, 'lr_scheduler'):
             # how to share var between two different block ?
-            lr_var_name = self._program.lr_sheduler._var_name
-            program.lr_sheduler = self._program.lr_sheduler
-            # Program.clone will clone lr_sheduler, so i set lr_var as
-            # lr_sheduler attribute
+            lr_var_name = self._program.lr_scheduler._var_name
+            program.lr_scheduler = self._program.lr_scheduler
+            # Program.clone will clone lr_scheduler, so i set lr_var as
+            # lr_scheduler attribute
             global_block = self._program.global_block()
-            program.lr_sheduler.lr_var = global_block.vars[lr_var_name]
+            program.lr_scheduler.lr_var = global_block.vars[lr_var_name]
 
         # with popart, we need to support batches_per_step, what means
         # the shape of feed_var and feed_tensor(maybe numpy array) will
python/paddle/fluid/executor.py

@@ -871,8 +871,8 @@ class _ExecutorCache:
             ir_graph = framework.IrGraph(compiled_program._graph)
             converted_program = ir_graph.to_program()
 
-            if hasattr(inner_program, 'lr_sheduler'):
-                converted_program.lr_sheduler = inner_program.lr_sheduler
+            if hasattr(inner_program, 'lr_scheduler'):
+                converted_program.lr_scheduler = inner_program.lr_scheduler
 
             inner_program = converted_program
             # print(f"Program after convert:\n {inner_program}", flush=True)
@@ -1657,17 +1657,17 @@ class Executor:
                 )
             self._feed_data(program, feed, feed_var_name, scope)
 
-            if hasattr(program, 'lr_sheduler'):
+            if hasattr(program, 'lr_scheduler'):
                 from paddle.optimizer.lr import LRScheduler
 
                 assert isinstance(
-                    program.lr_sheduler, LRScheduler
+                    program.lr_scheduler, LRScheduler
                 ), "must be LRScheduler"
-                lr_sheduler = program.lr_sheduler
-                lr_value = lr_sheduler()
-                lr_var = program.global_block().vars[lr_sheduler._var_name]
+                lr_scheduler = program.lr_scheduler
+                lr_value = lr_scheduler()
+                lr_var = program.global_block().vars[lr_scheduler._var_name]
                 data = np.array([lr_value]).astype(convert_dtype(lr_var.dtype))
-                tensor = core.get_variable_tensor(scope, lr_sheduler._var_name)
+                tensor = core.get_variable_tensor(scope, lr_scheduler._var_name)
                 # NOTE(dev): `tensor.set(data, self.place)` always call TensorCopySync that is a blocking behavior. So we use `_copy_from` to replace it.
                 cpu_tensor = _as_lodtensor(data, core.CPUPlace())
                 if core.is_cuda_graph_capturing():
@@ -1810,15 +1810,15 @@ class Executor:
                 )
             self._feed_data(program, feed, feed_var_name, scope)
 
-            if hasattr(program, 'lr_sheduler'):
+            if hasattr(program, 'lr_scheduler'):
                 assert isinstance(
-                    program.lr_sheduler, LRScheduler
+                    program.lr_scheduler, LRScheduler
                 ), "must be LRScheduler"
-                lr_sheduler = program.lr_sheduler
-                lr_value = lr_sheduler()
-                lr_var = program.global_block().vars[lr_sheduler._var_name]
+                lr_scheduler = program.lr_scheduler
+                lr_value = lr_scheduler()
+                lr_var = program.global_block().vars[lr_scheduler._var_name]
                 data = np.array([lr_value]).astype(convert_dtype(lr_var.dtype))
-                tensor = core.get_variable_tensor(scope, lr_sheduler._var_name)
+                tensor = core.get_variable_tensor(scope, lr_scheduler._var_name)
                 tensor.set(data, self.place)
 
             if not use_program_cache:
@@ -2588,14 +2588,14 @@ class Executor:
             from paddle.optimizer.lr import LRScheduler
 
-            if hasattr(program, 'lr_sheduler'):
-                lr_sheduler = program.lr_sheduler
-                assert isinstance(lr_sheduler, LRScheduler), "must be LRScheduler"
-                lr_value = lr_sheduler()
-                lr_var = program.global_block().vars[lr_sheduler._var_name]
+            if hasattr(program, 'lr_scheduler'):
+                lr_scheduler = program.lr_scheduler
+                assert isinstance(lr_scheduler, LRScheduler), "must be LRScheduler"
+                lr_value = lr_scheduler()
+                lr_var = program.global_block().vars[lr_scheduler._var_name]
                 data = np.array([lr_value]).astype(convert_dtype(lr_var.dtype))
                 tensor = core.get_variable_tensor(
-                    cached_scope, lr_sheduler._var_name
+                    cached_scope, lr_scheduler._var_name
                 )
                 tensor.set(data, self.place)
@@ -2732,13 +2732,13 @@ class Executor:
             from paddle.optimizer.lr import LRScheduler
 
-            if hasattr(program, 'lr_sheduler'):
-                lr_sheduler = program.lr_sheduler
-                assert isinstance(lr_sheduler, LRScheduler), "must be LRScheduler"
-                lr_value = lr_sheduler()
-                lr_var = program.global_block().vars[lr_sheduler._var_name]
+            if hasattr(program, 'lr_scheduler'):
+                lr_scheduler = program.lr_scheduler
+                assert isinstance(lr_scheduler, LRScheduler), "must be LRScheduler"
+                lr_value = lr_scheduler()
+                lr_var = program.global_block().vars[lr_scheduler._var_name]
                 data = np.array([lr_value]).astype(convert_dtype(lr_var.dtype))
-                tensor = core.get_variable_tensor(scope, lr_sheduler._var_name)
+                tensor = core.get_variable_tensor(scope, lr_scheduler._var_name)
                 tensor.set(data, self.place)
 
             self._default_executor.run_from_dataset(trainer_instance)
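All of the executor hunks above repeat one pattern: just before running, the executor checks whether the Program carries an `lr_scheduler`, calls it to get the current learning-rate value, and writes that scalar into the scope variable registered under the scheduler's `_var_name`. A minimal paraphrase of that pattern follows; the helper name `_feed_lr_value` and the plain `float32` cast are illustrative, and the actual copy into the scope tensor (via `core.get_variable_tensor`) is only sketched in comments:

    import numpy as np
    from paddle.optimizer.lr import LRScheduler

    def _feed_lr_value(program, scope, place):
        # Illustrative paraphrase of the executor hunks above.
        if not hasattr(program, 'lr_scheduler'):
            return
        lr_scheduler = program.lr_scheduler
        assert isinstance(lr_scheduler, LRScheduler), "must be LRScheduler"
        lr_value = lr_scheduler()  # calling the scheduler returns the current learning rate
        lr_var = program.global_block().vars[lr_scheduler._var_name]  # variable the value feeds
        data = np.array([lr_value]).astype('float32')  # real code casts to lr_var.dtype
        # executor.py then copies `data` into the scope tensor, roughly:
        #   tensor = core.get_variable_tensor(scope, lr_scheduler._var_name)
        #   tensor.set(data, place)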
python/paddle/fluid/framework.py

@@ -6080,8 +6080,8 @@ class Program:
             p._current_role = self._current_role
             p.__op_role_var = self.__op_role_var
             p._appending_grad_times = self._appending_grad_times
-            if hasattr(self, 'lr_sheduler'):
-                p.lr_sheduler = self.lr_sheduler
+            if hasattr(self, 'lr_scheduler'):
+                p.lr_scheduler = self.lr_scheduler
 
             # NOTE(zhiqiu): we sync the cloned program, to update its program by
             # its desc.
python/paddle/fluid/optimizer.py

@@ -389,7 +389,7 @@ class Optimizer:
                 dtype='float32' if self._dtype is None else self._dtype,
             )
             main_prog = framework.default_main_program()
-            main_prog.lr_sheduler = self._learning_rate
+            main_prog.lr_scheduler = self._learning_rate
             main_prog.lr_var = lr_var
 
             self._learning_rate_map[
                 framework.default_main_program()
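This one-line hunk (and its twin in python/paddle/optimizer/optimizer.py below) is where the attribute originates: when an Optimizer whose learning rate is an `LRScheduler` sets up its global learning-rate variable, it hangs the scheduler on the default main program as `lr_scheduler`, which is the name every other file in this commit looks up. A minimal end-to-end sketch of that flow in static-graph mode; the toy network, the `NaturalExpDecay` choice, and the feed values are illustrative, not taken from the commit:

    import numpy as np
    import paddle

    paddle.enable_static()
    main_prog = paddle.static.default_main_program()
    startup_prog = paddle.static.default_startup_program()

    with paddle.static.program_guard(main_prog, startup_prog):
        x = paddle.static.data(name='x', shape=[None, 4], dtype='float32')
        loss = paddle.mean(paddle.static.nn.fc(x, size=1))
        scheduler = paddle.optimizer.lr.NaturalExpDecay(learning_rate=0.1, gamma=0.5)
        sgd = paddle.optimizer.SGD(learning_rate=scheduler)
        sgd.minimize(loss)  # attaches the scheduler: main_prog.lr_scheduler

    exe = paddle.static.Executor(paddle.CPUPlace())
    exe.run(startup_prog)
    for _ in range(3):
        exe.run(main_prog, feed={'x': np.ones([2, 4], 'float32')}, fetch_list=[loss])
        if hasattr(main_prog, 'lr_scheduler'):
            main_prog.lr_scheduler.step()  # advance the schedule between runs, as the tests below do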
python/paddle/fluid/tests/unittests/ipu/test_lr_sheduler_ipu.py

@@ -67,8 +67,8 @@ class TestConvNet(IPUOpTest):
         result = []
         for _ in range(100):
-            if hasattr(program, "lr_sheduler"):
-                program.lr_sheduler.step()
+            if hasattr(program, "lr_scheduler"):
+                program.lr_scheduler.step()
             loss_res = exe.run(program, feed=self.feed, fetch_list=self.fetch_list)
python/paddle/fluid/tests/unittests/test_dist_base.py

@@ -132,13 +132,13 @@ class TestDistRunnerBase:
     @staticmethod
     def get_lr_scheduler(program):
-        lr_sheduler = None
-        if hasattr(program, 'lr_sheduler'):
+        lr_scheduler = None
+        if hasattr(program, 'lr_scheduler'):
             from paddle.optimizer.lr import LRScheduler
 
-            lr_sheduler = program.lr_sheduler
-            assert isinstance(lr_sheduler, LRScheduler), "must be LRScheduler"
-        return lr_sheduler
+            lr_scheduler = program.lr_scheduler
+            assert isinstance(lr_scheduler, LRScheduler), "must be LRScheduler"
+        return lr_scheduler
 
     def run_pserver(self, args):
         self.lr = args.lr
@@ -196,14 +196,14 @@ class TestDistRunnerBase:
         out_losses = []
         main_program = fluid.default_main_program()
-        lr_sheduler = self.get_lr_scheduler(main_program)
+        lr_scheduler = self.get_lr_scheduler(main_program)
         for i in range(RUN_STEP):
             loss = exe.run(main_program, fetch_list=[avg_cost])
             loss = loss[0] if loss else None
             out_losses.append(loss)
             print_to_err(type(self).__name__, "run step %d finished" % i)
-            if lr_sheduler is not None:
-                lr_sheduler.step()
+            if lr_scheduler is not None:
+                lr_scheduler.step()
 
         data_loader.reset()
         print_to_err(type(self).__name__, "trainer run finished")
python/paddle/fluid/tests/unittests/test_newprofiler.py

@@ -92,7 +92,7 @@ class TestProfiler(unittest.TestCase):
                 y = x / 2.0
                 prof.step()
 
-        def my_sheduler(num_step):
+        def my_scheduler(num_step):
            if num_step % 5 < 2:
                return profiler.ProfilerState.RECORD_AND_RETURN
            elif num_step % 5 < 3:
@@ -102,7 +102,7 @@ class TestProfiler(unittest.TestCase):
            else:
                return profiler.ProfilerState.CLOSED
 
-        def my_sheduler1(num_step):
+        def my_scheduler1(num_step):
            if num_step % 5 < 2:
                return profiler.ProfilerState.RECORD
            elif num_step % 5 < 3:
@@ -124,7 +124,7 @@ class TestProfiler(unittest.TestCase):
        prof = None
        with profiler.Profiler(
            targets=[profiler.ProfilerTarget.CPU],
-            scheduler=my_sheduler,
+            scheduler=my_scheduler,
            on_trace_ready=my_trace_back,
        ) as prof:
            for i in range(5):
@@ -132,7 +132,7 @@ class TestProfiler(unittest.TestCase):
                prof.step()
        prof = None
        with profiler.Profiler(
-            targets=[profiler.ProfilerTarget.CPU], scheduler=my_sheduler1
+            targets=[profiler.ProfilerTarget.CPU], scheduler=my_scheduler1
        ) as prof:
            for i in range(5):
                y = x / 2.0
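The profiler test is the one place in the commit where the typo was a local function name rather than a Program attribute: `paddle.profiler.Profiler` takes a `scheduler` callable that maps the step counter to a `ProfilerState`. A minimal self-contained sketch of that API, patterned on the test above (the five-step loop and the modulo thresholds are illustrative):

    import paddle
    import paddle.profiler as profiler

    def my_scheduler(num_step):
        # Record (and return results) on the first two steps of every five; stay closed otherwise.
        if num_step % 5 < 2:
            return profiler.ProfilerState.RECORD_AND_RETURN
        return profiler.ProfilerState.CLOSED

    x = paddle.to_tensor([1.0, 2.0, 3.0])
    with profiler.Profiler(
        targets=[profiler.ProfilerTarget.CPU],
        scheduler=my_scheduler,
    ) as prof:
        for _ in range(5):
            y = x / 2.0
            prof.step()  # advances num_step, which drives my_scheduler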
python/paddle/incubate/distributed/fleet/parameter_server/ir/public.py

@@ -1362,11 +1362,11 @@ def _get_optimize_ops(_program):
 
 def _add_lr_decay_table_pass(main_program, compiled_config, lr_decay_steps):
-    if hasattr(compiled_config.origin_main_program, 'lr_sheduler'):
+    if hasattr(compiled_config.origin_main_program, 'lr_scheduler'):
         from paddle.optimizer.lr import LRScheduler
 
         assert isinstance(
-            compiled_config.origin_main_program.lr_sheduler, LRScheduler
+            compiled_config.origin_main_program.lr_scheduler, LRScheduler
         ), "must be LRScheduler"
 
         ops = _get_optimize_ops(compiled_config.origin_main_program)
         lr_param_dict = _get_lr_param_dict(ops)
@@ -1374,8 +1374,8 @@ def _add_lr_decay_table_pass(main_program, compiled_config, lr_decay_steps):
             lr_decay_main_program,
             lr_decay_startup_program,
             lr_name,
-        ) = _get_lr_sheduler_program(
-            compiled_config.origin_main_program.lr_sheduler,
+        ) = _get_lr_scheduler_program(
+            compiled_config.origin_main_program.lr_scheduler,
             lr_param_dict,
             lr_decay_steps,
         )
@@ -1399,7 +1399,7 @@ def _get_lr_param_dict(opt_ops):
     return lr_param_dict
 
 
-def _get_lr_sheduler_program(lr_sheduler, lr_param_dict, lr_decay_steps):
+def _get_lr_scheduler_program(lr_scheduler, lr_param_dict, lr_decay_steps):
     schedler_decay = [
         'NoamDecay',
         'NaturalExpDecay',
@@ -1424,11 +1424,13 @@ def _get_lr_sheduler_program(lr_sheduler, lr_param_dict, lr_decay_steps):
     decay_startup_program = paddle.static.Program()
     lr_name = ""
 
-    if isinstance(lr_sheduler, ExponentialDecay):
+    if isinstance(lr_scheduler, ExponentialDecay):
         with paddle.static.program_guard(decay_main_program, decay_startup_program):
-            lr = exponential_decay(1.0, lr_decay_steps, lr_sheduler.gamma, True)
+            lr = exponential_decay(
+                1.0, lr_decay_steps, lr_scheduler.gamma, True
+            )
             lr_name = lr.name
             logging.warn(
                 "ExponentialDecay is set, staircase = True, global learning rate decay step is [ %d ], Change decay steps as follow:\n"
@@ -1437,21 +1439,25 @@ def _get_lr_sheduler_program(lr_sheduler, lr_param_dict, lr_decay_steps):
                 "\t strategy.a_sync_configs= { 'lr_decay_steps' : YOUR_DECAY_STEP }\n"
                 % lr_decay_steps
             )
-    elif isinstance(lr_sheduler, NoamDecay):
+    elif isinstance(lr_scheduler, NoamDecay):
         with paddle.static.program_guard(decay_main_program, decay_startup_program):
-            lr = noam_decay(lr_sheduler.d_model, lr_sheduler.warmup_steps, 1.0)
+            lr = noam_decay(
+                lr_scheduler.d_model, lr_scheduler.warmup_steps, 1.0
+            )
             lr_name = lr.name
             logging.warn(
                 "NoamDecay is set, warmup steps is [ %d ]"
-                % lr_sheduler.warmup_steps
+                % lr_scheduler.warmup_steps
             )
-    elif isinstance(lr_sheduler, NaturalExpDecay):
+    elif isinstance(lr_scheduler, NaturalExpDecay):
         with paddle.static.program_guard(decay_main_program, decay_startup_program):
-            lr = natural_exp_decay(1.0, lr_decay_steps, lr_sheduler.gamma, True)
+            lr = natural_exp_decay(
+                1.0, lr_decay_steps, lr_scheduler.gamma, True
+            )
             lr_name = lr.name
             logging.warn(
                 "NaturalExpDecay is set, staircase = True, global learning rate decay step is [ %d ], Change decay steps as follow:\n"
@@ -1460,12 +1466,12 @@ def _get_lr_sheduler_program(lr_sheduler, lr_param_dict, lr_decay_steps):
                 "\t strategy.a_sync_configs= { 'lr_decay_steps' : YOUR_DECAY_STEP }\n"
                 % lr_decay_steps
             )
-    elif isinstance(lr_sheduler, InverseTimeDecay):
+    elif isinstance(lr_scheduler, InverseTimeDecay):
         with paddle.static.program_guard(decay_main_program, decay_startup_program):
             lr = inverse_time_decay(
-                1.0, lr_decay_steps, lr_sheduler.gamma, True
+                1.0, lr_decay_steps, lr_scheduler.gamma, True
             )
             lr_name = lr.name
             logging.warn(
python/paddle/incubate/distributed/fleet/parameter_server/ir/trainer_pass.py

@@ -93,7 +93,7 @@ def delete_optimizer_pass(program, config):
         optimizer_ops.extend(lr_ops)
     _delete_optimizer_op_and_vars(program, optimizer_ops)
 
-    if hasattr(config.origin_main_program, 'lr_sheduler'):
+    if hasattr(config.origin_main_program, 'lr_scheduler'):
         _add_lr_var(program, config)
 
     return program
python/paddle/jit/dy2static/partial_program.py

@@ -1127,8 +1127,10 @@ def add_build_strategy_for(
         )
         ir_graph = framework.IrGraph(compiled_program._graph)
         builded_program = ir_graph.to_program()
-        if hasattr(compiled_program._program, 'lr_sheduler'):
-            builded_program.lr_sheduler = compiled_program._program.lr_sheduler
+        if hasattr(compiled_program._program, 'lr_scheduler'):
+            builded_program.lr_scheduler = (
+                compiled_program._program.lr_scheduler
+            )
     else:
         # can't just create a new program, we need copy the vardesc.
         builded_program = paddle.static.Program()
python/paddle/optimizer/optimizer.py

@@ -444,7 +444,7 @@ class Optimizer:
                 dtype=_lr_dtype,
             )
             main_prog = framework.default_main_program()
-            main_prog.lr_sheduler = self._learning_rate
+            main_prog.lr_scheduler = self._learning_rate
             main_prog.lr_var = lr_var
 
             self._learning_rate_map[