Unverified commit d1f3229b, authored by Zhang Ting, committed by GitHub

remove init_on_cpu because it does not take effect, test=develop (#29)

Parent 31a8b805
@@ -20,7 +20,6 @@ import math
 import paddle.fluid as fluid
 import paddle.fluid.layers.ops as ops
-from paddle.fluid.initializer import init_on_cpu
 from paddle.fluid.layers.learning_rate_scheduler import _decay_step_counter

 lr_strategy = 'cosine_decay'
@@ -40,7 +39,6 @@ def cosine_decay(learning_rate, step_each_epoch, epochs=120):
     """
     global_step = _decay_step_counter()
-    with init_on_cpu():
     epoch = ops.floor(global_step / step_each_epoch)
     decayed_lr = learning_rate * \
         (ops.cos(epoch * (math.pi / epochs)) + 1)/2
@@ -63,7 +61,6 @@ def cosine_decay_with_warmup(learning_rate, step_each_epoch, epochs=120):
     warmup_epoch = fluid.layers.fill_constant(
         shape=[1], dtype='float32', value=float(5), force_cpu=True)
-    with init_on_cpu():
     epoch = ops.floor(global_step / step_each_epoch)
     with fluid.layers.control_flow.Switch() as switch:
         with switch.case(epoch < warmup_epoch):
@@ -95,7 +92,6 @@ def exponential_decay_with_warmup(learning_rate,
     warmup_epoch = fluid.layers.fill_constant(
         shape=[1], dtype='float32', value=float(warm_up_epoch), force_cpu=True)
-    with init_on_cpu():
     epoch = ops.floor(global_step / step_each_epoch)
     with fluid.layers.control_flow.Switch() as switch:
         with switch.case(epoch < warmup_epoch):
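For context, the cosine decay rule these schedulers compute is simple enough to sketch in plain Python. The snippet below only illustrates the formula visible in the diff; the helper name cosine_decay_lr and the sample numbers are made up for this example, while the actual training code builds the same computation with fluid graph ops as shown above.

import math

def cosine_decay_lr(base_lr, global_step, step_each_epoch, epochs=120):
    # Current epoch index, derived from the global step counter
    epoch = math.floor(global_step / step_each_epoch)
    # Cosine anneal from base_lr down toward 0 over `epochs` epochs
    return base_lr * (math.cos(epoch * math.pi / epochs) + 1) / 2

# Example: with base_lr=0.1 and 100 steps per epoch, the rate is 0.1 at
# step 0 and roughly 0.05 at epoch 60 (halfway through 120 epochs).
print(cosine_decay_lr(0.1, global_step=0, step_each_epoch=100))     # 0.1
print(cosine_decay_lr(0.1, global_step=6000, step_each_epoch=100))  # ~0.05

The warmup variants in the diff keep this decay curve but, for the first warmup_epoch epochs (5 in cosine_decay_with_warmup), select a separate rule through fluid.layers.control_flow.Switch before falling back to the decayed rate.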