BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle, in sync with the fork source)
Commit 4ec9ecae (unverified)
Authored on Jun 19, 2018 by Qiao Longfei; committed via GitHub on Jun 19, 2018

Merge pull request #11547 from jacquesqiao/support-ftrl-optimizer

add ftrl optimizer

Parents: c22ebb3b, 6caea459
Showing 2 changed files with 178 additions and 4 deletions

python/paddle/fluid/optimizer.py                          +112  -4
python/paddle/fluid/tests/unittests/test_optimizer.py      +66  -0
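Before the diff itself, a note on usage: the commit exposes the new optimizer as fluid.optimizer.Ftrl (an alias of FtrlOptimizer), and its docstring example calls it as fluid.optimizer.Ftrl(0.0001) followed by minimize(cost). The sketch below shows that pattern end to end; it is only a hedged illustration assuming the fluid API of this era, and the network, variable names, and random feed data are illustrative, not part of the commit.

    import numpy
    import paddle.fluid as fluid

    # Illustrative linear model; none of these layer names come from the commit.
    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
    y_predict = fluid.layers.fc(input=x, size=1)
    cost = fluid.layers.square_error_cost(input=y_predict, label=y)
    avg_cost = fluid.layers.mean(cost)

    # The new FTRL optimizer, used as in the FtrlOptimizer docstring example.
    optimizer = fluid.optimizer.Ftrl(learning_rate=0.0001)
    _, params_grads = optimizer.minimize(avg_cost)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    exe.run(fluid.default_main_program(),
            feed={'x': numpy.random.rand(8, 13).astype('float32'),
                  'y': numpy.random.rand(8, 1).astype('float32')},
            fetch_list=[avg_cost])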
python/paddle/fluid/optimizer.py
@@ -26,10 +26,10 @@ from clip import append_gradient_clip_ops, error_clip_callback
 from contextlib import contextmanager
 
 __all__ = [
-    'SGD', 'Momentum', 'Adagrad', 'Adam', 'Adamax', 'DecayedAdagrad',
+    'SGD', 'Momentum', 'Adagrad', 'Adam', 'Adamax', 'DecayedAdagrad', 'Ftrl',
     'SGDOptimizer', 'MomentumOptimizer', 'AdagradOptimizer', 'AdamOptimizer',
     'AdamaxOptimizer', 'DecayedAdagradOptimizer', 'RMSPropOptimizer',
-    'Adadelta', 'ModelAverage', 'Optimizer'
+    'FtrlOptimizer', 'Adadelta', 'ModelAverage', 'Optimizer'
 ]
@@ -628,7 +628,7 @@ class AdadeltaOptimizer(Optimizer):
         E(dx_t^2) &= \\rho * E(dx_{t-1}^2) + (1-\\rho) * (-g*learning\\_rate)^2
 
     Args:
-        learning_rate(float): global leraning rate
+        learning_rate(float): global learning rate
         rho(float): rho in equation
         epsilon(float): epsilon in equation
@@ -729,7 +729,7 @@ class RMSPropOptimizer(Optimizer):
 
     Args:
-        learning_rate(float): global leraning rate.
+        learning_rate(float): global learning rate.
         rho(float): rho is :math: `\\rho` in equation, set 0.95 by default.
         epsilon(float): :math: `\\epsilon` in equation is smoothing term to
                 avoid division by zero, set 1e-6 by default.
@@ -810,6 +810,113 @@ class RMSPropOptimizer(Optimizer):
 
         return rmsprop_op
 
 
+class FtrlOptimizer(Optimizer):
"""
FTRL (Follow The Regularized Leader) Optimizer.
The paper that proposed Follow The Regularized Leader (FTRL):
(https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf)
.. math::
&new\_accum = squared\_accum + grad^2
&if (lr\_power == -0.5):
&\quad linear\_accum += grad -
\\
frac{
\\
sqrt{new\_accum} -
\\
sqrt{squared\_accum}}{learning\_rate * param}
&else:
&\quad linear\_accum += grad -
\\
frac{new\_accum^{-lr\_power} - accum^{-lr\_power}}{learning\_rate * param}
&x = l1 * sign(linear\_accum) - linear\_accum
&if (lr\_power == -0.5):
&\quad y =
\\
frac{
\\
sqrt{new\_accum}}{learning\_rate} + (2 * l2)
&\quad pre\_shrink =
\\
frac{x}{y}
&\quad param = (abs(linear\_accum) > l1).select(pre\_shrink, 0.0)
&else:
&\quad y =
\\
frac{new\_accum^{-lr\_power}}{learning\_rate} + (2 * l2)
&\quad pre\_shrink =
\\
frac{x}{y}
&\quad param = (abs(linear\_accum) > l1).select(pre\_shrink, 0.0)
&squared\_accum += grad^2
Args:
learning_rate (float|Variable): global learning rate.
l1 (float):
l2 (float):
lr_power (float):
Raises:
ValueError: If learning_rate, rho, epsilon, momentum are None.
Examples:
.. code-block:: python
optimizer = fluid.optimizer.Ftrl(0.0001)
_, params_grads = optimizer.minimize(cost)
"""
+
+    _squared_acc_str = "squared"
+    _linear_acc_str = "linear"
+
+    def __init__(self, learning_rate, l1=0.0, l2=0.0, lr_power=-0.5, **kwargs):
+        super(FtrlOptimizer, self).__init__(
+            learning_rate=learning_rate, **kwargs)
+        if learning_rate is None:
+            raise ValueError("learning_rate is not set.")
+
+        self.type = "ftrl"
+        self._l1 = l1
+        self._l2 = l2
+        self._lr_power = lr_power
+
+    def _create_accumulators(self, block, parameters):
+        if not isinstance(block, framework.Block):
+            raise TypeError("block is not instance of framework.Block.")
+
+        for p in parameters:
+            self._add_accumulator(self._squared_acc_str, p)
+            self._add_accumulator(self._linear_acc_str, p)
+
+    def _append_optimize_op(self, block, param_and_grad):
+        if not isinstance(block, framework.Block):
+            raise TypeError("block is not instance of framework.Block.")
+
+        squared_acc = self._get_accumulator(self._squared_acc_str,
+                                            param_and_grad[0])
+        linear_acc = self._get_accumulator(self._linear_acc_str,
+                                           param_and_grad[0])
+        ftrl_op = block.append_op(
+            type=self.type,
+            inputs={
+                "Param": param_and_grad[0],
+                "Grad": param_and_grad[1],
+                "SquaredAccumulator": squared_acc,
+                "LinearAccumulator": linear_acc,
+                "LearningRate": self._create_param_lr(param_and_grad),
+            },
+            outputs={
+                "ParamOut": param_and_grad[0],
+                "SquaredAccumOut": squared_acc,
+                "LinearAccumOut": linear_acc
+            },
+            attrs={"l1": self._l1,
+                   "l2": self._l2,
+                   "lr_power": self._lr_power})
+
+        return ftrl_op
+
+
 # We short the class name, since users will use the optimizer with the package
 # name. The sample code:
 #
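To make the docstring formulas above concrete, here is a small NumPy sketch of one FTRL-Proximal step for the default lr_power == -0.5. The helper name ftrl_step is illustrative, and the fraction in the linear-accumulator update is read as ((sqrt(new_accum) - sqrt(squared_accum)) / learning_rate) * param, following the usual FTRL-Proximal formulation; treat it as an illustration of the math, not as the op's implementation.

    import numpy as np

    def ftrl_step(param, grad, squared_accum, linear_accum,
                  learning_rate=0.01, l1=0.0, l2=0.0):
        # new_accum = squared_accum + grad^2
        new_accum = squared_accum + grad ** 2
        # linear_accum += grad - ((sqrt(new) - sqrt(old)) / lr) * param
        linear_accum = linear_accum + grad - (
            np.sqrt(new_accum) - np.sqrt(squared_accum)) / learning_rate * param
        # x = l1 * sign(linear_accum) - linear_accum
        x = l1 * np.sign(linear_accum) - linear_accum
        # y = sqrt(new_accum) / learning_rate + 2 * l2
        y = np.sqrt(new_accum) / learning_rate + 2.0 * l2
        pre_shrink = x / y
        # param = (abs(linear_accum) > l1).select(pre_shrink, 0.0)
        param = np.where(np.abs(linear_accum) > l1, pre_shrink, 0.0)
        # squared_accum += grad^2 (equals new_accum computed above)
        squared_accum = new_accum
        return param, squared_accum, linear_accum

For example, one step from zero accumulators:

    p = np.array([0.1, -0.2])
    g = np.array([0.05, 0.03])
    sq = np.zeros_like(p)
    lin = np.zeros_like(p)
    p, sq, lin = ftrl_step(p, g, sq, lin, learning_rate=0.01)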
@@ -826,6 +933,7 @@ Adamax = AdamaxOptimizer
 DecayedAdagrad = DecayedAdagradOptimizer
 Adadelta = AdadeltaOptimizer
 RMSProp = RMSPropOptimizer
+Ftrl = FtrlOptimizer
 
 
 class ModelAverage(Optimizer):
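With both hunks above applied, the optimizer is importable under the long class name and the short alias that __all__ now exports. A quick hedged check, using the module path of the file this diff touches:

    from paddle.fluid.optimizer import Ftrl, FtrlOptimizer

    # The short name is just an alias bound to the same class.
    assert Ftrl is FtrlOptimizer
    opt = Ftrl(learning_rate=0.0001, l1=0.0, l2=0.0, lr_power=-0.5)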
python/paddle/fluid/tests/unittests/test_optimizer.py
@@ -434,5 +434,71 @@ class TestDecayedAdagradOptimizer(unittest.TestCase):
         self.assertAlmostEqual(init_ops[1].attr('value'), 0.0)
 
 
+class TestFtrlOptimizer(unittest.TestCase):
+    class MockFtrl(optimizer.FtrlOptimizer):
+        def get_accumulators(self):
+            return self._accumulators
+
+        def get_squared_str(self):
+            return self._squared_acc_str
+
+        def get_linear_str(self):
+            return self._linear_acc_str
+
+    def test_ftrl_optimizer(self):
+        init_program = framework.Program()
+        program = framework.Program()
+        block = program.global_block()
+        mul_x = block.create_parameter(
+            dtype="float32",
+            shape=[5, 10],
+            lod_level=0,
+            name="mul.x",
+            optimize_attr={'learning_rate': 1.1})
+        mul_y = block.create_var(
+            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
+        mul_out = block.create_var(
+            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
+        block.append_op(
+            type="mul",
+            inputs={"X": mul_x,
+                    "Y": mul_y},
+            outputs={"Out": mul_out},
+            attrs={"x_num_col_dims": 1})
+        mean_out = block.create_var(
+            dtype="float32", shape=[1], lod_level=0, name="mean.out")
+        block.append_op(
+            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
+        learning_rate = 0.01
+        ftrl_optimizer = self.MockFtrl(
+            learning_rate=learning_rate, l1=0.0, l2=0.0, lr_power=-0.5)
+        params_grads = append_backward(mean_out)
+        self.assertEqual(len(params_grads), 1)
+        self.assertEqual(len(ftrl_optimizer.get_accumulators()), 0)
+        opts = ftrl_optimizer.create_optimization_pass(params_grads, mul_out,
+                                                       init_program)
+        self.assertEqual(len(opts), 3)
+        self.assertEqual([op.type for op in opts],
+                         ["fill_constant", "elementwise_mul", "ftrl"])
+
+        # Check accumulators
+        accumulators = ftrl_optimizer.get_accumulators()
+        self.assertEqual(len(accumulators), 2)
+        self.assertTrue(ftrl_optimizer.get_squared_str() in accumulators)
+        self.assertTrue(ftrl_optimizer.get_linear_str() in accumulators)
+        squared_acc = accumulators[ftrl_optimizer.get_squared_str()]
+        linear_acc = accumulators[ftrl_optimizer.get_linear_str()]
+        self.assertEqual(len(squared_acc), 1)
+        self.assertEqual(len(linear_acc), 1)
+        self.assertTrue(mul_x.name in squared_acc)
+        self.assertTrue(mul_x.name in linear_acc)
+
+        # Check init_program
+        init_ops = init_program.global_block().ops
+        self.assertEqual(len(init_ops), 3)
+        self.assertEqual(init_ops[0].type, "fill_constant")
+        self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate)
+
+
 if __name__ == '__main__':
     unittest.main()
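The new case can be run on its own with the standard unittest loader; the snippet below is illustrative and assumes it is executed from the directory containing test_optimizer.py:

    import unittest

    # Load only the newly added FTRL test case from the test module.
    suite = unittest.defaultTestLoader.loadTestsFromName(
        "test_optimizer.TestFtrlOptimizer")
    unittest.TextTestRunner(verbosity=2).run(suite)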