Commit 0edeb838 (unverified)
Repository: BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Authored by Yibing Liu, committed via GitHub on Aug 29, 2019
Parent: dd3c2422

Fix arg do_model_average in param_attr (#19448)

test=release/1.5

2 changed files, 6 additions and 6 deletions (+6 -6):
  paddle/fluid/API.spec              +1 -1
  python/paddle/fluid/param_attr.py  +5 -5
paddle/fluid/API.spec

@@ -892,7 +892,7 @@ paddle.fluid.LoDTensorArray.append append(self: paddle.fluid.core_avx.LoDTensorA
 paddle.fluid.CPUPlace.__init__ __init__(self: paddle.fluid.core_avx.CPUPlace) -> None
 paddle.fluid.CUDAPlace.__init__ __init__(self: paddle.fluid.core_avx.CUDAPlace, arg0: int) -> None
 paddle.fluid.CUDAPinnedPlace.__init__ __init__(self: paddle.fluid.core_avx.CUDAPinnedPlace) -> None
-paddle.fluid.ParamAttr.__init__ (ArgSpec(args=['self', 'name', 'initializer', 'learning_rate', 'regularizer', 'trainable', 'gradient_clip', 'do_model_average'], varargs=None, keywords=None, defaults=(None, None, 1.0, None, True, None, False)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.ParamAttr.__init__ (ArgSpec(args=['self', 'name', 'initializer', 'learning_rate', 'regularizer', 'trainable', 'gradient_clip', 'do_model_average'], varargs=None, keywords=None, defaults=(None, None, 1.0, None, True, None, True)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.WeightNormParamAttr.__init__ (ArgSpec(args=['self', 'dim', 'name', 'initializer', 'learning_rate', 'regularizer', 'trainable', 'gradient_clip', 'do_model_average'], varargs=None, keywords=None, defaults=(None, None, None, 1.0, None, True, None, False)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.DataFeeder.__init__ (ArgSpec(args=['self', 'feed_list', 'place', 'program'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.DataFeeder.decorate_reader (ArgSpec(args=['self', 'reader', 'multi_devices', 'num_places', 'drop_last'], varargs=None, keywords=None, defaults=(None, True)), ('document', 'be47d7e07824b4281da77472846955ac'))
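The API.spec entry is a frozen record of the public signature: the positional defaults tuple maps one-to-one onto the listed argument names, so after this change the final default (for do_model_average) reads True instead of False. A minimal sketch of how that entry could be cross-checked at runtime, assuming a Paddle 1.5-era install that exposes paddle.fluid.ParamAttr (the script is illustrative only, not part of the commit):

import inspect

import paddle.fluid as fluid

# Illustrative check only: mirror the ArgSpec line in paddle/fluid/API.spec
# by inspecting ParamAttr.__init__ directly.
spec = inspect.getfullargspec(fluid.ParamAttr.__init__)

# Expected to list 'do_model_average' as the last argument ...
print(spec.args)
# ... and, after this change, to end the defaults tuple with True:
# (None, None, 1.0, None, True, None, True)
print(spec.defaults)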
python/paddle/fluid/param_attr.py

@@ -42,8 +42,8 @@ class ParamAttr(object):
         trainable(bool): Whether this parameter is trainable. Default True.
         gradient_clip(BaseGradientClipAttr): The method to clip this parameter's
             gradient. Default None.
-        do_model_average(bool): Whether this parameter should do model average.
-            Default False.
+        do_model_average(bool): Whether this parameter should do model average
+            when model average is enabled. Default True.

     Examples:
         .. code-block:: python
@@ -65,14 +65,14 @@ class ParamAttr(object):
                  regularizer=None,
                  trainable=True,
                  gradient_clip=None,
-                 do_model_average=False):
+                 do_model_average=True):
         self.name = name
         self.initializer = initializer
         self.learning_rate = learning_rate
         self.regularizer = regularizer
         self.trainable = trainable
         self.gradient_clip = gradient_clip
-        self.model_average = do_model_average
+        self.do_model_average = do_model_average

     def _set_default_initializer(self, initializer):
         """
@@ -170,7 +170,7 @@ class ParamAttr(object):
             'regularizer': self.regularizer,
             'trainable': self.trainable,
             'gradient_clip_attr': self.gradient_clip,
-            'model_average': self.model_average
+            'do_model_average': self.do_model_average
         }
         if with_initializer:
             kwargs['initializer'] = self.initializer
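The rename matters downstream: anything that read the old ParamAttr.model_average attribute, or the 'model_average' key emitted by _to_kwargs, now needs the do_model_average name, and the default flips from False to True. A small usage sketch under the new behavior (the parameter names fc_w and fc_b are hypothetical; only the ParamAttr API shown in the diff above is assumed):

import paddle.fluid as fluid

# Sketch only: with this commit, do_model_average defaults to True and is
# stored as ParamAttr.do_model_average (previously ParamAttr.model_average).
w_attr = fluid.ParamAttr(name="fc_w")                           # default left in place
b_attr = fluid.ParamAttr(name="fc_b", do_model_average=False)   # opt this parameter out

print(w_attr.do_model_average)  # True  (was False by default before this fix)
print(b_attr.do_model_average)  # False

# This flag is what model-average style optimizers can consult when deciding
# which parameters to include in the averaged weights.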