s920243400 / PaddleDetection (forked from PaddlePaddle / PaddleDetection)
Commit bc1fa4fd (unverified)
Authored Sep 25, 2018 by Xin Pan; committed via GitHub on Sep 25, 2018
Parents: 46498bf1, 355a2265

Merge pull request #13556 from panyx0718/doc

clean a few more kwargs
Showing 3 changed files with 31 additions and 27 deletions (+31 -27):

  paddle/fluid/API.spec                      +2   -2
  python/paddle/fluid/parallel_executor.py   +1   -22
  python/paddle/fluid/param_attr.py          +28  -3
paddle/fluid/API.spec

@@ -41,7 +41,7 @@ paddle.fluid.DistributeTranspiler.transpile ArgSpec(args=['self', 'trainer_id',
 paddle.fluid.memory_optimize ArgSpec(args=['input_program', 'skip_opt_set', 'print_log', 'level'], varargs=None, keywords=None, defaults=(None, False, 0))
 paddle.fluid.release_memory ArgSpec(args=['input_program', 'skip_opt_set'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.DistributeTranspilerConfig.__init__
-paddle.fluid.ParallelExecutor.__init__ ArgSpec(args=['self', 'use_cuda', 'loss_name', 'main_program', 'share_vars_from', 'exec_strategy', 'build_strategy', 'num_trainers', 'trainer_id', 'scope'], varargs=None, keywords='kwargs', defaults=(None, None, None, None, None, 1, 0, None))
+paddle.fluid.ParallelExecutor.__init__ ArgSpec(args=['self', 'use_cuda', 'loss_name', 'main_program', 'share_vars_from', 'exec_strategy', 'build_strategy', 'num_trainers', 'trainer_id', 'scope'], varargs=None, keywords=None, defaults=(None, None, None, None, None, 1, 0, None))
 paddle.fluid.ParallelExecutor.run ArgSpec(args=['self', 'fetch_list', 'feed', 'feed_dict', 'return_numpy'], varargs=None, keywords=None, defaults=(None, None, True))
 paddle.fluid.ExecutionStrategy.__init__ __init__(self: paddle.fluid.core.ExecutionStrategy) -> None
 paddle.fluid.BuildStrategy.GradientScaleStrategy.__init__ __init__(self: paddle.fluid.core.GradientScaleStrategy, arg0: int) -> None
@@ -374,7 +374,7 @@ paddle.fluid.CPUPlace.__init__ __init__(self: paddle.fluid.core.CPUPlace) -> Non
 paddle.fluid.CUDAPlace.__init__ __init__(self: paddle.fluid.core.CUDAPlace, arg0: int) -> None
 paddle.fluid.CUDAPinnedPlace.__init__ __init__(self: paddle.fluid.core.CUDAPinnedPlace) -> None
 paddle.fluid.ParamAttr.__init__ ArgSpec(args=['self', 'name', 'initializer', 'learning_rate', 'regularizer', 'trainable', 'gradient_clip', 'do_model_average'], varargs=None, keywords=None, defaults=(None, None, 1.0, None, True, None, False))
-paddle.fluid.WeightNormParamAttr.__init__ ArgSpec(args=['self', 'dim'], varargs=None, keywords='kwargs', defaults=(None,))
+paddle.fluid.WeightNormParamAttr.__init__ ArgSpec(args=['self', 'dim', 'name', 'initializer', 'learning_rate', 'regularizer', 'trainable', 'gradient_clip', 'do_model_average'], varargs=None, keywords=None, defaults=(None, None, None, 1.0, None, True, None, False))
 paddle.fluid.DataFeeder.__init__ ArgSpec(args=['self', 'feed_list', 'place', 'program'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.DataFeeder.decorate_reader ArgSpec(args=['self', 'reader', 'multi_devices', 'num_places', 'drop_last'], varargs=None, keywords=None, defaults=(None, True))
 paddle.fluid.DataFeeder.feed ArgSpec(args=['self', 'iterable'], varargs=None, keywords=None, defaults=None)
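API.spec pins the public signature of every exported fluid API, so the two changed lines are the visible contract change: keywords='kwargs' becomes keywords=None for both constructors. As a rough illustration (not part of this commit), the same ArgSpec fields can be read back from a live install with the standard inspect module; this sketch assumes a Python 2 environment with this version of paddle.fluid importable.

import inspect

import paddle.fluid as fluid

# Read the argument spec of the updated constructor. After this commit,
# spec.keywords is None (no **kwargs catch-all) and spec.args ends with
# 'scope', matching the '+' line in API.spec above.
spec = inspect.getargspec(fluid.ParallelExecutor.__init__)
print(spec.args, spec.varargs, spec.keywords, spec.defaults)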
python/paddle/fluid/parallel_executor.py

@@ -74,28 +74,7 @@ class ParallelExecutor(object):
                  build_strategy=None,
                  num_trainers=1,
                  trainer_id=0,
-                 scope=None,
-                 **kwargs):
-        if len(kwargs) != 0:
-            err_msg = ""
-            for key in kwargs:
-                if key in dir(ExecutionStrategy):
-                    err_msg += \
-                        "Setting {0} by constructor is deprecated. Use " \
-                        "strategy=ExecutionStrategy(); strategy.{0}=xxx; " \
-                        "pe=ParallelExecutor(exec_strategy=strategy) " \
-                        "instead.\n".format(key)
-                elif key in dir(BuildStrategy):
-                    err_msg += \
-                        "Setting {0} by constructor is deprecated. Use " \
-                        "strategy=BuildStrategy(); See help(" \
-                        "paddle.fluid.ParallelExecutor.BuildStrategy) \n".format(
-                            key)
-                else:
-                    err_msg += "Setting {0} by constructor is deprecated. Use strategy.\n".format(
-                        key)
-            raise ValueError(err_msg)
-
+                 scope=None):
         self._places = []
         self._act_places = []
         if use_cuda:
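The deleted block was the transitional check that turned leftover constructor kwargs into a deprecation error pointing at ExecutionStrategy and BuildStrategy; with it removed, stray keywords now fail with an ordinary TypeError. Below is a rough sketch of the strategy-object pattern that the removed message itself recommended; it is not code from this repository, and loss stands in for whatever loss Variable the surrounding fluid program defines.

import paddle.fluid as fluid

# Collect execution options on a strategy object instead of passing them as
# loose keyword arguments to the ParallelExecutor constructor.
exec_strategy = fluid.ExecutionStrategy()
exec_strategy.num_threads = 4  # example option; formerly a constructor kwarg

build_strategy = fluid.BuildStrategy()

# `loss` is assumed to be a loss Variable built elsewhere in the program.
pe = fluid.ParallelExecutor(use_cuda=True,
                            loss_name=loss.name,
                            exec_strategy=exec_strategy,
                            build_strategy=build_strategy)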
python/paddle/fluid/param_attr.py

@@ -185,7 +185,17 @@ class WeightNormParamAttr(ParamAttr):
 
     Args:
         dim(list): The parameter's name. Default None.
-        kwargs: Any field in ParamAttr. Default None.
+        name(str): The parameter's name. Default None.
+        initializer(Initializer): The method to initial this parameter. Default None.
+        learning_rate(float): The parameter's learning rate. The learning rate when
+            optimize is :math:`global\_lr * parameter\_lr * scheduler\_factor`.
+            Default 1.0.
+        regularizer(WeightDecayRegularizer): Regularization factor. Default None.
+        trainable(bool): Whether this parameter is trainable. Default True.
+        gradient_clip(BaseGradientClipAttr): The method to clip this parameter's
+            gradient. Default None.
+        do_model_average(bool): Whether this parameter should do model average.
+            Default False.
 
     Examples:
         .. code-block:: python
@@ -204,6 +214,21 @@ class WeightNormParamAttr(ParamAttr):
     # these paramters for inference.
     params_with_weight_norm = []
 
-    def __init__(self, dim=None, **kwargs):
-        super(WeightNormParamAttr, self).__init__(**kwargs)
+    def __init__(self,
+                 dim=None,
+                 name=None,
+                 initializer=None,
+                 learning_rate=1.0,
+                 regularizer=None,
+                 trainable=True,
+                 gradient_clip=None,
+                 do_model_average=False):
+        super(WeightNormParamAttr, self).__init__(
+            name=name,
+            initializer=initializer,
+            learning_rate=learning_rate,
+            regularizer=regularizer,
+            trainable=trainable,
+            gradient_clip=gradient_clip,
+            do_model_average=do_model_average)
         self.dim = dim
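With the explicit signature in place, every field that previously had to travel through **kwargs is now a named, documented parameter that editors and the API checker can see. A rough usage sketch (assumed, not taken from the class docstring's own example) of handing a WeightNormParamAttr to a layer:

import paddle.fluid as fluid

# Declare an input and attach a weight-normalized parameter attribute to an
# fc layer; shapes and names here are illustrative only.
data = fluid.layers.data(name='data', shape=[3, 32, 32], dtype='float32')
hidden = fluid.layers.fc(input=data,
                         size=1000,
                         param_attr=fluid.WeightNormParamAttr(
                             dim=None,
                             name='fc_weight_norm',
                             learning_rate=1.0,
                             trainable=True))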