BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Unverified commit 598b2d1f
Authored on Sep 29, 2018 by Zeng Jinle; committed via GitHub on Sep 29, 2018
Merge pull request #13667 from sneaxiy/release/1.0.0
Cherry-pick API change to Release/1.0.0
Parents: 8d16de73, 696f6453
Showing 4 changed files with 31 additions and 81 deletions (+31, -81)
paddle/fluid/API.spec             +8   -8
python/paddle/fluid/layers/nn.py  +19  -72
python/paddle/fluid/layers/ops.py +2   -0
python/paddle/fluid/optimizer.py  +2   -1
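Taken together, these changes drop the explicit out parameter from the public scale and elementwise_* layers; each layer now always creates its own output variable. A minimal before/after sketch of caller code (variable names are illustrative and assume a paddle.fluid 1.0.x default program):

# Illustrative caller-side sketch of the API change (hypothetical variable
# names; assumes paddle.fluid 1.0.x and the default main program).
import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[32], dtype='float32')
y = fluid.layers.data(name='y', shape=[32], dtype='float32')

# Before this commit a caller could supply its own output variable:
#     z = fluid.layers.elementwise_add(x, y, out=my_var, act='relu')
# After it, the layer always creates the output itself:
z = fluid.layers.elementwise_add(x, y, act='relu')
s = fluid.layers.scale(z, scale=2.0, bias=1.0, bias_after_scale=True)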
paddle/fluid/API.spec
@@ -145,14 +145,14 @@ paddle.fluid.layers.unstack ArgSpec(args=['x', 'axis', 'num'], varargs=None, key
 paddle.fluid.layers.sequence_enumerate ArgSpec(args=['input', 'win_size', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0, None))
 paddle.fluid.layers.expand ArgSpec(args=['x', 'expand_times', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.sequence_concat ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,))
-paddle.fluid.layers.scale ArgSpec(args=['x', 'scale', 'bias', 'bias_after_scale', 'out', 'act', 'name'], varargs=None, keywords=None, defaults=(1.0, 0.0, True, None, None, None))
-paddle.fluid.layers.elementwise_add ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
-paddle.fluid.layers.elementwise_div ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
-paddle.fluid.layers.elementwise_sub ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
-paddle.fluid.layers.elementwise_mul ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
-paddle.fluid.layers.elementwise_max ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
-paddle.fluid.layers.elementwise_min ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
-paddle.fluid.layers.elementwise_pow ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
+paddle.fluid.layers.scale ArgSpec(args=['x', 'scale', 'bias', 'bias_after_scale', 'act', 'name'], varargs=None, keywords=None, defaults=(1.0, 0.0, True, None, None))
+paddle.fluid.layers.elementwise_add ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.elementwise_div ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.elementwise_sub ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.elementwise_mul ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.elementwise_max ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.elementwise_min ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.elementwise_pow ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
 paddle.fluid.layers.uniform_random_batch_size_like ArgSpec(args=['input', 'shape', 'dtype', 'input_dim_idx', 'output_dim_idx', 'min', 'max', 'seed'], varargs=None, keywords=None, defaults=('float32', 0, 0, -1.0, 1.0, 0))
 paddle.fluid.layers.gaussian_random ArgSpec(args=['shape', 'mean', 'std', 'seed', 'dtype', 'use_mkldnn'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32', False))
 paddle.fluid.layers.sampling_id ArgSpec(args=['x', 'min', 'max', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32'))
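Each API.spec line records the argument spec of one public function, so a signature change shows up as a one-line diff per function. A small sketch of how such a line corresponds to a Python signature; the dummy function below merely copies the new elementwise_add signature and uses the standard-library inspect module, not Paddle's own spec tooling:

# Sketch: how an API.spec entry mirrors a function signature. The function
# below is a stand-alone dummy, not Paddle code.
import inspect

def elementwise_add(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
    pass

spec = inspect.getfullargspec(elementwise_add)
print(spec.args)      # ['x', 'y', 'axis', 'use_mkldnn', 'act', 'name']
print(spec.defaults)  # (-1, False, None, None)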
python/paddle/fluid/layers/nn.py
@@ -6630,8 +6630,6 @@ def _elementwise_op(helper):
     assert y is not None, 'y cannot be None in {}'.format(op_type)
     axis = helper.kwargs.get('axis', -1)
     use_mkldnn = helper.kwargs.get('use_mkldnn', False)
-    out = helper.kwargs.get('out', None)
-    if out is None:
     name = helper.kwargs.get('name', None)
     if name is None:
         out = helper.create_tmp_variable(dtype=x.dtype)
@@ -6650,13 +6648,7 @@ def _elementwise_op(helper):
 @templatedoc()
-def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, out=None, act=None, name=None):
+def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
     """
     ${comment}
@@ -6665,7 +6657,6 @@ def scale(x,
         scale(${scale_type}): ${scale_comment}
         bias(${bias_type}): ${bias_comment}
         bias_after_scale(${bias_after_scale_type}): ${bias_after_scale_comment}
-        out(Tensor): Output tensor.
         act(basestring|None): Activation applied to the output.
         name(basestring|None): Name of the output.
@@ -6674,7 +6665,6 @@ def scale(x,
     """
     helper = LayerHelper('scale', **locals())
-    if out is None:
     if name is None:
         out = helper.create_tmp_variable(dtype=x.dtype)
     else:
@@ -6693,73 +6683,31 @@ def scale(x,
     return helper.append_activation(out)

-def elementwise_add(x, y, out=None, axis=-1, use_mkldnn=False, act=None, name=None):
+def elementwise_add(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
     return _elementwise_op(LayerHelper('elementwise_add', **locals()))

-def elementwise_div(x, y, out=None, axis=-1, use_mkldnn=False, act=None, name=None):
+def elementwise_div(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
     return _elementwise_op(LayerHelper('elementwise_div', **locals()))

-def elementwise_sub(x, y, out=None, axis=-1, use_mkldnn=False, act=None, name=None):
+def elementwise_sub(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
     return _elementwise_op(LayerHelper('elementwise_sub', **locals()))

-def elementwise_mul(x, y, out=None, axis=-1, use_mkldnn=False, act=None, name=None):
+def elementwise_mul(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
     return _elementwise_op(LayerHelper('elementwise_mul', **locals()))

-def elementwise_max(x, y, out=None, axis=-1, use_mkldnn=False, act=None, name=None):
+def elementwise_max(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
     return _elementwise_op(LayerHelper('elementwise_max', **locals()))

-def elementwise_min(x, y, out=None, axis=-1, use_mkldnn=False, act=None, name=None):
+def elementwise_min(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
     return _elementwise_op(LayerHelper('elementwise_min', **locals()))

-def elementwise_pow(x, y, out=None, axis=-1, use_mkldnn=False, act=None, name=None):
+def elementwise_pow(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
     return _elementwise_op(LayerHelper('elementwise_pow', **locals()))

@@ -6771,7 +6719,6 @@ for func in [
     func.__doc__ = _generate_doc_string_(
         op_proto,
         additional_args_lines=[
-            "out (Tensor): The output tensor of elementwise op.",
             "act (basestring|None): Activation applied to the output.",
             "name (basestring|None): Name of the output."
         ])
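All of the rewritten wrappers follow one pattern: capture their own arguments with **locals() and hand them to a shared helper that reads them from kwargs. A minimal self-contained sketch of that pattern, where FakeHelper is a hypothetical stand-in rather than Paddle's real LayerHelper:

# Minimal sketch, NOT Paddle's real LayerHelper: shows how the thin
# elementwise_* wrappers forward their arguments via **locals() to one
# shared helper that looks them up in kwargs.
class FakeHelper(object):  # hypothetical stand-in for LayerHelper
    def __init__(self, layer_type, **kwargs):
        self.layer_type = layer_type
        self.kwargs = kwargs

def _elementwise_op(helper):
    x = helper.kwargs['x']
    y = helper.kwargs['y']
    axis = helper.kwargs.get('axis', -1)
    # The real code appends an operator to the program; the sketch just reports it.
    return '%s(x=%r, y=%r, axis=%d)' % (helper.layer_type, x, y, axis)

def elementwise_add(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
    # **locals() captures every parameter by name, so _elementwise_op can
    # read them without each wrapper repeating the plumbing.
    return _elementwise_op(FakeHelper('elementwise_add', **locals()))

print(elementwise_add(1.0, 2.0))  # elementwise_add(x=1.0, y=2.0, axis=-1)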
python/paddle/fluid/layers/ops.py
@@ -56,6 +56,8 @@ for _OP in set(__all__):
 # e.g.: test_program_code.py, test_dist_train.py
 globals()['_scale'] = generate_layer_fn('scale')

+globals()['_elementwise_div'] = generate_layer_fn('elementwise_div')
+
 __all__ += __activations_noattr__

 for _OP in set(__activations_noattr__):
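The added registration keeps a generated layer function available under a private module-level name so internal callers (such as optimizer.py below) can use it without it becoming part of the public API. A tiny stand-alone sketch of the globals() registration idiom, with make_layer_fn as a hypothetical stand-in for generate_layer_fn:

# Sketch of the globals()-registration idiom; make_layer_fn is hypothetical
# and only mimics the shape of generate_layer_fn's result.
def make_layer_fn(op_type):
    def layer_fn(**kwargs):
        return 'would append a %s op with %r' % (op_type, kwargs)
    layer_fn.__name__ = op_type
    return layer_fn

# Registered under leading-underscore names: usable by internal callers,
# but not advertised in the public __all__ list.
globals()['_scale'] = make_layer_fn('scale')
globals()['_elementwise_div'] = make_layer_fn('elementwise_div')

print(_elementwise_div(x=1, y=2))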
python/paddle/fluid/optimizer.py
@@ -26,6 +26,7 @@ from .layer_helper import LayerHelper
 from .regularizer import append_regularization_ops
 from .clip import append_gradient_clip_ops, error_clip_callback
 from contextlib import contextmanager
+from .layers import ops

 __all__ = [
     'SGD', 'Momentum', 'Adagrad', 'Adam', 'Adamax', 'DecayedAdagrad', 'Ftrl',
@@ -1301,7 +1302,7 @@ class ModelAverage(Optimizer):
                 x=tmp, dtype='float32' if self._dtype == None else self._dtype)
             sum = layers.cast(
                 x=sum, dtype='float32' if self._dtype == None else self._dtype)
-            layers.elementwise_div(x=sum, y=tmp, out=param)
+            ops._elementwise_div(x=sum, y=tmp, out=param)

     def _add_average_restore_op(self, block, param_grad):
         param = block._clone_variable(param_grad[0])
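ModelAverage divides the accumulated sum by the accumulated count and writes the result back into the parameter variable itself. Because the public elementwise_div no longer accepts out=, the code switches to the generated ops._elementwise_div, which still does. A rough usage sketch of the same pattern, assuming a paddle.fluid 1.0.x install and illustrative variable names:

# Rough sketch (assumes paddle.fluid 1.0.x); shows the write-into-an-existing-
# variable style that out= enables, mirroring the ModelAverage change above.
import paddle.fluid as fluid
from paddle.fluid.layers import ops

prog = fluid.Program()
with fluid.program_guard(prog):
    total = fluid.layers.fill_constant(shape=[2, 3], dtype='float32', value=6.0)
    count = fluid.layers.fill_constant(shape=[2, 3], dtype='float32', value=2.0)
    # Public API: a fresh output variable is created and returned.
    avg = fluid.layers.elementwise_div(x=total, y=count)
    # Generated internal variant: the result is written into an existing variable.
    ops._elementwise_div(x=total, y=count, out=avg)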