Commit a8f66365 (unverified)

Merge pull request #13524 from sneaxiy/fix_api_kwargs

Remove kwargs in elementwise layers and scale layer

Authored by Zeng Jinle on Sep 22, 2018; committed via GitHub on Sep 22, 2018.
Parents: 175a2ef2, 70e70d7d
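In practice, the change replaces the `**kwargs`-based wrappers that `generate_layer_fn` produced for `scale` and the `elementwise_*` layers with explicit Python signatures, so mistyped keyword arguments fail loudly instead of being silently swallowed. A short sketch of calling the new API, assuming the Fluid 1.x build this diff targets:

```python
import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[4], dtype='float32')
y = fluid.layers.data(name='y', shape=[4], dtype='float32')

# After this commit the signatures are explicit; `act` fuses an
# activation onto the output, exactly as the new wrappers document.
out = fluid.layers.elementwise_add(x, y, axis=-1, act='relu')
scaled = fluid.layers.scale(x, scale=2.0, bias=1.0, bias_after_scale=True)
```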
Showing 9 changed files with 150 additions and 27 deletions (+150 −27):

- paddle/fluid/API.spec (+8 −8)
- paddle/fluid/operators/scale_op.cc (+8 −2)
- python/paddle/fluid/framework.py (+2 −1)
- python/paddle/fluid/layers/layer_function_generator.py (+8 −1)
- python/paddle/fluid/layers/learning_rate_scheduler.py (+2 −2)
- python/paddle/fluid/layers/nn.py (+113 −3)
- python/paddle/fluid/layers/ops.py (+5 −8)
- python/paddle/fluid/tests/unittests/test_dist_train.py (+2 −1)
- python/paddle/fluid/tests/unittests/test_program_code.py (+2 −1)
paddle/fluid/API.spec

```diff
@@ -178,6 +178,14 @@ paddle.fluid.layers.unstack ArgSpec(args=['x', 'axis', 'num'], varargs=None, key
 paddle.fluid.layers.sequence_enumerate ArgSpec(args=['input', 'win_size', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0, None))
 paddle.fluid.layers.expand ArgSpec(args=['x', 'expand_times', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.sequence_concat ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.scale ArgSpec(args=['x', 'scale', 'bias', 'bias_after_scale', 'act', 'name'], varargs=None, keywords=None, defaults=(1.0, 0.0, True, None, None))
+paddle.fluid.layers.elementwise_add ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.elementwise_div ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.elementwise_sub ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.elementwise_mul ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.elementwise_max ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.elementwise_min ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.elementwise_pow ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
 paddle.fluid.layers.data ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True))
 paddle.fluid.layers.open_files ArgSpec(args=['filenames', 'shapes', 'lod_levels', 'dtypes', 'thread_num', 'buffer_size', 'pass_num', 'is_test'], varargs=None, keywords=None, defaults=(None, None, 1, None))
 paddle.fluid.layers.read_file ArgSpec(args=['reader'], varargs=None, keywords=None, defaults=None)
@@ -242,15 +250,7 @@ paddle.fluid.layers.Print ArgSpec(args=['input', 'first_n', 'message', 'summariz
 paddle.fluid.layers.is_empty ArgSpec(args=['x', 'cond'], varargs=None, keywords='ignored', defaults=(None,))
 paddle.fluid.layers.mean ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.mul ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.scale ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.sigmoid_cross_entropy_with_logits ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.elementwise_add ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.elementwise_div ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.elementwise_sub ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.elementwise_mul ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.elementwise_max ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.elementwise_min ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.elementwise_pow ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.clip ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.clip_by_norm ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.logical_and ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
```
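Each API.spec line appears to mirror `inspect.getargspec` output for the public symbol, which is why dropping `**kwargs` surfaces here as `varargs='args', keywords='kwargs'` turning into explicit argument lists. A sketch of checking one entry locally, assuming a Fluid build that includes this commit:

```python
import inspect
import paddle.fluid as fluid  # assumes a Fluid build with this commit

spec = inspect.getargspec(fluid.layers.scale)
print(spec.args)      # ['x', 'scale', 'bias', 'bias_after_scale', 'act', 'name']
print(spec.keywords)  # None -- the **kwargs catch-all is gone
```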
paddle/fluid/operators/scale_op.cc

```diff
@@ -46,9 +46,15 @@ class ScaleOpMaker : public framework::OpProtoAndCheckerMaker {
     AddComment(R"DOC(
 **Scale operator**
 
-Multiply the input tensor with a float scalar to scale the input tensor.
+Apply scaling and bias addition to the input tensor.
 
-$$Out = scale*X$$
+if bias_after_scale=True:
+
+$$Out = scale*X + bias$$
+
+else:
+
+$$Out = scale*(X + bias)$$
 )DOC");
     AddAttr<float>("scale", "The scaling factor of the scale operator.")
         .SetDefault(1.0);
```
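The updated doc comment distinguishes whether the bias is added before or after scaling. A small NumPy sketch of the two formulas, for intuition only:

```python
import numpy as np

x = np.array([1.0, 2.0, 3.0])
scale, bias = 2.0, 1.0

out_after = scale * x + bias     # bias_after_scale=True (the default)
out_before = scale * (x + bias)  # bias_after_scale=False

print(out_after)   # [3. 5. 7.]
print(out_before)  # [4. 6. 8.]
```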
python/paddle/fluid/framework.py

```diff
@@ -489,7 +489,8 @@ class OpProtoHolder(object):
     def generated_op_attr_names():
         return {
             core.op_proto_and_checker_maker.kOpRoleAttrName(),
-            core.op_proto_and_checker_maker.kOpRoleVarAttrName()
+            core.op_proto_and_checker_maker.kOpRoleVarAttrName(),
+            core.op_proto_and_checker_maker.kOpNameScopeAttrName()
         }
```
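The set returned by `generated_op_attr_names` names attributes the framework attaches to every operator (role, role variables, and now the name scope), letting consumers separate framework-generated attributes from user-facing ones. A hedged illustration of that filtering, with the attribute-name strings assumed rather than taken from this diff:

```python
# Assumed attribute-name strings; the real values come from
# core.op_proto_and_checker_maker.kOp*AttrName().
generated = {'op_role', 'op_role_var', 'op_namescope'}

op_attrs = ['scale', 'bias', 'bias_after_scale', 'op_role', 'op_namescope']
user_facing = [name for name in op_attrs if name not in generated]
print(user_facing)  # ['scale', 'bias', 'bias_after_scale']
```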
python/paddle/fluid/layers/layer_function_generator.py

```diff
@@ -61,7 +61,7 @@ def escape_math(text):
                                     _two_dollar_pattern_.sub(r"!!\1!!", text)))
 
 
-def _generate_doc_string_(op_proto):
+def _generate_doc_string_(op_proto, additional_args_lines=None):
     """
     Generate docstring by OpProto
@@ -101,6 +101,13 @@ def _generate_doc_string_(op_proto):
         buf.write(escape_math(each_attr.comment))
         buf.write('\n')
 
+    if additional_args_lines is not None:
+        for line in additional_args_lines:
+            line = line.strip()
+            buf.write('    ')
+            buf.write(line)
+            buf.write('\n')
+
     if len(op_proto.outputs) != 0:
         buf.write('\nReturns:\n')
         buf.write('    ')
```
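`_generate_doc_string_` builds a docstring from the OpProto alone, so the generated `elementwise_*` docs would otherwise miss the Python-only `act` and `name` arguments; `additional_args_lines` lets the caller append them to the Args section. A minimal sketch of that appending behavior in isolation (plain `StringIO`, no OpProto involved, content hypothetical):

```python
from io import StringIO

# Hypothetical stand-in for the tail of _generate_doc_string_: append
# caller-supplied argument lines after the proto-derived ones.
buf = StringIO()
buf.write("Args:\n")
buf.write("    x: (Tensor) the input tensor\n")

additional_args_lines = [
    "act (basestring|None): Activation applied to the output.",
    "name (basestring|None): Name of the output.",
]
for line in additional_args_lines:
    buf.write('    ' + line.strip() + '\n')

print(buf.getvalue())
```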
python/paddle/fluid/layers/learning_rate_scheduler.py

```diff
@@ -68,7 +68,7 @@ def noam_decay(d_model, warmup_steps):
         a = global_step**-0.5
         b = (warmup_steps**-1.5) * global_step
-        lr_value = (d_model**-0.5) * ops.elementwise_min(a, b)
+        lr_value = (d_model**-0.5) * nn.elementwise_min(a, b)
 
         return lr_value
@@ -241,7 +241,7 @@ def polynomial_decay(learning_rate,
         else:
             decay_steps_var = tensor.fill_constant(
                 shape=[1], dtype='float32', value=float(decay_steps))
-            global_step = ops.elementwise_min(x=global_step, y=decay_steps_var)
+            global_step = nn.elementwise_min(x=global_step, y=decay_steps_var)
 
     decayed_lr = (learning_rate - end_learning_rate) * \
         ((1 - global_step / decay_steps)**power) + end_learning_rate
```
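Both hunks simply repoint `elementwise_min` from the removed `ops` wrapper to the new explicit function in `nn`. For reference, the schedule computed by `noam_decay` is $lr = d_{model}^{-0.5} \cdot \min(step^{-0.5},\ step \cdot warmup\_steps^{-1.5})$; a plain-Python sketch of the same formula:

```python
def noam_lr(step, d_model, warmup_steps):
    # Mirrors noam_decay above: warm up linearly, then decay as step**-0.5.
    a = step ** -0.5
    b = (warmup_steps ** -1.5) * step
    return (d_model ** -0.5) * min(a, b)

print(noam_lr(100, d_model=512, warmup_steps=4000))   # still warming up
print(noam_lr(8000, d_model=512, warmup_steps=4000))  # past warmup, decaying
```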
python/paddle/fluid/layers/nn.py

```diff
@@ -20,9 +20,9 @@ from __future__ import print_function
 import numpy as np
 from ..layer_helper import LayerHelper
 from ..initializer import Normal, Constant
-from ..framework import Variable
+from ..framework import Variable, OpProtoHolder
 from ..param_attr import ParamAttr
-from .layer_function_generator import autodoc, templatedoc
+from .layer_function_generator import autodoc, templatedoc, _generate_doc_string_
 from .tensor import concat
 from . import utils
 from .. import unique_name
@@ -125,6 +125,14 @@ __all__ = [
     'sequence_enumerate',
     'expand',
     'sequence_concat',
+    'scale',
+    'elementwise_add',
+    'elementwise_div',
+    'elementwise_sub',
+    'elementwise_mul',
+    'elementwise_max',
+    'elementwise_min',
+    'elementwise_pow',
 ]
@@ -3614,7 +3622,7 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
         attrs={
             'transpose_X': transpose_x,
             'transpose_Y': transpose_y,
-            'alpha': alpha,
+            'alpha': float(alpha),
         })
     return out
@@ -6453,3 +6461,105 @@ def expand(x, expand_times, name=None):
         outputs={'Out': out},
         attrs={'expand_times': expand_times})
     return out
+
+
+def _elementwise_op(helper):
+    op_type = helper.layer_type
+    x = helper.kwargs.get('x', None)
+    y = helper.kwargs.get('y', None)
+    assert x is not None, 'x cannot be None in {}'.format(op_type)
+    assert y is not None, 'y cannot be None in {}'.format(op_type)
+    axis = helper.kwargs.get('axis', -1)
+    use_mkldnn = helper.kwargs.get('use_mkldnn', False)
+    name = helper.kwargs.get('name', None)
+    if name is None:
+        out = helper.create_tmp_variable(dtype=x.dtype)
+    else:
+        out = helper.create_variable(
+            name=name, dtype=x.dtype, persistable=False)
+
+    helper.append_op(
+        type=op_type,
+        inputs={'X': x,
+                'Y': y},
+        outputs={'Out': out},
+        attrs={'axis': axis,
+               'use_mkldnn': use_mkldnn})
+    return helper.append_activation(out)
+
+
+@templatedoc()
+def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
+    """
+    ${comment}
+
+    Args:
+        x(${x_type}): ${x_comment}
+        scale(${scale_type}): ${scale_comment}
+        bias(${bias_type}): ${bias_comment}
+        bias_after_scale(${bias_after_scale_type}): ${bias_after_scale_comment}
+        act(basestring|None): Activation applied to the output.
+        name(basestring|None): Name of the output.
+
+    Returns:
+        out(${out_type}): ${out_comment}
+    """
+
+    helper = LayerHelper('scale', **locals())
+    if name is None:
+        out = helper.create_tmp_variable(dtype=x.dtype)
+    else:
+        out = helper.create_variable(
+            name=name, dtype=x.dtype, persistable=False)
+
+    helper.append_op(
+        type='scale',
+        inputs={'X': x},
+        outputs={'Out': out},
+        attrs={
+            'scale': float(scale),
+            'bias': float(bias),
+            'bias_after_scale': bias_after_scale
+        })
+    return helper.append_activation(out)
+
+
+def elementwise_add(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
+    return _elementwise_op(LayerHelper('elementwise_add', **locals()))
+
+
+def elementwise_div(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
+    return _elementwise_op(LayerHelper('elementwise_div', **locals()))
+
+
+def elementwise_sub(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
+    return _elementwise_op(LayerHelper('elementwise_sub', **locals()))
+
+
+def elementwise_mul(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
+    return _elementwise_op(LayerHelper('elementwise_mul', **locals()))
+
+
+def elementwise_max(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
+    return _elementwise_op(LayerHelper('elementwise_max', **locals()))
+
+
+def elementwise_min(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
+    return _elementwise_op(LayerHelper('elementwise_min', **locals()))
+
+
+def elementwise_pow(x, y, axis=-1, use_mkldnn=False, act=None, name=None):
+    return _elementwise_op(LayerHelper('elementwise_pow', **locals()))
+
+
+for func in [
+        elementwise_add,
+        elementwise_div,
+        elementwise_sub,
+        elementwise_mul,
+        elementwise_max,
+        elementwise_min,
+        elementwise_pow,
+]:
+    op_proto = OpProtoHolder.instance().get_op_proto(func.__name__)
+    func.__doc__ = _generate_doc_string_(
+        op_proto,
+        additional_args_lines=[
+            "act (basestring|None): Activation applied to the output.",
+            "name (basestring|None): Name of the output."
+        ])
```
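All the new wrappers funnel through `_elementwise_op`, which reads `x`, `y`, `axis`, `use_mkldnn`, and `name` back out of the `locals()` captured by `LayerHelper`, appends the operator, and applies `act` last. A small usage sketch under the new signatures, assuming the Fluid 1.x API from this diff:

```python
import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[3, 4], dtype='float32')
y = fluid.layers.data(name='y', shape=[3, 4], dtype='float32')

# `act` fuses an activation after the elementwise op; `name` pins the
# output variable's name instead of using a temporary.
z = fluid.layers.elementwise_mul(x, y, act='relu', name='xy_relu')
s = fluid.layers.scale(x, scale=0.5, bias=1.0, bias_after_scale=False)
```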
python/paddle/fluid/layers/ops.py

```diff
@@ -37,15 +37,7 @@ __activations_noattr__ = [
 __all__ = [
     'mean',
     'mul',
-    'scale',
     'sigmoid_cross_entropy_with_logits',
-    'elementwise_add',
-    'elementwise_div',
-    'elementwise_sub',
-    'elementwise_mul',
-    'elementwise_max',
-    'elementwise_min',
-    'elementwise_pow',
     'clip',
     'clip_by_norm',
     'logical_and',
@@ -66,6 +58,11 @@ __all__ = [
 for _OP in set(__all__):
     globals()[_OP] = generate_layer_fn(_OP)
 
+# It is a hot fix in some unittest using:
+#   fluid.layers.scale(x=x, scale=10.0, out=out_var)
+# e.g.: test_program_code.py, test_dist_train.py
+globals()['_scale'] = generate_layer_fn('scale')
+
 __all__ += __activations_noattr__
 
 for _OP in set(__activations_noattr__):
```
python/paddle/fluid/tests/unittests/test_dist_train.py

```diff
@@ -27,6 +27,7 @@ import paddle.fluid.layers as layers
 from paddle.fluid.layers.io import ListenAndServ
 from paddle.fluid.layers.io import Recv
 from paddle.fluid.layers.io import Send
+import paddle.fluid.layers.ops as ops
 
 from paddle.fluid import core
@@ -89,7 +90,7 @@ class TestSendOp(unittest.TestCase):
                     name="X",
                     append_batch_size=False)
                 fluid.initializer.Constant(value=1.0)(x, main.global_block())
-                layers.scale(x=x, scale=10.0, out=out_var)
+                ops._scale(x=x, scale=10.0, out=out_var)
 
             self.server_exe = fluid.Executor(place)
             self.server_exe.run(main)
```
python/paddle/fluid/tests/unittests/test_program_code.py

```diff
@@ -25,6 +25,7 @@ import paddle.fluid.layers as layers
 from paddle.fluid.layers.io import ListenAndServ
 from paddle.fluid.layers.io import Recv
 from paddle.fluid.layers.io import Send
+import paddle.fluid.layers.ops as ops
 from paddle.fluid.transpiler.details import program_to_code
@@ -52,7 +53,7 @@ class TestProgram2Code(unittest.TestCase):
                     name="X",
                     append_batch_size=False)
                 fluid.initializer.Constant(value=1.0)(x, main.global_block())
-                layers.scale(x=x, scale=10.0, out=out_var)
+                ops._scale(x=x, scale=10.0, out=out_var)
 
         program_to_code(main)
```