PaddlePaddle / PaddleDetection
Commit b151d90b (unverified)

Merge pull request #13661 from velconia/10_fix_api_local

10 fix api local

Authored by Xin Pan on Sep 29, 2018; committed via GitHub on Sep 29, 2018.
Parents: 598b2d1f, 67558c59

This merge moves the logical_* and clip/clip_by_norm layers from the auto-generated wrappers in ops.py to explicit Python implementations in nn.py, updating API.spec and an import in control_flow.py to match.

4 changed files with 225 additions and 67 deletions (+225 / -67)
Files changed:

paddle/fluid/API.spec                        +6    -6
python/paddle/fluid/layers/control_flow.py   +1    -1
python/paddle/fluid/layers/nn.py             +218  -54
python/paddle/fluid/layers/ops.py            +0    -6
paddle/fluid/API.spec

@@ -160,6 +160,12 @@ paddle.fluid.layers.gaussian_random_batch_size_like ArgSpec(args=['input', 'shap
 paddle.fluid.layers.sum ArgSpec(args=['x', 'use_mkldnn'], varargs=None, keywords=None, defaults=(False,))
 paddle.fluid.layers.slice ArgSpec(args=['input', 'axes', 'starts', 'ends'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.shape ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None)
+paddle.fluid.layers.logical_and ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None))
+paddle.fluid.layers.logical_or ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None))
+paddle.fluid.layers.logical_xor ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None))
+paddle.fluid.layers.logical_not ArgSpec(args=['x', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None))
+paddle.fluid.layers.clip ArgSpec(args=['x', 'min', 'max', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.clip_by_norm ArgSpec(args=['x', 'max_norm', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.data ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True))
 paddle.fluid.layers.open_files ArgSpec(args=['filenames', 'shapes', 'lod_levels', 'dtypes', 'thread_num', 'buffer_size', 'pass_num', 'is_test'], varargs=None, keywords=None, defaults=(None, None, 1, None))
 paddle.fluid.layers.read_file ArgSpec(args=['reader'], varargs=None, keywords=None, defaults=None)

@@ -225,12 +231,6 @@ paddle.fluid.layers.is_empty ArgSpec(args=['x', 'cond'], varargs=None, keywords=
 paddle.fluid.layers.mean ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.mul ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.sigmoid_cross_entropy_with_logits ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.clip ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.clip_by_norm ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.logical_and ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.logical_or ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.logical_xor ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.logical_not ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.maxout ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.sigmoid ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.logsigmoid ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
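In plain terms, these six layers move from opaque ArgSpec(args=[], varargs='args', keywords='kwargs') stubs to explicit keyword signatures. A minimal usage sketch of what callers can now rely on, assuming a Fluid 1.x environment (the variable names and shapes here are illustrative, not from the patch):

    import paddle.fluid as fluid

    # Boolean operands for the logical ops, a float operand for clip.
    a = fluid.layers.data(name='a', shape=[2], dtype='bool')
    b = fluid.layers.data(name='b', shape=[2], dtype='bool')
    x = fluid.layers.data(name='x', shape=[2], dtype='float32')

    # Keyword arguments now line up with the published ArgSpec entries.
    both = fluid.layers.logical_and(x=a, y=b)
    clipped = fluid.layers.clip(x=x, min=-1.0, max=1.0)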
python/paddle/fluid/layers/control_flow.py

The logical ops are now imported from nn.py instead of ops.py, since the auto-generated versions are deleted from ops.py later in this diff.

@@ -21,7 +21,7 @@ from .. import core
 from ..framework import Program, Variable, Operator
 from ..layer_helper import LayerHelper, unique_name
 from ..initializer import force_init_on_cpu
-from .ops import logical_and, logical_not, logical_or
+from .nn import logical_and, logical_not, logical_or
 import numpy
 import warnings
 import six
python/paddle/fluid/layers/nn.py

The six layer names join nn.py's __all__ export list (exact line wrapping of the list is reconstructed):

@@ -51,7 +51,9 @@ __all__ = [
     'expand', 'sequence_concat', 'scale', 'elementwise_add', 'elementwise_div',
     'elementwise_sub', 'elementwise_mul', 'elementwise_max', 'elementwise_min',
     'elementwise_pow', 'uniform_random_batch_size_like', 'gaussian_random',
-    'sampling_id', 'gaussian_random_batch_size_like', 'sum', 'slice', 'shape'
+    'sampling_id', 'gaussian_random_batch_size_like', 'sum', 'slice', 'shape',
+    'logical_and', 'logical_or', 'logical_xor', 'logical_not', 'clip',
+    'clip_by_norm'
 ]
The next hunks in nn.py touch only docstrings: in each, the before/after lines are textually identical apart from trailing whitespace or indentation. Affected locations:

@@ -953,8 +955,8 @@      cross_entropy: ignore_index description
@@ -2714,20 +2716,20 @@  sequence_pad: pad_value and maxlen descriptions, Returns
@@ -4343,8 +4345,8 @@    softmax_with_cross_entropy: ignore_index description
@@ -4601,14 +4603,14 @@  squeeze: summary and Case 1 example
@@ -4617,11 +4619,11 @@  squeeze: Case 2 example and Args
@@ -4651,14 +4653,14 @@  unsqueeze: summary and shape example
@@ -5757,39 +5759,39 @@  pad2d: constant/reflect/edge padding examples
@@ -5988,7 +5990,7 @@    prelu: name argument description
@@ -6166,10 +6168,10 @@  sequence_enumerate: summary
@@ -6296,20 +6298,20 @@  unstack: summary and Args
@@ -6342,21 +6344,21 @@  expand: code-block example
@@ -6658,7 +6660,7 @@    scale: name argument description
The final nn.py hunk appends the explicit implementations at the end of the file:

@@ -6722,3 +6724,165 @@ for func in [
         "act (basestring|None): Activation applied to the output.",
         "name (basestring|None): Name of the output."
     ])
+
+
+def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
+    helper = LayerHelper(op_name, **locals())
+
+    if binary_op:
+        assert x.dtype == y.dtype
+
+    if out is None:
+        if name is None:
+            out = helper.create_tmp_variable(dtype=x.dtype)
+        else:
+            out = helper.create_variable(
+                name=name, dtype=x.dtype, persistable=False)
+
+    if binary_op:
+        helper.append_op(
+            type=op_name, inputs={"X": x, "Y": y}, outputs={"Out": out})
+    else:
+        helper.append_op(type=op_name, inputs={"X": x}, outputs={"Out": out})
+
+    return out
+
+
+@templatedoc()
+def logical_and(x, y, out=None, name=None):
+    """
+    ${comment}
+
+    Args:
+        x(${x_type}): ${x_comment}
+        y(${y_type}): ${y_comment}
+        out(Tensor): Output tensor of logical operation.
+        name(basestring|None): Name of the output.
+
+    Returns:
+        out(${out_type}): ${out_comment}
+    """
+
+    return _logical_op(
+        op_name="logical_and", x=x, y=y, name=name, out=out, binary_op=True)
+
+
+@templatedoc()
+def logical_or(x, y, out=None, name=None):
+    """
+    ${comment}
+
+    Args:
+        x(${x_type}): ${x_comment}
+        y(${y_type}): ${y_comment}
+        out(Tensor): Output tensor of logical operation.
+        name(basestring|None): Name of the output.
+
+    Returns:
+        out(${out_type}): ${out_comment}
+    """
+
+    return _logical_op(
+        op_name="logical_or", x=x, y=y, name=name, out=out, binary_op=True)
+
+
+@templatedoc()
+def logical_xor(x, y, out=None, name=None):
+    """
+    ${comment}
+
+    Args:
+        x(${x_type}): ${x_comment}
+        y(${y_type}): ${y_comment}
+        out(Tensor): Output tensor of logical operation.
+        name(basestring|None): Name of the output.
+
+    Returns:
+        out(${out_type}): ${out_comment}
+    """
+
+    return _logical_op(
+        op_name="logical_xor", x=x, y=y, name=name, out=out, binary_op=True)
+
+
+@templatedoc()
+def logical_not(x, out=None, name=None):
+    """
+    ${comment}
+
+    Args:
+        x(${x_type}): ${x_comment}
+        out(Tensor): Output tensor of logical operation.
+        name(basestring|None): Name of the output.
+
+    Returns:
+        out(${out_type}): ${out_comment}
+    """
+
+    return _logical_op(
+        op_name="logical_not", x=x, y=None, name=name, out=out, binary_op=False)
+
+
+@templatedoc()
+def clip(x, min, max, name=None):
+    """
+    ${comment}
+
+    Args:
+        x(${x_type}): ${x_comment}
+        min(${min_type}): ${min_comment}
+        max(${max_type}): ${max_comment}
+        name(basestring|None): Name of the output.
+
+    Returns:
+        out(${out_type}): ${out_comment}
+    """
+
+    helper = LayerHelper("clip", **locals())
+
+    if name is None:
+        out = helper.create_tmp_variable(dtype=x.dtype)
+    else:
+        out = helper.create_variable(
+            name=name, dtype=x.dtype, persistable=False)
+
+    helper.append_op(
+        type="clip",
+        inputs={"X": x},
+        attrs={"min": min,
+               "max": max},
+        outputs={"Out": out})
+
+    return out
+
+
+@templatedoc()
+def clip_by_norm(x, max_norm, name=None):
+    """
+    ${comment}
+
+    Args:
+        x(${x_type}): ${x_comment}
+        max_norm(${max_norm_type}): ${max_norm_comment}
+        name(basestring|None): Name of the output.
+
+    Returns:
+        out(${out_type}): ${out_comment}
+    """
+
+    helper = LayerHelper("clip_by_norm", **locals())
+
+    if name is None:
+        out = helper.create_tmp_variable(dtype=x.dtype)
+    else:
+        out = helper.create_variable(
+            name=name, dtype=x.dtype, persistable=False)
+
+    helper.append_op(
+        type="clip_by_norm",
+        inputs={"X": x},
+        attrs={"max_norm": max_norm},
+        outputs={"Out": out})
+
+    return out
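The shared _logical_op helper does three things: for binary ops it asserts both operands have the same dtype, it creates a temporary or named output variable, and it appends the underlying operator to the current program. A short end-to-end sketch of the new layers, assuming a Fluid 1.x install on CPU (the feed data is illustrative):

    import numpy as np
    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[3], dtype='bool')
    y = fluid.layers.data(name='y', shape=[3], dtype='bool')

    res_and = fluid.layers.logical_and(x=x, y=y)   # binary_op path
    res_not = fluid.layers.logical_not(x=x)        # unary path, no dtype assert

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    out_and, out_not = exe.run(
        fluid.default_main_program(),
        feed={
            'x': np.array([[True, False, True]]),
            'y': np.array([[True, True, False]]),
        },
        fetch_list=[res_and, res_not])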
python/paddle/fluid/layers/ops.py

@@ -39,12 +39,6 @@ __all__ = [
     'mean',
     'mul',
     'sigmoid_cross_entropy_with_logits',
-    'clip',
-    'clip_by_norm',
-    'logical_and',
-    'logical_or',
-    'logical_xor',
-    'logical_not',
     'maxout',
 ]
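Removing the six names from __all__ is sufficient to retire the old wrappers, because ops.py materializes a function for every listed name at import time. A simplified sketch of that generation pattern, adapted from the Fluid sources of this era (treat the exact module layout as an assumption):

    # ops.py (simplified): every name in __all__ becomes a generated layer
    # function with the opaque (*args, **kwargs) signature seen in API.spec.
    from .layer_function_generator import generate_layer_fn

    __all__ = ['mean', 'mul', 'sigmoid_cross_entropy_with_logits', 'maxout']

    for _OP in set(__all__):
        globals()[_OP] = generate_layer_fn(_OP)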