PaddlePaddle / Paddle
Commit 745ea4dc (unverified), authored Jun 15, 2018 by Yu Yang, committed via GitHub on Jun 15, 2018
Merge pull request #11354 from reyoung/feature/polish_doc
Polish documentation
Parents: 4c3eb448, 055df470
Showing 15 changed files with 553 additions and 261 deletions (+553, -261).
paddle/fluid/operators/activation_op.cc  +17 -17
paddle/fluid/operators/compare_op.cc  +15 -19
paddle/fluid/operators/cumsum_op.cc  +7 -7
paddle/fluid/operators/layer_norm_op.cc  +17 -16
paddle/fluid/operators/multiplex_op.cc  +32 -12
paddle/fluid/operators/reader/create_recordio_file_reader_op.cc  +7 -3
paddle/fluid/operators/reader/reader_op_registry.cc  +1 -1
paddle/fluid/operators/row_conv_op.cc  +19 -5
paddle/fluid/operators/uniform_random_op.cc  +7 -15
python/paddle/fluid/layers/control_flow.py  +178 -14
python/paddle/fluid/layers/detection.py  +83 -57
python/paddle/fluid/layers/io.py  +36 -16
python/paddle/fluid/layers/nn.py  +36 -72
python/paddle/fluid/layers/ops.py  +85 -4
python/paddle/fluid/layers/tensor.py  +13 -3
paddle/fluid/operators/activation_op.cc
...
...
@@ -271,18 +271,18 @@ class HardShrinkOpMaker : public framework::OpProtoAndCheckerMaker {
   void Make() override {
     AddInput("X", "Input of HardShrink operator");
     AddOutput("Out", "Output of HardShrink operator");
-    AddAttr<float>("threshold", "The value of threshold for HardShrink")
+    AddAttr<float>("threshold",
+                   "The value of threshold for HardShrink. [default: 0.5]")
         .SetDefault(0.5f);
     AddComment(R"DOC(
-HardShrink Activation Operator.
+:strong:`HardShrink activation operator`
 
-$$
-out = \begin{cases}
-    x, \text{if } x > \lambda \\
-    x, \text{if } x < -\lambda \\
-    0, \text{otherwise}
-  \end{cases}
-$$
+.. math::
+    out = \begin{cases}
+            x, \text{if } x > \lambda \\
+            x, \text{if } x < -\lambda \\
+            0, \text{otherwise}
+          \end{cases}
 )DOC");
   }
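A minimal NumPy sketch of the HardShrink semantics documented above (editor's illustration, not code from this commit; `hard_shrink` here is a local helper):

    import numpy as np

    def hard_shrink(x, threshold=0.5):
        # Pass values whose magnitude exceeds the threshold; zero out the rest.
        return np.where(np.abs(x) > threshold, x, 0.0)

    print(hard_shrink(np.array([-1.0, -0.3, 0.2, 0.9])))  # [-1.  0.  0.  0.9]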
...
...
@@ -394,18 +394,18 @@ class ThresholdedReluOpMaker : public framework::OpProtoAndCheckerMaker {
   void Make() override {
     AddInput("X", "Input of ThresholdedRelu operator");
     AddOutput("Out", "Output of ThresholdedRelu operator");
-    AddAttr<float>("threshold", "The threshold location of activation")
+    AddAttr<float>("threshold",
+                   "The threshold location of activation. [default 1.0].")
         .SetDefault(1.0f);
     AddComment(R"DOC(
-ThresholdedRelu Activation Operator.
+:strong:`ThresholdedRelu activation operator`
 
-$$
-out = \begin{cases}
-    x, \text{if } x > threshold \\
-    0, \text{otherwise}
-  \end{cases}
-$$
+.. math::
+    out = \begin{cases}
+            x, \text{if } x > threshold \\
+            0, \text{otherwise}
+          \end{cases}
 )DOC");
   }
 };
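The same formula in NumPy (editor's illustration, not commit code):

    import numpy as np

    def thresholded_relu(x, threshold=1.0):
        # Pass values strictly greater than the threshold; zero out the rest.
        return np.where(x > threshold, x, 0.0)

    print(thresholded_relu(np.array([0.5, 1.5, -2.0, 3.0])))  # [0.  1.5  0.  3.]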
...
...
paddle/fluid/operators/compare_op.cc
...
...
@@ -23,30 +23,26 @@ class CompareOpProtoMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
     OpComment comment;
-    AddInput("X", string::Sprintf("(LoDTensor) the left hand operand of %s operator",
-                                  comment.type));
-    AddInput("Y", string::Sprintf("(LoDTensor) the right hand operand of %s operator",
-                                  comment.type));
+    AddInput("X", string::Sprintf("the left hand operand of %s operator",
+                                  comment.type));
+    AddInput("Y", string::Sprintf("the right hand operand of %s operator",
+                                  comment.type));
     AddAttr<bool>("force_cpu",
-                  "(bool, default false) Force fill output variable to cpu "
+                  "Force fill output variable to cpu "
                   "memory. Otherwise, fill output variable to the running "
-                  "device")
-        .SetDefault(false);
-    AddOutput("Out", string::Sprintf("(LoDTensor) n-dim bool tensor. Each element is %s",
-                                     comment.equation));
-    AddComment(string::Sprintf(R"DOC(%s Operator
+                  "device [default true].")
+        .SetDefault(true);
+    AddOutput("Out", string::Sprintf("n-dim bool tensor. Each element is %s",
+                                     comment.equation));
+    AddComment(string::Sprintf(R"DOC(
 It operates element-wise on X and Y, and returns the Out. Each of them is a
 N-dim tensor. X and Y could be any type. The each element of the Out tensor is
-calculated by %s
+calculated by $%s$
 )DOC",
                                comment.type, comment.equation));
-    AddAttr<int>("axis",
-                 "(int, default -1). The start dimension index "
-                 "for broadcasting Y onto X.")
+    AddAttr<int>("axis",
+                 "The start dimension index for broadcasting Y onto X. [default -1]")
         .SetDefault(-1)
        .EqualGreaterThan(-1);
   }
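Elementwise comparison with broadcasting, sketched in NumPy (editor's illustration, not commit code):

    import numpy as np

    x = np.array([[1, 2, 3],
                  [4, 5, 6]])
    y = np.array([2, 5, 6])  # broadcast onto x along the trailing axis

    out = x < y              # n-dim bool tensor, one element per comparison
    print(out)               # [[ True  True  True]
                             #  [False False False]]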
...
...
paddle/fluid/operators/cumsum_op.cc
...
...
@@ -30,19 +30,19 @@ class CumOp : public framework::OperatorWithKernel {
 class CumsumOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
-    AddInput("X", "Input of Cumsum operator");
-    AddOutput("Out", "Output of Cumsum operator");
+    AddInput("X", "Input of cumsum operator");
+    AddOutput("Out", "Output of cumsum operator");
     AddAttr<int>("axis",
-                 "(int, default -1). The dimenstion to accumulate along. "
-                 "-1 means the last dimenstion")
+                 "The dimenstion to accumulate along. -1 means the last "
+                 "dimenstion [default -1].")
         .SetDefault(-1)
         .EqualGreaterThan(-1);
     AddAttr<bool>("exclusive",
-                  "bool, default false). Whether to perform exclusive cumsum")
+                  "Whether to perform exclusive cumsum. [default false].")
         .SetDefault(false);
     AddAttr<bool>("reverse",
-                  "bool, default false). If true, the cumsum is performed in "
-                  "the reversed direction")
+                  "If true, the cumsum is performed in the reversed direction. "
+                  "[default false].")
         .SetDefault(false);
     AddComment(R"DOC(
 The cumulative sum of the elements along a given axis.
...
...
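The three cumsum attributes, sketched with NumPy (editor's illustration, not commit code; `exclusive` and `reverse` are emulated by hand):

    import numpy as np

    x = np.array([1, 2, 3, 4])

    print(np.cumsum(x))                              # inclusive:      [ 1  3  6 10]
    print(np.concatenate(([0], np.cumsum(x)[:-1])))  # exclusive=True: [0 1 3 6]
    print(np.cumsum(x[::-1])[::-1])                  # reverse=True:   [10  9  7  4]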
paddle/fluid/operators/layer_norm_op.cc
...
...
@@ -62,36 +62,33 @@ class LayerNormOp : public framework::OperatorWithKernel {
 class LayerNormOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
-    AddInput("X", "(LoDTensor) The input tensor.");
+    AddInput("X", "The input tensor.");
     AddInput("Scale",
-             "(Tensor, optional) Scale is a 1-dimensional tensor of size "
+             "(optional) Scale is a 1-dimensional tensor of size "
              "H(`begin_norm_axis` splits the tensor(`X`) to a matrix [N,H])."
              "It is applied to the output.")
         .AsDispensable();
     AddInput("Bias",
-             "(Tensor, optional) Bias is a 1-dimensional tensor of size "
+             "(optional) Bias is a 1-dimensional tensor of size "
              "H(`begin_norm_axis` splits the tensor(`X`) to a matrix [N,H])."
              "It is applied to the output.")
         .AsDispensable();
-    AddOutput("Y", "(LoDTensor) Result after normalization.");
-    AddOutput("Mean", "(Tensor) Mean of the current mini batch.")
-        .AsIntermediate();
-    AddOutput("Variance", "(Tensor) Variance of the current mini batch.")
+    AddOutput("Y", "Result after normalization.");
+    AddOutput("Mean", "Mean of the current mini batch.").AsIntermediate();
+    AddOutput("Variance", "Variance of the current mini batch.")
         .AsIntermediate();
     AddAttr<float>("epsilon",
-                   "(float, default 1e-5) Constant for "
-                   "numerical stability")
+                   "Constant for numerical stability [default 1e-5].")
         .SetDefault(1e-5)
         .AddCustomChecker([](const float &epsilon) {
           PADDLE_ENFORCE(epsilon >= 0.0f && epsilon <= 0.001f,
                          "'epsilon' should be between 0.0 and 0.001.");
         });
     AddAttr<int>("begin_norm_axis",
-                 "(int default:1), the "
-                 "axis of `begin_norm_axis ... Rank(X) - 1` will be "
+                 "the axis of `begin_norm_axis ... Rank(X) - 1` will be "
                  "normalized. `begin_norm_axis` splits the tensor(`X`) to a "
-                 "matrix [N,H].")
+                 "matrix [N,H]. [default 1].")
         .SetDefault(1)
         .AddCustomChecker([](const int &begin_norm_axis) {
           PADDLE_ENFORCE_GT(begin_norm_axis, 0,
...
...
@@ -99,10 +96,14 @@ class LayerNormOpMaker : public framework::OpProtoAndCheckerMaker {
         });
     AddComment(R"DOC(
-Layer Normalization.
-Layer Norm has been implemented as discussed in the paper:
-https://arxiv.org/abs/1607.06450
-...
+Assume feature vectors exist on dimensions
+:attr:`begin_norm_axis ... rank(input)` and calculate the moment statistics
+along these dimensions for each feature vector :math:`a` with size
+:math:`H`, then normalize each feature vector using the corresponding
+statistics. After that, apply learnable gain and bias on the normalized
+tensor to scale and shift if :attr:`scale` and :attr:`shift` are set.
+
+Refer to `Layer Normalization <https://arxiv.org/pdf/1607.06450v1.pdf>`_
 )DOC");
   }
 };
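The normalization the comment describes, sketched in NumPy (editor's illustration, not commit code; shapes and parameter handling are simplified assumptions):

    import numpy as np

    def layer_norm(x, gain, bias, begin_norm_axis=1, epsilon=1e-5):
        # Flatten x to a [N, H] matrix as `begin_norm_axis` describes.
        N = int(np.prod(x.shape[:begin_norm_axis]))
        a = x.reshape(N, -1)
        mu = a.mean(axis=1, keepdims=True)     # per-feature-vector mean
        sigma = a.std(axis=1, keepdims=True)   # per-feature-vector std
        y = gain * (a - mu) / (sigma + epsilon) + bias
        return y.reshape(x.shape)

    x = np.random.randn(2, 8).astype('float32')
    y = layer_norm(x, gain=np.ones(8), bias=np.zeros(8))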
...
...
paddle/fluid/operators/multiplex_op.cc
...
...
@@ -62,26 +62,46 @@ class MultiplexOp : public framework::OperatorWithKernel {
 class MultiplexOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
-    AddInput("Ids", "The index tensor of multiplex operator.");
-    AddInput("X", "The candidate tensors of multiplex operator.")
+    AddInput("Ids",
+             "Tensor<int32>, index variable which is a 2-D tensor with shape "
+             "[M, 1] where M is the batch size.");
+    AddInput("X",
+             "A list of variables to gather from. All variables have the same "
+             "shape and the rank is at least 2.")
         .AsDuplicable();
     AddOutput("Out", "The output tensor of multiplex operator.");
     AddComment(R"DOC(
-Multiplex Operator.
-Multiplex multiple tensors according to the index provided by the index tensor.
-Ids: the index tensor.
-X[0 : N - 1]: the candidate tensors for output (N >= 2).
-For each index i from 0 to batchSize - 1, the output is the i-th row of the
+Referring to the given index variable, this layer selects rows from the
+input variables to construct a multiplex variable. Assuming that there are
+:math:`m` input variables and :math:`I_i` represents the i-th input
+variable and :math:`i` is in [0, :math:`m`). All input variables are
+tensors with same shape [:math:`d_0`, :math:`d_1`, ..., :math:`d_R`].
+Please note that rank of the input tensor should be at least 2. Each input
+variable will be treated as a 2-D matrix with shape [:math:`M`, :math:`N`]
+where :math:`M` for :math:`d_0` and :math:`N` for :math:`d_1` * :math:`d_2`
+* ... * :math:`d_R`. Let :math:`I_i[j]` be the j-th row of the i-th input
+variable. The given index variable should be a 2-D tensor with shape
+[:math:`M`, 1]. Let `ID[i]` be the i-th index value of the index variable.
+Then the output variable will be a tensor with shape [:math:`d_0`,
+:math:`d_1`, ..., :math:`d_R`]. If we treat the output tensor as a 2-D
+matrix with shape [:math:`M`, :math:`N`] and let :math:`O[i]` be the i-th
+row of the matrix, then `O[i]` is equal to :math:`I_{ID[i]}[i]`.
+
+* Ids: the index tensor.
+* X[0 : N - 1]: the candidate tensors for output (N >= 2).
+* For each index i from 0 to batchSize - 1, the output is the i-th row of the
 the (Ids[i])-th tensor.
 
 For i-th row of the output tensor:
-$$y[i] = x_{k}[i]$$
+$$
+y[i] = x_{k}[i]
+$$
 
-where `y` is the output tensor, `x_{k}` is the k-th input tensor,
-and `k = Ids[i]`.
+where $y$ is the output tensor, $x_{k}$ is the k-th input tensor,
+and $k = Ids[i]$.
 )DOC");
   }
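The row-gathering behaviour, sketched in NumPy (editor's illustration, not commit code):

    import numpy as np

    x1 = np.array([[1., 2.], [3., 4.]])
    x2 = np.array([[5., 6.], [7., 8.]])
    ids = np.array([[1], [0]])             # [M, 1] index variable

    stacked = np.stack([x1, x2])           # [m, M, N]
    out = np.array([stacked[k, i] for i, (k,) in enumerate(ids)])
    print(out)                             # [[5. 6.]
                                           #  [3. 4.]]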
...
...
paddle/fluid/operators/reader/create_recordio_file_reader_op.cc
...
...
@@ -78,11 +78,15 @@ class CreateRecordIOReaderOp : public framework::OperatorBase {
 class CreateRecordIOReaderOpMaker : public FileReaderMakerBase {
  protected:
   void Apply() override {
-    AddAttr<std::string>("filename", "The filename of record io reader");
+    AddAttr<std::string>(
+        "filename",
+        "The filename of record file. This file will given to reader.");
     AddComment(R"DOC(
-CreateRecordIOReader Operator
+Open a recordio file and return the reader object. The returned reader object
+is thread-safe.
 
-Create a reader from a record io file
+NOTE: This is a very low-level API. It is used for debugging data file or
+training. Please use `open_files` instead of this API for production usage.
 )DOC");
   }
 };
...
...
paddle/fluid/operators/reader/reader_op_registry.cc
...
...
@@ -54,7 +54,7 @@ std::unique_ptr<framework::ReaderBase> CreateReaderByFileName(
 }
 
 void FileReaderMakerBase::Make() {
-  AddOutput("Out", "(ReaderHolder) The created random reader.").AsDuplicable();
+  AddOutput("Out", "(ReaderHolder): The created random reader.").AsDuplicable();
   AddAttr<std::vector<int>>("shape_concat", "The concat of all data's shapes.");
   AddAttr<std::vector<int>>("ranks",
...
...
paddle/fluid/operators/row_conv_op.cc
...
...
@@ -78,23 +78,23 @@ class RowConvOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
     AddInput("X",
-             "(LoDTensor), the input(X) is a LodTensor, which supports "
+             "the input(X) is a LodTensor, which supports "
              "variable time-length input sequences. The underlying tensor "
              "in this LoDTensor is a matrix with shape (T x N), where T "
              "is the total time steps in this mini-batch and N is the input "
              "data dimension.");
     AddInput("Filter",
-             "(Tensor), the input(Filter) is a learnable parameter. It "
+             "the input(Filter) is a learnable parameter. It "
              "is a 2-D tensor with shape (future_context x N), where, "
              "future_context is the future context length and N is the data "
              "dimension.");
     AddOutput("Out",
-              "(LoDTensor), the output(Out) is a LodTensor, which supports "
+              "the output(Out) is a LodTensor, which supports "
               "variable time-length input sequences. The underlying tensor "
               "in this LodTensor is a matrix with shape T x N, i.e., the "
               "same shape as X.");
     AddComment(R"DOC(
-Row-convolution Operator.
+:strong:`Row-convolution operator`
 
 The row convolution is called lookahead convolution. This operator was
 introduced in the following paper for DeepSpeech2:
...
...
@@ -114,9 +114,23 @@ and a filter ($W$) of size $context \times d$,
 the output sequence is convolved as:
 
 $$
-out_{i, :} = \sum_{j=i}^{i + context} in_{j,:} \dot W_{i-j, :}
+out_{i, :} = \\sum_{j=i}^{i + context} in_{j,:} \\cdot W_{i-j, :}
 $$
 
+In the above equation:
+
+* $Out_{i}$: The i-th row of output variable with shape [1, D].
+* $\\tau$: Future context size.
+* $X_{j}$: The j-th row of input variable with shape [1, D].
+* $W_{i-j}$: The (i-j)-th row of parameters with shape [1, D].
+
+More details about row_conv please refer to
+the design document
+https://github.com/PaddlePaddle/Paddle/issues/2228#issuecomment-303903645 .
+
 )DOC");
   }
 };
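The lookahead convolution above, sketched in NumPy (editor's illustration, not commit code):

    import numpy as np

    def row_conv(x, w):
        # x: [T, D] input sequence; w: [context, D] lookahead filter rows.
        T, D = x.shape
        context = w.shape[0]
        out = np.zeros_like(x)
        for i in range(T):
            for j in range(i, min(i + context, T)):
                out[i] += x[j] * w[j - i]  # out_i = sum_j in_j * W_{j-i}
        return out

    out = row_conv(np.random.randn(5, 3), np.random.randn(2, 3))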
...
...
paddle/fluid/operators/uniform_random_op.cc
...
...
@@ -86,32 +86,24 @@ class UniformRandomOp : public framework::OperatorWithKernel {
 class UniformRandomOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
-    AddOutput("Out", "(Tensor) The output tensor of uniform random op");
+    AddOutput("Out", "The output tensor of uniform random op");
     AddComment(R"DOC(
-Uniform random operator.
-
 This operator initializes a tensor with random values sampled from a
-uniform distribution.
+uniform distribution. The random result is in set [min, max].
 
 )DOC");
-    AddAttr<std::vector<int>>("shape",
-                              "(vector<int>) The shape of the output tensor");
-    AddAttr<float>("min",
-                   "(float, default -1.0) "
-                   "Minimum value of uniform random")
+    AddAttr<std::vector<int>>("shape", "The shape of the output tensor");
+    AddAttr<float>("min", "Minimum value of uniform random. [default -1.0].")
         .SetDefault(-1.0f);
-    AddAttr<float>("max",
-                   "(float, default 1.0) "
-                   "Maximun value of uniform random")
+    AddAttr<float>("max", "Maximun value of uniform random. [default 1.0].")
         .SetDefault(1.0f);
     AddAttr<int>("seed",
-                 "(int, default 0) "
                  "Random seed used for generating samples. "
                  "0 means use a seed generated by the system."
                  "Note that if seed is not 0, this operator will always "
-                 "generate the same random numbers every time.")
+                 "generate the same random numbers every time. [default 0].")
         .SetDefault(0);
-    AddAttr<int>("dtype", "(int, default 5(FP32)) Output tensor data type")
+    AddAttr<int>("dtype", "Output tensor data type. [default 5(FP32)].")
         .SetDefault(framework::proto::VarType::FP32);
   }
 };
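The attribute semantics, sketched with NumPy (editor's illustration, not commit code; NumPy's seeding model differs from the operator's seed=0 behaviour):

    import numpy as np

    rng = np.random.RandomState(0)  # a fixed seed reproduces the same numbers
    out = rng.uniform(low=-1.0, high=1.0, size=(32, 784)).astype('float32')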
...
...
python/paddle/fluid/layers/control_flow.py
...
...
@@ -20,6 +20,7 @@ from ..framework import Program, Variable, Operator
 from ..layer_helper import LayerHelper, unique_name
 from ..initializer import force_init_on_cpu
+from ops import logical_and, logical_not, logical_or
 import numpy
 
 __all__ = [
     'split_lod_tensor',
...
...
@@ -909,37 +910,40 @@ def create_array(dtype):
         dtype=dtype)
 
 
-def less_than(x, y, force_cpu=True, cond=None, **ignored):
+@templatedoc()
+def less_than(x, y, force_cpu=None, cond=None, **ignored):
     """
-    **Less than**
-
-    This layer returns the truth value of :math:`x < y` elementwise.
+    ${comment}
+
+    >>> import paddle.fluid as fluid
+    >>> less = fluid.layers.less_than(x=label, y=limit)
 
     Args:
-        x(Variable): First operand of *less_than*
-        y(Variable): Second operand of *less_than*
-        force_cpu(Bool|True): The output data will be on CPU if set true.
+        x(${x_type}): ${x_comment}.
+        y(${y_type}): ${y_comment}.
+        force_cpu(${force_cpu_type}): ${force_cpu_comment}.
         cond(Variable|None): Optional output variable to store the result of *less_than*
 
     Returns:
-        Variable: The tensor variable storing the output of *less_than*.
-
-    Examples:
-        .. code-block:: python
-
-          less = fluid.layers.less_than(x=label, y=limit)
+        ${out_comment}.
     """
     helper = LayerHelper("less_than", **locals())
     if cond is None:
         cond = helper.create_tmp_variable(dtype='bool')
         cond.stop_gradient = True
 
+    attrs = dict()
+    if force_cpu is not None:
+        attrs['force_cpu'] = force_cpu
+    elif force_init_on_cpu():
+        attrs['force_cpu'] = force_init_on_cpu()
+
     helper.append_op(
         type='less_than',
         inputs={'X': [x], 'Y': [y]},
         outputs={'Out': [cond]},
-        attrs={'force_cpu': force_cpu or force_init_on_cpu()})
+        attrs=attrs)
     return cond
...
...
@@ -1004,8 +1008,28 @@ def array_read(array, i):
 def shrink_memory(x, i, table):
     """
-    This function creates an operator to shrink _rnn_ memory using the RankTable
+    This function creates an operator to shrink rnn memory using the RankTable
     as mentioned in the input parameter.
+
+    NOTE: This API is a very low-level API. It is used by DynamicRNN only.
+
+    Since the Dynamic RNN uses a no-padding way to implement RNN, the sequences
+    are sorted by length, and the length of valid memory will shrink after
+    each time step.
+
+    Args:
+        x(Variable): The memory object in the previous time step.
+        i(Variable): The step count variable. A int scalar as LoDTensor.
+        table(Variable): The RNNRankTable object.
+
+    Returns:
+        the memory variable after shrink.
+
+    Examples:
+        Since this API is a very low level API, an example is not provided.
+        Please reference the implementation of class DynamicRNN for detailed
+        usage.
     """
     helper = LayerHelper('shrink_memory', **locals())
     out = helper.create_tmp_variable(dtype=x.dtype)
...
...
@@ -1339,6 +1363,38 @@ class IfElse(object):
 class DynamicRNN(object):
+    """
+    The dynamic RNN can process a batch of sequence data. The length of each
+    sample sequence can be different. This API automatically processes them in
+    batch.
+
+    The input lod must be set. Please reference `lod_tensor`.
+
+    >>> import paddle.fluid as fluid
+    >>> data = fluid.layers.data(name='sentence', dtype='int64', lod_level=1)
+    >>> embedding = fluid.layers.embedding(input=data, size=[65535, 32],
+    >>>                                    is_sparse=True)
+    >>>
+    >>> drnn = fluid.layers.DynamicRNN()
+    >>> with drnn.block():
+    >>>     word = drnn.step_input(embedding)
+    >>>     prev = drnn.memory(shape=[200])
+    >>>     hidden = fluid.layers.fc(input=[word, prev], size=200, act='relu')
+    >>>     drnn.update_memory(prev, hidden)  # set prev to hidden
+    >>>     drnn.output(hidden)
+    >>>
+    >>> # last is the last time step of rnn. It is the encoding result.
+    >>> last = fluid.layers.sequence_last_step(drnn())
+
+    The dynamic RNN will unfold the sequence into timesteps. Users need to
+    define how to process each time step during the :code:`with` block.
+
+    The `memory` is used for staging data across time steps. The initial value
+    of memory can be zero or another variable.
+
+    The dynamic RNN can mark multiple variables as its output. Use `drnn()` to
+    get the output sequence.
+    """
     BEFORE_RNN = 0
     IN_RNN = 1
     AFTER_RNN = 2
...
...
@@ -1361,6 +1417,15 @@
         self.mem_link = []
 
     def step_input(self, x):
+        """
+        Mark a sequence as a dynamic RNN input.
+
+        Args:
+            x(Variable): The input sequence.
+
+        Returns:
+            The current timestep in the input sequence.
+        """
         self._assert_in_rnn_block_("step_input")
         if not isinstance(x, Variable):
             raise TypeError(
...
...
@@ -1404,6 +1469,15 @@
         return array_read(array=input_array, i=self.step_idx)
 
     def static_input(self, x):
+        """
+        Mark a variable as a RNN input. The input will not be scattered into
+        time steps.
+
+        Args:
+            x(Variable): The input variable.
+
+        Returns:
+            The input variable that can access in RNN.
+        """
         self._assert_in_rnn_block_("static_input")
         if not isinstance(x, Variable):
             raise TypeError(
...
...
@@ -1425,6 +1499,10 @@
     @contextlib.contextmanager
     def block(self):
+        """
+        The block for user to define operators in RNN. See the class docstring
+        for more details.
+        """
         if self.status != DynamicRNN.BEFORE_RNN:
             raise ValueError("rnn.block() can only be invoke once")
         self.step_idx = fill_constant(
...
...
@@ -1451,6 +1529,9 @@
                     x=each_array, table=self.lod_rank_table))
 
     def __call__(self, *args, **kwargs):
+        """
+        Get the output of RNN. This API should only be invoked after RNN.block().
+        """
         if self.status != DynamicRNN.AFTER_RNN:
             raise ValueError(("Output of the dynamic RNN can only be visited "
                               "outside the rnn block."))
...
...
@@ -1465,6 +1546,70 @@
                value=0.0,
                need_reorder=False,
                dtype='float32'):
+        """
+        Create a memory variable for dynamic rnn.
+
+        If the :code:`init` is not None, :code:`memory` will be initialized by
+        this variable. The :code:`need_reorder` is used to reorder the memory as
+        the input variable. It should be set to true when the initialized memory
+        depends on the input sample.
+
+        For example,
+
+        >>> import paddle.fluid as fluid
+        >>> sentence = fluid.layers.data(
+        >>>     name='sentence', dtype='float32', shape=[32])
+        >>> boot_memory = fluid.layers.data(
+        >>>     name='boot', dtype='float32', shape=[10])
+        >>>
+        >>> drnn = fluid.layers.DynamicRNN()
+        >>> with drnn.block():
+        >>>     word = drnn.step_input(sentence)
+        >>>     memory = drnn.memory(init=boot_memory, need_reorder=True)
+        >>>     hidden = fluid.layers.fc(
+        >>>         input=[word, memory], size=10, act='tanh')
+        >>>     drnn.update_memory(ex_mem=memory, new_mem=hidden)
+        >>>     drnn.output(hidden)
+        >>> rnn_output = drnn()
+
+        Otherwise, if :code:`shape`, :code:`value`, :code:`dtype` are set, the
+        :code:`memory` will be initialized by this :code:`value`.
+
+        For example,
+
+        >>> import paddle.fluid as fluid
+        >>> sentence = fluid.layers.data(
+        >>>     name='sentence', dtype='float32', shape=[32])
+        >>>
+        >>> drnn = fluid.layers.DynamicRNN()
+        >>> with drnn.block():
+        >>>     word = drnn.step_input(sentence)
+        >>>     memory = drnn.memory(shape=[10], dtype='float32', value=0)
+        >>>     hidden = fluid.layers.fc(
+        >>>         input=[word, memory], size=10, act='tanh')
+        >>>     drnn.update_memory(ex_mem=memory, new_mem=hidden)
+        >>>     drnn.output(hidden)
+        >>> rnn_output = drnn()
+
+        Args:
+            init(Variable|None): The initialized variable.
+            shape(list|tuple): The memory shape. NOTE the shape does not contain
+                batch_size.
+            value(float): the initialized value.
+            need_reorder(bool): True if the initialized memory depends on the
+                input sample.
+            dtype(str|numpy.dtype): The data type of the initialized memory.
+
+        Returns:
+            the memory variable.
+        """
         self._assert_in_rnn_block_('memory')
         if init is not None:
             if not isinstance(init, Variable):
...
...
@@ -1532,6 +1677,16 @@
             return self.memory(init=init)
 
     def update_memory(self, ex_mem, new_mem):
+        """
+        Update the memory from ex_mem to new_mem. NOTE that the shape and data
+        type of :code:`ex_mem` and :code:`new_mem` must be same.
+
+        Args:
+            ex_mem(Variable): the memory variable.
+            new_mem(Variable): the plain variable generated in RNN block.
+
+        Returns:
+            None
+        """
         self._assert_in_rnn_block_('update_memory')
         if not isinstance(ex_mem, Variable):
             raise TypeError("The input arg `ex_mem` of update_memory() must "
...
...
@@ -1549,6 +1704,15 @@
         self.mem_link.append((new_mem, mem_array))
 
     def output(self, *outputs):
+        """
+        Mark the RNN output variables.
+
+        Args:
+            outputs: The output variables.
+
+        Returns:
+            None
+        """
         self._assert_in_rnn_block_('output')
         parent_block = self._parent_block_()
         for each in outputs:
...
...
python/paddle/fluid/layers/detection.py
...
...
@@ -210,53 +210,68 @@ def bipartite_match(dist_matrix,
                     dist_threshold=None,
                     name=None):
     """
-    **Bipartite matchint operator**
-
-    This operator is a greedy bipartite matching algorithm, which is used to
-    obtain the matching with the maximum distance based on the input
+    This operator implements a greedy bipartite matching algorithm, which is
+    used to obtain the matching with the maximum distance based on the input
     distance matrix. For input 2D matrix, the bipartite matching algorithm can
-    find the matched column for each row, also can find the matched row for
-    each column. And this operator only calculate matched indices from column
-    to row. For each instance, the number of matched indices is the number of
-    of columns of the input ditance matrix.
-
-    There are two outputs to save matched indices and distance.
-    A simple description, this algothrim matched the best (maximum distance)
+    find the matched column for each row (matched means the largest distance),
+    also can find the matched row for each column. And this operator only
+    calculate matched indices from column to row. For each instance,
+    the number of matched indices is the column number of the input distance
+    matrix.
+
+    There are two outputs, matched indices and distance.
+    A simple description, this algorithm matched the best (maximum distance)
     row entity to the column entity and the matched indices are not duplicated
     in each row of ColToRowMatchIndices. If the column entity is not matched
    any row entity, set -1 in ColToRowMatchIndices.
 
-    Please note that the input DistMat can be LoDTensor (with LoD) or Tensor.
+    NOTE: the input DistMat can be LoDTensor (with LoD) or Tensor.
     If LoDTensor with LoD, the height of ColToRowMatchIndices is batch size.
     If Tensor, the height of ColToRowMatchIndices is 1.
 
+    NOTE: This API is a very low level API. It is used by :code:`ssd_loss`
+    layer. Please consider to use :code:`ssd_loss` instead.
+
     Args:
         dist_matrix(Variable): This input is a 2-D LoDTensor with shape
             [K, M]. It is pair-wise distance matrix between the entities
             represented by each row and each column. For example, assumed one
             entity is A with shape [K], another entity is B with shape [M]. The
-            dist_matirx[i][j] is the distance between A[i] and B[j]. The bigger
-            the distance is, the better macthing the pairs are. Please note,
-            This tensor can contain LoD information to represent a batch of
-            inputs. One instance of this batch can contain different numbers of
-            entities.
+            dist_matrix[i][j] is the distance between A[i] and B[j]. The bigger
+            the distance is, the better matching the pairs are.
+
+            NOTE: This tensor can contain LoD information to represent a batch
+            of inputs. One instance of this batch can contain different numbers
+            of entities.
         match_type(string|None): The type of matching method, should be
-           'bipartite' or 'per_prediction', 'bipartite' by defalut.
+           'bipartite' or 'per_prediction'. [default 'bipartite'].
         dist_threshold(float|None): If `match_type` is 'per_prediction',
             this threshold is to determine the extra matching bboxes based
-            on the maximum distance, 0.5 by defalut.
+            on the maximum distance, 0.5 by default.
     Returns:
-        match_indices(Variable): A 2-D Tensor with shape [N, M] in int type.
-            N is the batch size. If match_indices[i][j] is -1, it
-            means B[j] does not match any entity in i-th instance.
-            Otherwise, it means B[j] is matched to row
-            match_indices[i][j] in i-th instance. The row number of
-            i-th instance is saved in match_indices[i][j].
-        match_distance(Variable): A 2-D Tensor with shape [N, M] in float type.
-            N is batch size. If match_indices[i][j] is -1,
-            match_distance[i][j] is also -1.0. Otherwise, assumed
-            match_distance[i][j] = d, and the row offsets of each instance
-            are called LoD. Then match_distance[i][j] = dist_matrix[d+LoD[i]][j].
+        tuple: a tuple with two elements is returned. The first is
+        matched_indices, the second is matched_distance.
+
+        The matched_indices is a 2-D Tensor with shape [N, M] in int type.
+        N is the batch size. If match_indices[i][j] is -1, it
+        means B[j] does not match any entity in i-th instance.
+        Otherwise, it means B[j] is matched to row
+        match_indices[i][j] in i-th instance. The row number of
+        i-th instance is saved in match_indices[i][j].
+
+        The matched_distance is a 2-D Tensor with shape [N, M] in float type.
+        N is batch size. If match_indices[i][j] is -1,
+        match_distance[i][j] is also -1.0. Otherwise, assumed
+        match_distance[i][j] = d, and the row offsets of each instance
+        are called LoD. Then match_distance[i][j] =
+        dist_matrix[d+LoD[i]][j].
+
+    Examples:
+
+        >>> x = fluid.layers.data(name='x', shape=[4], dtype='float32')
+        >>> y = fluid.layers.data(name='y', shape=[4], dtype='float32')
+        >>> iou = fluid.layers.iou_similarity(x=x, y=y)
+        >>> matched_indices, matched_dist = fluid.layers.bipartite_match(iou)
     """
     helper = LayerHelper('bipartite_match', **locals())
     match_indices = helper.create_tmp_variable(dtype='int32')
...
...
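The greedy column-to-row matching described above, sketched in NumPy (editor's illustration, not commit code; assumes non-negative distances such as IOU):

    import numpy as np

    def greedy_bipartite_match(dist):
        # dist: [K, M] distance matrix; match each column to a distinct row,
        # always taking the globally largest remaining distance first.
        K, M = dist.shape
        match_indices = -np.ones(M, dtype=np.int64)
        match_dist = np.zeros(M, dtype=dist.dtype)
        d = dist.astype('float64').copy()
        for _ in range(min(K, M)):
            i, j = np.unravel_index(np.argmax(d), d.shape)
            if d[i, j] < 0:
                break
            match_indices[j] = i
            match_dist[j] = dist[i, j]
            d[i, :] = -1.0  # row i is consumed
            d[:, j] = -1.0  # column j is matched
        return match_indices, match_dist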
@@ -364,7 +379,7 @@ def ssd_loss(location,
              normalize=True,
              sample_size=None):
     """
-    **Multi-box loss layer for object dection algorithm of SSD**
+    **Multi-box loss layer for object detection algorithm of SSD**
 
     This layer is to compute dection loss for SSD given the location offset
     predictions, confidence predictions, prior boxes and ground-truth boudding
...
...
@@ -372,21 +387,35 @@ def ssd_loss(location,
     is a weighted sum of the localization loss (or regression loss) and
     confidence loss (or classification loss) by performing the following steps:
 
-    1. Find matched boundding box by bipartite matching algorithm.
+    1. Find matched bounding box by bipartite matching algorithm.
+
       1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
+
       1.2 Compute matched bounding box by bipartite matching algorithm.
+
     2. Compute confidence for mining hard examples
+
       2.1. Get the target label based on matched indices.
+
       2.2. Compute confidence loss.
+
     3. Apply hard example mining to get the negative example indices and update
        the matched indices.
+
     4. Assign classification and regression targets
+
       4.1. Encoded bbox according to the prior boxes.
+
       4.2. Assign regression targets.
+
       4.3. Assign classification targets.
+
     5. Compute the overall objective loss.
+
       5.1 Compute confidence loss.
+
       5.2 Compute localization loss.
+
       5.3 Compute the overall weighted loss.
 
     Args:
...
...
@@ -421,39 +450,36 @@ def ssd_loss(location,
         mining_type (str): The hard example mining type, should be 'hard_example'
             or 'max_negative', now only support `max_negative`.
         normalize (bool): Whether to normalize the SSD loss by the total number
-            of output locations, True by defalut.
+            of output locations, True by default.
         sample_size (int): The max sample size of negative box, used only when
             mining_type is 'hard_example'.
 
     Returns:
-        Variable: The weighted sum of the localization loss and confidence loss,
-            with shape [N * Np, 1], N and Np are the same as they are
-            in `location`.
+        The weighted sum of the localization loss and confidence loss, with \
+        shape [N * Np, 1], N and Np are the same as they are in `location`.
 
     Raises:
-        ValueError: If mining_type is 'hard_example', now only support mining
-            type of `max_negative`.
+        ValueError: If mining_type is 'hard_example', now only support mining \
+        type of `max_negative`.
 
     Examples:
-        .. code-block:: python
-
-            pb = layers.data(
-                name='prior_box',
-                shape=[10, 4],
-                append_batch_size=False,
-                dtype='float32')
-            pbv = layers.data(
-                name='prior_box_var',
-                shape=[10, 4],
-                append_batch_size=False,
-                dtype='float32')
-            loc = layers.data(name='target_box', shape=[10, 4], dtype='float32')
-            scores = layers.data(name='scores', shape=[10, 21], dtype='float32')
-            gt_box = layers.data(
-                name='gt_box', shape=[4], lod_level=1, dtype='float32')
-            gt_label = layers.data(
-                name='gt_label', shape=[1], lod_level=1, dtype='float32')
-            loss = layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
+        >>> pb = fluid.layers.data(
+        >>>                   name='prior_box',
+        >>>                   shape=[10, 4],
+        >>>                   append_batch_size=False,
+        >>>                   dtype='float32')
+        >>> pbv = fluid.layers.data(
+        >>>                   name='prior_box_var',
+        >>>                   shape=[10, 4],
+        >>>                   append_batch_size=False,
+        >>>                   dtype='float32')
+        >>> loc = fluid.layers.data(name='target_box', shape=[10, 4], dtype='float32')
+        >>> scores = fluid.layers.data(name='scores', shape=[10, 21], dtype='float32')
+        >>> gt_box = fluid.layers.data(
+        >>>         name='gt_box', shape=[4], lod_level=1, dtype='float32')
+        >>> gt_label = fluid.layers.data(
+        >>>         name='gt_label', shape=[1], lod_level=1, dtype='float32')
+        >>> loss = fluid.layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
     """
     helper = LayerHelper('ssd_loss', **locals())
...
...
python/paddle/fluid/layers/io.py
...
...
@@ -292,6 +292,7 @@ def _copy_reader_create_op_(block, op):
     return new_op
 
 
+@templatedoc(op_type='create_recordio_file_reader')
 def open_recordio_file(filename,
                        shapes,
                        lod_levels,
...
...
@@ -299,34 +300,30 @@ def open_recordio_file(filename,
                        pass_num=1,
                        for_parallel=True):
     """
-    Open a RecordIO file
-
-    This layer takes a RecordIO file to read from and returns a Reader Variable.
-    Via the Reader Variable, we can get data from the given RecordIO file.
+    ${comment}
 
     Args:
-       filename(str): The RecordIO file's name.
+       filename(${filename_type}): ${filename_comment}.
        shapes(list): List of tuples which declaring data shapes.
-       lod_levels(list): List of ints which declaring data lod_level.
+       lod_levels(${lod_levels_type}): ${lod_levels_comment}.
        dtypes(list): List of strs which declaring data type.
        pass_num(int): Number of passes to run.
        for_parallel(Bool): Set it as True if you are going to run
            subsequent operators in parallel.
 
     Returns:
-       Variable: A Reader Variable via which we can get RecordIO file data.
+       ${out_comment}.
 
     Examples:
-        .. code-block:: python
-
-          reader = fluid.layers.io.open_recordio_file(
-                                    filename='./data.recordio',
-                                    shapes=[(3,224,224), (1)],
-                                    lod_levels=[0, 0],
-                                    dtypes=['float32', 'int64'])
-
-          # Via the reader, we can use 'read_file' layer to get data:
-          image, label = fluid.layers.io.read_file(reader)
+        >>> import paddle.fluid as fluid
+        >>> reader = fluid.layers.io.open_recordio_file(
+        >>>                               filename='./data.recordio',
+        >>>                               shapes=[(3,224,224), (1)],
+        >>>                               lod_levels=[0, 0],
+        >>>                               dtypes=['float32', 'int64'])
+        >>> # Via the reader, we can use 'read_file' layer to get data:
+        >>> image, label = fluid.layers.io.read_file(reader)
     """
     dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes]
     shape_concat = []
...
@@ -554,6 +551,29 @@ def batch(reader, batch_size):
def
double_buffer
(
reader
,
place
=
None
,
name
=
None
):
"""
Wrap a double buffer reader. The data will copy to target place with a
double buffer queue. If the target place is None, the place that executor
perform on will be used.
Args:
reader(Variable): the reader variable need to be wrapped.
place(Place): the place of target data. Default is the sample place of
executor perform.
name(str): Variable name. None if the user does not care.
Returns:
wrapped reader with double buffer.
Examples:
>>> reader = fluid.layers.open_files(filenames=['somefile'],
>>> shapes=[[-1, 784], [-1, 1]],
>>> dtypes=['float32', 'int64'])
>>> reader = fluid.layers.double_buffer(reader)
>>> img, label = fluid.layers.read_file(reader)
"""
attrs
=
dict
()
if
place
is
not
None
:
attrs
[
'place'
]
=
str
(
place
).
upper
()
...
...
python/paddle/fluid/layers/nn.py
...
...
@@ -1952,6 +1952,7 @@ def batch_norm(input,
     return helper.append_activation(batch_norm_out)
 
 
+@templatedoc()
 def layer_norm(input,
                scale=True,
                shift=True,
...
...
@@ -1962,20 +1963,11 @@ def layer_norm(input,
                act=None,
                name=None):
     """
-    **Layer Normalization**
-
-    Assume feature vectors exist on dimensions
-    :attr:`begin_norm_axis ... rank(input)` and calculate the moment statistics
-    along these dimensions for each feature vector :math:`a` with size
-    :math:`H`, then normalize each feature vector using the corresponding
-    statistics. After that, apply learnable gain and bias on the normalized
-    tensor to scale and shift if :attr:`scale` and :attr:`shift` are set.
-
-    Refer to `Layer Normalization <https://arxiv.org/pdf/1607.06450v1.pdf>`_
+    ${comment}
 
     The formula is as follows:
 
-    .. math::
+    ..  math::
 
         \\mu & = \\frac{1}{H}\\sum_{i=1}^{H} a_i
...
...
@@ -1983,6 +1975,15 @@ def layer_norm(input,
 
         h & = f(\\frac{g}{\\sigma}(a - \\mu) + b)
 
+    * :math:`a`: the vector representation of the summed inputs to the neurons
+      in that layer.
+
+    * :math:`H`: the number of hidden units in a layer
+
+    * :math:`g`: the trainable scale parameter.
+
+    * :math:`b`: the trainable bias parameter.
+
     Args:
         input(Variable): The input tensor variable.
         scale(bool): Whether to learn the adaptive gain :math:`g` after
...
...
@@ -2001,14 +2002,13 @@ def layer_norm(input,
         name (str): The name of this layer. It is optional.
 
     Returns:
-        Variable: A tensor variable with the same shape as the input.
+        ${y_comment}
 
     Examples:
-        .. code-block:: python
-
-            data = fluid.layers.data(
-                name='data', shape=[3, 32, 32], dtype='float32')
-            x = fluid.layers.layer_norm(input=data, begin_norm_axis=1)
+        >>> data = fluid.layers.data(name='data', shape=[3, 32, 32],
+        >>>                          dtype='float32')
+        >>> x = fluid.layers.layer_norm(input=data, begin_norm_axis=1)
     """
     helper = LayerHelper('layer_norm', **locals())
     dtype = helper.input_dtype()
...
...
@@ -3691,29 +3691,13 @@ def im2sequence(input, filter_size=1, stride=1, padding=0, name=None):
     return out
 
 
+@templatedoc()
 def row_conv(input, future_context_size, param_attr=None, act=None):
-    """Row Conv Operator. This layer will apply lookahead convolution to
-    **input**. The input variable should be a 2D LoDTensor with shape [T, D].
-    Parameters with shape [future_context_size + 1, D] will be created. The math
-    equation of row convolution is as follows:
-
-    .. math::
-        Out_{i} = \sum_{j = i} ^ {i + \\tau} X_{j} \odot W_{i - j}
-
-    In the above equation:
-
-    * :math:`Out_{i}`: The i-th row of output variable with shape [1, D].
-    * :math:`\\tau`: Future context size.
-    * :math:`X_{j}`: The j-th row of input variable with shape [1, D].
-    * :math:`W_{i-j}`: The (i-j)-th row of parameters with shape [1, D].
-
-    More details about row_conv please refer to the paper \
-    (http://www.cs.cmu.edu/~dyogatam/papers/wang+etal.iclrworkshop2016.pdf) and
-    the design document \
-    (https://github.com/PaddlePaddle/Paddle/issues/2228#issuecomment-303903645).
+    """
+    ${comment}
 
     Args:
-        input (Variable): Input variable, a 2D LoDTensor with shape [T, D].
+        input (${x_type}): ${x_comment}.
         future_context_size (int): Future context size. Please note, the shape
             of convolution kernel is [future_context_size + 1, D].
         param_attr (ParamAttr): Attributes of parameters, including
...
...
@@ -3721,14 +3705,13 @@ def row_conv(input, future_context_size, param_attr=None, act=None):
         act (str): Non-linear activation to be applied to output variable.
 
     Returns:
-        Variable: The output tensor with same shape as input tensor.
+        ${out_comment}.
 
     Examples:
-        .. code-block:: python
-
-            x = fluid.layers.data(name='x', shape=[16],
-                            dtype='float32', lod_level=1)
-            out = fluid.layers.row_conv(input=x, future_context_size=2)
+        >>> import paddle.fluid as fluid
+        >>> x = fluid.layers.data(name='x', shape=[16],
+        >>>                       dtype='float32', lod_level=1)
+        >>> out = fluid.layers.row_conv(input=x, future_context_size=2)
     """
     helper = LayerHelper('row_conv', **locals())
     dtype = helper.input_dtype()
...
...
@@ -3744,42 +3727,23 @@ def row_conv(input, future_context_size, param_attr=None, act=None):
     return helper.append_activation(out)
 
 
+@templatedoc()
 def multiplex(inputs, index):
     """
-    **Multiplex Layer**
-
-    Referring to the given index variable, this layer selects rows from the
-    input variables to construct a multiplex variable. Assuming that there are
-    :math:`m` input variables and :math:`I_i` represents the i-th input
-    variable and :math:`i` is in [0, :math:`m`). All input variables are
-    tensors with same shape [:math:`d_0`, :math:`d_1`, ..., :math:`d_R`].
-    Please note that rank of the input tensor should be at least 2. Each input
-    variable will be treated as a 2-D matrix with shape [:math:`M`, :math:`N`]
-    where :math:`M` for :math:`d_0` and :math:`N` for :math:`d_1` * :math:`d_2`
-    * ... * :math:`d_R`. Let :math:`I_i[j]` be the j-th row of the i-th input
-    variable. The given index variable should be a 2-D tensor with shape
-    [:math:`M`, 1]. Let `ID[i]` be the i-th index value of the index variable.
-    Then the output variable will be a tensor with shape [:math:`d_0`,
-    :math:`d_1`, ..., :math:`d_R`]. If we treat the output tensor as a 2-D
-    matrix with shape [:math:`M`, :math:`N`] and let :math:`O[i]` be the i-th
-    row of the matrix, then `O[i]` is equal to :math:`I_{ID[i]}[i]`.
+    ${comment}
+
+    >>> import paddle.fluid as fluid
+    >>> x1 = fluid.layers.data(name='x1', shape=[4], dtype='float32')
+    >>> x2 = fluid.layers.data(name='x2', shape=[4], dtype='float32')
+    >>> index = fluid.layers.data(name='index', shape=[1], dtype='int32')
+    >>> out = fluid.layers.multiplex(inputs=[x1, x2], index=index)
 
     Args:
-        inputs (list): A list of variables to gather from. All variables have the
-            same shape and the rank is at least 2.
-        index (Variable): Tensor<int32>, index variable which is a 2-D tensor
-            with shape [M, 1] where M is the batch size.
+        inputs (list): ${x_comment}.
+        index (${ids_type}): ${ids_comment}.
 
     Returns:
-        Variable: Multiplex variable gathered from input variables.
-
-    Examples:
-        .. code-block:: python
-
-            x1 = fluid.layers.data(name='x1', shape=[4], dtype='float32')
-            x2 = fluid.layers.data(name='x2', shape=[4], dtype='float32')
-            index = fluid.layers.data(name='index', shape=[1], dtype='int32')
-            out = fluid.layers.multiplex(inputs=[x1, x2], index=index)
+        ${out_comment}.
     """
     helper = LayerHelper('multiplex', **locals())
...
...
python/paddle/fluid/layers/ops.py
...
...
@@ -40,8 +40,6 @@ __activations__ = [
     'relu6',
     'pow',
     'stanh',
-    'hard_shrink',
-    'thresholded_relu',
     'hard_sigmoid',
     'swish',
 ]
...
...
@@ -64,11 +62,9 @@ __all__ = [
     'logical_or',
     'logical_xor',
     'logical_not',
-    'uniform_random',
     'uniform_random_batch_size_like',
     'gaussian_random',
     'gaussian_random_batch_size_like',
-    'cumsum',
     'scatter',
     'sum',
     'slice',
...
...
@@ -79,3 +75,88 @@ __all__ = [
 for _OP in set(__all__):
     globals()[_OP] = generate_layer_fn(_OP)
+
+__all__ += ["uniform_random"]
+
+_uniform_random_ = generate_layer_fn('uniform_random')
+
+
+def uniform_random(shape, dtype=None, min=None, max=None, seed=None):
+    # Snapshot locals() first; iterating it directly while assigning new
+    # locals would change the dict mid-iteration.
+    kwargs = dict()
+    for name, val in list(locals().items()):
+        if name != 'kwargs' and val is not None:
+            kwargs[name] = val
+    return _uniform_random_(**kwargs)
+
+
+uniform_random.__doc__ = _uniform_random_.__doc__ + """
+Examples:
+
+    >>> result = fluid.layers.uniform_random(shape=[32, 784])
+"""
+
+__all__ += ['hard_shrink']
+
+_hard_shrink_ = generate_layer_fn('hard_shrink')
+
+
+def hard_shrink(x, threshold=None):
+    kwargs = dict()
+    for name, val in list(locals().items()):
+        if name != 'kwargs' and val is not None:
+            kwargs[name] = val
+    return _hard_shrink_(**kwargs)
+
+
+hard_shrink.__doc__ = _hard_shrink_.__doc__ + """
+Examples:
+
+    >>> data = fluid.layers.data(name="input", shape=[784])
+    >>> result = fluid.layers.hard_shrink(x=data, threshold=0.3)
+"""
+
+__all__ += ['cumsum']
+
+_cum_sum_ = generate_layer_fn('cumsum')
+
+
+def cumsum(x, axis=None, exclusive=None, reverse=None):
+    kwargs = dict()
+    for name, val in list(locals().items()):
+        if name != 'kwargs' and val is not None:
+            kwargs[name] = val
+    return _cum_sum_(**kwargs)
+
+
+cumsum.__doc__ = _cum_sum_.__doc__ + """
+Examples:
+
+    >>> data = fluid.layers.data(name="input", shape=[32, 784])
+    >>> result = fluid.layers.cumsum(data, axis=0)
+"""
+
+__all__ += ['thresholded_relu']
+
+_thresholded_relu_ = generate_layer_fn('thresholded_relu')
+
+
+def thresholded_relu(x, threshold=None):
+    kwargs = dict()
+    for name, val in list(locals().items()):
+        if name != 'kwargs' and val is not None:
+            kwargs[name] = val
+    return _thresholded_relu_(**kwargs)
+
+
+thresholded_relu.__doc__ = _thresholded_relu_.__doc__ + """
+Examples:
+
+    >>> data = fluid.layers.data(name="input", shape=[1])
+    >>> result = fluid.layers.thresholded_relu(data, threshold=0.4)
+"""
python/paddle/fluid/layers/tensor.py
...
...
@@ -6,7 +6,7 @@
 #
 # http://www.apache.org/licenses/LICENSE-2.0
 #
-# Unless required by applicable law or agreed to in writing, software
+# Unlessf required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
...
...
@@ -51,7 +51,12 @@ def create_parameter(shape,
                      is_bias=False,
                      default_initializer=None):
     """
-    Create a parameter
+    Create a parameter. The parameter is a learnable variable, which can have
+    gradient, and can be optimized.
+
+    NOTE: this is a very low-level API. This API is useful when you create
+    an operator by yourself, instead of using layers.
+
     Args:
         shape(list[int]): shape of the parameter
         dtype(string): element type of the parameter
...
...
@@ -63,7 +68,12 @@ def create_parameter(shape,
         default_initializer(Initializer): initializer for the parameter
 
     Returns:
-        Parameter: the created parameter
+        the created parameter.
+
+    Examples:
+        >>> W = fluid.layers.create_parameter(shape=[784, 200], dtype='float32')
+        >>> data = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False)
+        >>> hidden = fluid.layers.matmul(x=data, y=W)
     """
     helper = LayerHelper("create_parameter", **locals())
     if attr is None:
...
...