PaddlePaddle / PaddleDetection · commit 2955ff58
Authored on June 11, 2018 by yuyang18

Polish documentation

* row_conv
* uniform_random
* layer_norm
* create_parameter
* hard_shrink
* ssd_loss
Parent: 9328c3cf

Showing 7 changed files with 115 additions and 112 deletions (+115, -112):
* paddle/fluid/operators/activation_op.cc (+6, -7)
* paddle/fluid/operators/layer_norm_op.cc (+17, -16)
* paddle/fluid/operators/row_conv_op.cc (+17, -3)
* paddle/fluid/operators/uniform_random_op.cc (+7, -13)
* python/paddle/fluid/layers/detection.py (+37, -26)
* python/paddle/fluid/layers/nn.py (+18, -45)
* python/paddle/fluid/layers/tensor.py (+13, -2)
paddle/fluid/operators/activation_op.cc

@@ -276,13 +276,12 @@ class HardShrinkOpMaker : public framework::OpProtoAndCheckerMaker {
     AddComment(R"DOC(
 HardShrink Activation Operator.
 
-$$
-out = \begin{cases}
-    x, \text{if } x > \lambda \\
-    x, \text{if } x < -\lambda \\
-    0, \text{otherwise}
-  \end{cases}
-$$
+.. math::
+    out = \begin{cases}
+            x, \text{if } x > \lambda \\
+            x, \text{if } x < -\lambda \\
+            0, \text{otherwise}
+          \end{cases}
 
 )DOC");
   }
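
The piecewise definition above is easy to check numerically. A minimal NumPy sketch of the formula in the DOC string (an illustration, not Paddle's kernel; the 0.5 threshold is only an assumed example value for lambda):

    import numpy as np

    def hard_shrink(x, lam=0.5):
        # out = x wherever |x| > lambda, and 0 otherwise
        return np.where(np.abs(x) > lam, x, 0.0)

    x = np.array([-1.0, -0.3, 0.0, 0.4, 2.0])
    print(hard_shrink(x))  # [-1.  0.  0.  0.  2.]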
paddle/fluid/operators/layer_norm_op.cc

@@ -62,36 +62,33 @@ class LayerNormOp : public framework::OperatorWithKernel {
 class LayerNormOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
-    AddInput("X", "(LoDTensor) The input tensor.");
+    AddInput("X", "The input tensor.");
     AddInput("Scale",
-             "(Tensor, optional) Scale is a 1-dimensional tensor of size "
+             "(optional) Scale is a 1-dimensional tensor of size "
              "H(`begin_norm_axis` splits the tensor(`X`) to a matrix [N,H])."
              "It is applied to the output.")
         .AsDispensable();
     AddInput("Bias",
-             "(Tensor, optional) Bias is a 1-dimensional tensor of size "
+             "(optional) Bias is a 1-dimensional tensor of size "
              "H(`begin_norm_axis` splits the tensor(`X`) to a matrix [N,H])."
              "It is applied to the output.")
         .AsDispensable();
-    AddOutput("Y", "(LoDTensor) Result after normalization.");
-    AddOutput("Mean", "(Tensor) Mean of the current mini batch.")
-        .AsIntermediate();
-    AddOutput("Variance", "(Tensor) Variance of the current mini batch.")
+    AddOutput("Y", "Result after normalization.");
+    AddOutput("Mean", "Mean of the current mini batch.").AsIntermediate();
+    AddOutput("Variance", "Variance of the current mini batch.")
         .AsIntermediate();
     AddAttr<float>("epsilon",
-                   "(float, default 1e-5) Constant for "
-                   "numerical stability")
+                   "Constant for numerical stability [default 1e-5].")
         .SetDefault(1e-5)
         .AddCustomChecker([](const float &epsilon) {
           PADDLE_ENFORCE(epsilon >= 0.0f && epsilon <= 0.001f,
                          "'epsilon' should be between 0.0 and 0.001.");
         });
     AddAttr<int>("begin_norm_axis",
-                 "(int default:1), the "
-                 "axis of `begin_norm_axis ... Rank(X) - 1` will be "
+                 "the axis of `begin_norm_axis ... Rank(X) - 1` will be "
                  "normalized. `begin_norm_axis` splits the tensor(`X`) to a "
-                 "matrix [N,H].")
+                 "matrix [N,H]. [default 1].")
         .SetDefault(1)
         .AddCustomChecker([](const int &begin_norm_axis) {
           PADDLE_ENFORCE_GT(begin_norm_axis, 0,

@@ -99,10 +96,14 @@ class LayerNormOpMaker : public framework::OpProtoAndCheckerMaker {
         });
 
     AddComment(R"DOC(
-Layer Normalization.
-Layer Norm has been implemented as discussed in the paper:
-https://arxiv.org/abs/1607.06450
-...
+Assume feature vectors exist on dimensions
+:attr:`begin_norm_axis ... rank(input)` and calculate the moment statistics
+along these dimensions for each feature vector :math:`a` with size
+:math:`H`, then normalize each feature vector using the corresponding
+statistics. After that, apply learnable gain and bias on the normalized
+tensor to scale and shift if :attr:`scale` and :attr:`shift` are set.
+
+Refer to `Layer Normalization <https://arxiv.org/pdf/1607.06450v1.pdf>`_
 )DOC");
   }
 };
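
To make the begin_norm_axis semantics concrete: the input is viewed as a matrix [N, H], where H collects the dimensions from begin_norm_axis to Rank(X) - 1, and each of the N feature vectors is normalized with its own statistics. A small NumPy sketch of the math in the DOC string (an illustration, not Paddle's kernel; whether epsilon is added to the variance or the standard deviation is an implementation detail assumed here):

    import numpy as np

    def layer_norm_ref(x, begin_norm_axis=1, epsilon=1e-5):
        # Split x into a matrix [N, H] at begin_norm_axis.
        N = int(np.prod(x.shape[:begin_norm_axis]))
        mat = x.reshape(N, -1)
        mu = mat.mean(axis=1, keepdims=True)    # per-feature-vector mean
        var = mat.var(axis=1, keepdims=True)    # per-feature-vector variance
        out = (mat - mu) / np.sqrt(var + epsilon)
        return out.reshape(x.shape)             # gain/bias would scale/shift here

    x = np.random.rand(2, 3, 4).astype('float32')
    y = layer_norm_ref(x, begin_norm_axis=1)
    print(y.reshape(2, -1).mean(axis=1))  # each of the N=2 rows is ~0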
paddle/fluid/operators/row_conv_op.cc

@@ -78,18 +78,18 @@ class RowConvOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
     AddInput("X",
-             "(LoDTensor), the input(X) is a LodTensor, which supports "
+             "the input(X) is a LodTensor, which supports "
              "variable time-length input sequences. The underlying tensor "
              "in this LoDTensor is a matrix with shape (T x N), where T "
              "is the total time steps in this mini-batch and N is the input "
              "data dimension.");
     AddInput("Filter",
-             "(Tensor), the input(Filter) is a learnable parameter. It "
+             "the input(Filter) is a learnable parameter. It "
              "is a 2-D tensor with shape (future_context x N), where, "
              "future_context is the future context length and N is the data "
              "dimension.");
     AddOutput("Out",
-              "(LoDTensor), the output(Out) is a LodTensor, which supports "
+              "the output(Out) is a LodTensor, which supports "
               "variable time-length input sequences. The underlying tensor "
               "in this LodTensor is a matrix with shape T x N, i.e., the "
               "same shape as X.");

@@ -117,6 +117,20 @@
 out_{i, :} = \sum_{j=i}^{i + context} in_{j,:} \dot W_{i-j, :}
 $$
 
+In the above equation:
+
+* $Out_{i}$: The i-th row of output variable with shape [1, D].
+* $\\tau$: Future context size.
+* $X_{j}$: The j-th row of input variable with shape [1, D].
+* $W_{i-j}$: The (i-j)-th row of parameters with shape [1, D].
+
+More details about row_conv please refer to
+the design document
+https://github.com/PaddlePaddle/Paddle/issues/2228#issuecomment-303903645 .
+
 )DOC");
   }
 };
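
The lookahead convolution above sums, for each time step i, the elementwise product of the next future_context rows of the input with the rows of the filter. A hedged NumPy sketch for a single sequence (Paddle's operator additionally handles LoD batching across variable-length sequences, which is omitted here):

    import numpy as np

    def row_conv_ref(x, w):
        # x: [T, D], one sequence; w: [future_context + 1, D] filter.
        T, _ = x.shape
        out = np.zeros_like(x)
        for i in range(T):
            for k in range(w.shape[0]):          # k is the lookahead offset
                if i + k < T:
                    out[i] += x[i + k] * w[k]    # elementwise product, summed
        return out

    x = np.random.rand(5, 3)
    w = np.random.rand(3, 3)                     # future_context_size = 2
    print(row_conv_ref(x, w).shape)              # (5, 3), same shape as x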
paddle/fluid/operators/uniform_random_op.cc

@@ -86,32 +86,26 @@ class UniformRandomOp : public framework::OperatorWithKernel {
 class UniformRandomOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
-    AddOutput("Out", "(Tensor) The output tensor of uniform random op");
+    AddOutput("Out", "The output tensor of uniform random op");
     AddComment(R"DOC(
 Uniform random operator.
 
 This operator initializes a tensor with random values sampled from a
 uniform distribution.
+The random result is in set [min, max].
 
 )DOC");
-    AddAttr<std::vector<int>>("shape",
-                              "(vector<int>) The shape of the output tensor");
-    AddAttr<float>("min",
-                   "(float, default -1.0) "
-                   "Minimum value of uniform random")
+    AddAttr<std::vector<int>>("shape", "The shape of the output tensor");
+    AddAttr<float>("min", "Minimum value of uniform random. [default -1.0].")
         .SetDefault(-1.0f);
-    AddAttr<float>("max",
-                   "(float, default 1.0) "
-                   "Maximun value of uniform random")
+    AddAttr<float>("max", "Maximun value of uniform random. [default 1.0].")
         .SetDefault(1.0f);
     AddAttr<int>("seed",
-                 "(int, default 0) "
                  "Random seed used for generating samples. "
                  "0 means use a seed generated by the system."
                  "Note that if seed is not 0, this operator will always "
-                 "generate the same random numbers every time.")
+                 "generate the same random numbers every time. [default 0].")
         .SetDefault(0);
-    AddAttr<int>("dtype", "(int, default 5(FP32)) Output tensor data type")
+    AddAttr<int>("dtype", "Output tensor data type. [default 5(FP32)].")
        .SetDefault(framework::proto::VarType::FP32);
   }
 };
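
The seed attribute follows the usual RNG contract: 0 defers to system entropy, while any fixed nonzero seed reproduces the identical tensor on every run. A NumPy analogy of that contract (not the operator's implementation):

    import numpy as np

    def uniform_random_ref(shape, low=-1.0, high=1.0, seed=0):
        # seed == 0: system-chosen entropy; nonzero: fully deterministic.
        rng = np.random.default_rng(None if seed == 0 else seed)
        return rng.uniform(low, high, size=shape).astype('float32')

    a = uniform_random_ref([2, 3], seed=10)
    b = uniform_random_ref([2, 3], seed=10)
    print(np.array_equal(a, b))  # True: a nonzero seed always repeats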
python/paddle/fluid/layers/detection.py

@@ -373,22 +373,55 @@ def ssd_loss(location,
     confidence loss (or classification loss) by performing the following steps:
 
     1. Find matched boundding box by bipartite matching algorithm.
+
       1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
+
       1.2 Compute matched boundding box by bipartite matching algorithm.
+
     2. Compute confidence for mining hard examples
+
       2.1. Get the target label based on matched indices.
+
       2.2. Compute confidence loss.
+
     3. Apply hard example mining to get the negative example indices and update
        the matched indices.
+
     4. Assign classification and regression targets
+
       4.1. Encoded bbox according to the prior boxes.
+
       4.2. Assign regression targets.
+
       4.3. Assign classification targets.
+
     5. Compute the overall objective loss.
+
       5.1 Compute confidence loss.
+
       5.1 Compute localization loss.
+
       5.3 Compute the overall weighted loss.
+
+    >>> import paddle.fluid.layers as layers
+    >>> pb = layers.data(
+    >>>     name='prior_box',
+    >>>     shape=[10, 4],
+    >>>     append_batch_size=False,
+    >>>     dtype='float32')
+    >>> pbv = layers.data(
+    >>>     name='prior_box_var',
+    >>>     shape=[10, 4],
+    >>>     append_batch_size=False,
+    >>>     dtype='float32')
+    >>> loc = layers.data(name='target_box', shape=[10, 4], dtype='float32')
+    >>> scores = layers.data(name='scores', shape=[10, 21], dtype='float32')
+    >>> gt_box = layers.data(
+    >>>     name='gt_box', shape=[4], lod_level=1, dtype='float32')
+    >>> gt_label = layers.data(
+    >>>     name='gt_label', shape=[1], lod_level=1, dtype='float32')
+    >>> loss = layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
 
     Args:
         location (Variable): The location predictions are a 3D Tensor with
             shape [N, Np, 4], N is the batch size, Np is total number of

@@ -426,34 +459,12 @@ def ssd_loss(location,
             mining_type is 'hard_example'.
 
     Returns:
-        Variable: The weighted sum of the localization loss and confidence loss,
-            with shape [N * Np, 1], N and Np are the same as they are
-            in `location`.
+        The weighted sum of the localization loss and confidence loss, with \
+        shape [N * Np, 1], N and Np are the same as they are in `location`.
 
     Raises:
-        ValueError: If mining_type is 'hard_example', now only support
-            mining type of `max_negative`.
-
-    Examples:
-        .. code-block:: python
-
-            pb = layers.data(
-                name='prior_box',
-                shape=[10, 4],
-                append_batch_size=False,
-                dtype='float32')
-            pbv = layers.data(
-                name='prior_box_var',
-                shape=[10, 4],
-                append_batch_size=False,
-                dtype='float32')
-            loc = layers.data(name='target_box', shape=[10, 4], dtype='float32')
-            scores = layers.data(name='scores', shape=[10, 21], dtype='float32')
-            gt_box = layers.data(
-                name='gt_box', shape=[4], lod_level=1, dtype='float32')
-            gt_label = layers.data(
-                name='gt_label', shape=[1], lod_level=1, dtype='float32')
-            loss = layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
+        ValueError: If mining_type is 'hard_example', now only support mining \
+            type of `max_negative`.
     """
     helper = LayerHelper('ssd_loss', **locals())
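
Step 5.3 is the standard SSD objective from Liu et al. (2016): a weighted sum of the confidence and localization terms. A hedged NumPy sketch of that final combination over the per-prior losses (the weight names and defaults here are assumptions for illustration, not values read from Paddle's signature):

    import numpy as np

    def overall_loss(conf_loss, loc_loss, conf_w=1.0, loc_w=1.0):
        # conf_loss, loc_loss: per-prior losses, each of shape [N * Np, 1]
        return conf_w * conf_loss + loc_w * loc_loss

    conf = np.random.rand(40, 1)   # e.g. N = 4 images, Np = 10 prior boxes
    loc = np.random.rand(40, 1)
    print(overall_loss(conf, loc).shape)  # (40, 1), matching the Returns doc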
python/paddle/fluid/layers/nn.py

@@ -1624,6 +1624,7 @@ def batch_norm(input,
     return helper.append_activation(batch_norm_out)
 
 
+@templatedoc()
 def layer_norm(input,
                scale=True,
                shift=True,

@@ -1634,16 +1635,7 @@ def layer_norm(input,
                act=None,
                name=None):
     """
-    **Layer Normalization**
-
-    Assume feature vectors exist on dimensions
-    :attr:`begin_norm_axis ... rank(input)` and calculate the moment statistics
-    along these dimensions for each feature vector :math:`a` with size
-    :math:`H`, then normalize each feature vector using the corresponding
-    statistics. After that, apply learnable gain and bias on the normalized
-    tensor to scale and shift if :attr:`scale` and :attr:`shift` are set.
-
-    Refer to `Layer Normalization <https://arxiv.org/pdf/1607.06450v1.pdf>`_
+    ${comment}
 
     The formula is as follows:

@@ -1655,6 +1647,11 @@ def layer_norm(input,
         h & = f(\\frac{g}{\\sigma}(a - \\mu) + b)
 
+    >>> import paddle.fluid as fluid
+    >>> data = fluid.layers.data(name='data', shape=[3, 32, 32],
+    >>>                          dtype='float32')
+    >>> x = fluid.layers.layer_norm(input=data, begin_norm_axis=1)
+
     Args:
         input(Variable): The input tensor variable.
         scale(bool): Whether to learn the adaptive gain :math:`g` after

@@ -1672,14 +1669,7 @@ def layer_norm(input,
         act(str): Activation to be applied to the output of layer normalizaiton.
 
     Returns:
-        Variable: A tensor variable with the same shape as the input.
-
-    Examples:
-        .. code-block:: python
-
-            data = fluid.layers.data(
-                name='data', shape=[3, 32, 32], dtype='float32')
-            x = fluid.layers.layer_norm(input=data, begin_norm_axis=1)
+        ${y_comment}
     """
     helper = LayerHelper('layer_norm', **locals())
     dtype = helper.input_dtype()

@@ -3184,29 +3174,19 @@ def im2sequence(input, filter_size=1, stride=1, padding=0, name=None):
     return out
 
 
+@templatedoc()
 def row_conv(input, future_context_size, param_attr=None, act=None):
-    """Row Conv Operator. This layer will apply lookahead convolution to
-    **input**. The input variable should be a 2D LoDTensor with shape [T, D].
-    Parameters with shape [future_context_size + 1, D] will be created. The math
-    equation of row convolution is as follows:
-
-    .. math::
-        Out_{i} = \sum_{j = i} ^ {i + \\tau} X_{j} \odot W_{i - j}
-
-    In the above equation:
-
-    * :math:`Out_{i}`: The i-th row of output variable with shape [1, D].
-    * :math:`\\tau`: Future context size.
-    * :math:`X_{j}`: The j-th row of input variable with shape [1, D].
-    * :math:`W_{i-j}`: The (i-j)-th row of parameters with shape [1, D].
-
-    More details about row_conv please refer to the paper \
-    (http://www.cs.cmu.edu/~dyogatam/papers/wang+etal.iclrworkshop2016.pdf) and
-    the design document \
-    (https://github.com/PaddlePaddle/Paddle/issues/2228#issuecomment-303903645).
+    """
+    ${comment}
+
+    >>> import paddle.fluid as fluid
+    >>> x = fluid.layers.data(name='x', shape=[16],
+    >>>                       dtype='float32', lod_level=1)
+    >>> out = fluid.layers.row_conv(input=x, future_context_size=2)
 
     Args:
-        input (Variable): Input variable, a 2D LoDTensor with shape [T, D].
+        input (${x_type}): ${x_comment}.
         future_context_size (int): Future context size. Please note, the shape
             of convolution kernel is [future_context_size + 1, D].
         param_attr (ParamAttr): Attributes of parameters, including

@@ -3214,14 +3194,7 @@ def row_conv(input, future_context_size, param_attr=None, act=None):
         act (str): Non-linear activation to be applied to output variable.
 
     Returns:
-        Variable: The output tensor with same shape as input tensor.
-
-    Examples:
-        .. code-block:: python
-
-            x = fluid.layers.data(name='x', shape=[16],
-                                  dtype='float32', lod_level=1)
-            out = fluid.layers.row_conv(input=x, future_context_size=2)
+        ${out_comment}.
     """
     helper = LayerHelper('row_conv', **locals())
     dtype = helper.input_dtype()
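
The recurring change in this file is the new @templatedoc() decorator: placeholders such as ${comment}, ${x_comment} and ${out_comment} in the Python docstring are filled in from the C++ OpProto strings (the DOC blocks edited above), so the Python and C++ documentation can no longer drift apart. A toy sketch of the substitution idea only; Paddle's real decorator looks the strings up from the registered OpProto rather than taking them as keyword arguments:

    import re

    def templatedoc(**fields):
        """Toy stand-in: fill ${name} placeholders in a docstring."""
        def decorator(fn):
            fn.__doc__ = re.sub(r"\$\{(\w+)\}",
                                lambda m: fields.get(m.group(1), m.group(0)),
                                fn.__doc__ or "")
            return fn
        return decorator

    @templatedoc(comment="Row-conv operator.", out_comment="the output LoDTensor")
    def row_conv(input, future_context_size):
        """${comment}

        Returns:
            ${out_comment}.
        """

    print(row_conv.__doc__)  # placeholders replaced by the op's own docs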
python/paddle/fluid/layers/tensor.py

@@ -49,7 +49,18 @@ def create_parameter(shape,
                      is_bias=False,
                      default_initializer=None):
     """
-    Create a parameter
+    Create a parameter. The parameter is a learnable variable, which can have
+    gradient, and can be optimized.
+
+    NOTE: this is a very low-level API. This API is useful when you create
+    operator by your self. instead of using layers.
+
+    >>> import paddle.fluid as fluid
+    >>> W = fluid.layers.create_parameter(shape=[784, 200], dtype='float32')
+    >>> data = fluid.layers.data(name="img", shape=[64, 784],
+    >>>                          append_batch_size=False)
+    >>> hidden = fluid.layers.matmul(x=data, y=W)
+
     Args:
         shape(list[int]): shape of the parameter
         dtype(string): element type of the parameter

@@ -61,7 +72,7 @@ def create_parameter(shape,
         default_initializer(Initializer): initializer for the parameter
 
     Returns:
-        Parameter: the created parameter
+        the created parameter
     """
     helper = LayerHelper("create_parameter", **locals())
     if attr is None: