Commit b0b26dab
Authored on Nov 03, 2017 by Abhinav Arora
Committed by Yi Wang on Nov 03, 2017
Polish operator documentation (#5356)
* Polish the documentation for uniform_random and top_k ops
* Polishing more operators
Parent: 7408a4c4
Showing 9 changed files with 113 additions and 87 deletions
paddle/operators/save_op.cc                 +10 −5
paddle/operators/scale_op.cc                +8  −5
paddle/operators/sequence_concat_op.cc      +35 −33
paddle/operators/sgd_op.cc                  +8  −6
paddle/operators/sign_op.cc                 +3  −2
paddle/operators/split_op.cc                +24 −16
paddle/operators/squared_l2_distance_op.cc  +16 −13
paddle/operators/squared_l2_norm_op.cc      +2  −2
paddle/operators/sum_op.cc                  +7  −5
paddle/operators/save_op.cc

@@ -163,14 +163,19 @@ class SaveOpProtoMaker : public framework::OpProtoAndCheckerMaker {
   SaveOpProtoMaker(framework::OpProto *proto,
                    framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("X", "The tensor need to be saved");
-    AddComment(R"DOC(Save operator
-Save operator will serialize and write a tensor variable to disk file.
+    AddInput("X", "(Tensor ) Input tensor to be saved");
+    AddComment(R"DOC(
+Save operator
+This operator will serialize and write a tensor variable to file on disk.
 )DOC");
-    AddAttr<bool>("overwrite", "Overwrite the output file if exist")
+    AddAttr<bool>("overwrite",
+                  "(boolean, default true)"
+                  "Overwrite the output file if exist")
         .SetDefault(true);
     AddAttr<std::string>("file_path",
-                         "Variable will be saved to \"file_path\".")
+                         "(string)"
+                         "The \"file_path\" where the variable will be saved.")
         .AddCustomChecker(
             [](const std::string &path) { return !path.empty(); });
   }
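The new comment describes the save semantics only in prose: serialize a tensor, write it to "file_path", and refuse to clobber an existing file unless "overwrite" is set. A minimal standalone C++ sketch of that contract; the helper name save_tensor and the raw float dump are illustrative assumptions, not Paddle's actual serialization format or API.

// Hypothetical sketch of the documented save contract, not Paddle's real format.
#include <fstream>
#include <stdexcept>
#include <string>
#include <vector>

void save_tensor(const std::vector<float>& data, const std::string& file_path,
                 bool overwrite = true) {
  if (file_path.empty()) throw std::invalid_argument("file_path must be non-empty");
  // Refuse to overwrite an existing file unless explicitly allowed.
  if (!overwrite && std::ifstream(file_path).good())
    throw std::runtime_error("refusing to overwrite existing file: " + file_path);
  std::ofstream out(file_path, std::ios::binary);
  out.write(reinterpret_cast<const char*>(data.data()),
            static_cast<std::streamsize>(data.size() * sizeof(float)));
}

int main() {
  save_tensor({1.f, 2.f, 3.f}, "x.bin");  // writes 12 bytes of raw floats
}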
paddle/operators/scale_op.cc

@@ -40,13 +40,16 @@ class ScaleOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   ScaleOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("X", "The input tensor of scale operator.");
-    AddOutput("Out", "The output tensor of scale operator.");
-    AddComment(R"DOC(Scale operator
+    AddInput("X", "(Tensor) Input tensor of scale operator.");
+    AddOutput("Out", "(Tensor) Output tensor of scale operator.");
+    AddComment(R"DOC(
+Scale operator
-The equation is: Out = scale*X
+$$Out = scale*X$$
 )DOC");
-    AddAttr<AttrType>("scale", "The scaling factor of the scale operator.")
+    AddAttr<AttrType>("scale",
+                      "(float, default 0)"
+                      "The scaling factor of the scale operator.")
         .SetDefault(1.0);
   }
 };
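The polished comment reduces the scale operator to the single equation $$Out = scale*X$$, i.e. an element-wise multiply. A minimal sketch of that semantics on a plain std::vector, not the framework's tensor types:

#include <cassert>
#include <vector>

// Element-wise Out = scale * X, mirroring the documented equation.
std::vector<float> scale_op(const std::vector<float>& x, float scale) {
  std::vector<float> out(x.size());
  for (size_t i = 0; i < x.size(); ++i) out[i] = scale * x[i];
  return out;
}

int main() {
  auto out = scale_op({1.f, 2.f, 3.f}, 2.f);
  assert(out[2] == 6.f);  // 2 * 3
}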
paddle/operators/sequence_concat_op.cc

@@ -47,19 +47,19 @@ class SequenceConcatOpMaker : public framework::OpProtoAndCheckerMaker {
                         framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X",
-             "(A vector of LoDTensor), the input is a vector of LoDTensor, "
+             "(vector<LoDTensor>) Input is a vector of LoDTensor, "
              "each of which is a variable-length sequence or nested sequence.")
         .AsDuplicable();
     AddOutput("Out",
-              "(A LoDTensor), the variable-length output of "
+              "(LoDTensor), Variable-length output of "
               "sequence_concat Op.");
     AddAttr<int>("axis",
-                 "(int, default 0)"
-                 "The axis which the inputs will be joined with. "
+                 "(int, default 0) "
+                 "The axis along which the inputs will be joined. "
                  "If axis is 0, the inputs will be joined with LoD index.")
         .SetDefault(0);
     AddAttr<int>("level",
-                 "(int, default 0)"
+                 "(int, default 0) "
                  "The level at which the inputs will be joined. "
                  "If the level is 0, the inputs will be joined at the nested "
                  "sequence level. "

@@ -68,34 +68,36 @@ class SequenceConcatOpMaker : public framework::OpProtoAndCheckerMaker {
                  "The level should be less than the level number of inputs.")
         .SetDefault(0);
     AddComment(R"DOC(
-The sequence_concat operator concatenates multiple LoDTensors.
-It only supports sequence (LoD Tensor with level number is 1)
-or a nested sequence (LoD tensor with level number is 2) as its input.
-- Case1:
-  If the axis is other than 0(here, axis is 1 and level is 1),
-  each input should have the same LoD information and the LoD
-  information of the output keeps the same as the input.
-  LoD(x0) = {{0,2,4}, {0,1,2,3,4}}; Dims(x0) = (4,3,4)
-  LoD(x1) = {{0,2,4}, {0,1,2,3,4}}; Dims(x1) = (4,4,4)
-  LoD(Out) = {{0,2,4}, {0,1,2,3,4}}; Dims(Out) = (4,7,4)
-- Case2:
-  If the axis is 0(here, leve is 0), the inputs are concatenated along
-  time steps, the LoD information of the output need to re-compute.
-  LoD(x0) = {{0,2,4}, {0,1,2,3,4}}; Dims(x0) = (4,3,4)
-  LoD(x1) = {{0,3,5}, {0,1,2,3,5}}; Dims(x1) = (5,3,4)
-  LoD(Out) = {{0,5,9}, {0,1,2,3,4,5,6,7,9}}; Dims(Out) = (9,3,4)
-- Case3:
-  If the axis is 0(here, level is 1).
-  LoD(x0) = {{0,2,4}, {0,1,2,3,4}}; Dims(x0) = (4,3,4)
-  LoD(x1) = {{0,3,5}, {0,1,3,4,5}}; Dims(x1) = (5,3,4)
-  LoD(Out) = {{0,5,9}, {0,2,5,7,9}}; Dims(Out) = (9,3,4)
-NOTE: The levels of all the inputs should be the same.
+Sequence Concat operator
+The sequence_concat operator concatenates multiple LoDTensors.
+It only supports sequence (LoD Tensor with level number is 1)
+or a nested sequence (LoD tensor with level number is 2) as its input.
+- Case1:
+  If the axis is other than 0(here, axis is 1 and level is 1),
+  each input should have the same LoD information and the LoD
+  information of the output keeps the same as the input.
+  LoD(x0) = {{0,2,4}, {0,1,2,3,4}}; Dims(x0) = (4,3,4)
+  LoD(x1) = {{0,2,4}, {0,1,2,3,4}}; Dims(x1) = (4,4,4)
+  LoD(Out) = {{0,2,4}, {0,1,2,3,4}}; Dims(Out) = (4,7,4)
+- Case2:
+  If the axis is 0(here, leve is 0), the inputs are concatenated along
+  time steps, the LoD information of the output need to re-compute.
+  LoD(x0) = {{0,2,4}, {0,1,2,3,4}}; Dims(x0) = (4,3,4)
+  LoD(x1) = {{0,3,5}, {0,1,2,3,5}}; Dims(x1) = (5,3,4)
+  LoD(Out) = {{0,5,9}, {0,1,2,3,4,5,6,7,9}}; Dims(Out) = (9,3,4)
+- Case3:
+  If the axis is 0(here, level is 1).
+  LoD(x0) = {{0,2,4}, {0,1,2,3,4}}; Dims(x0) = (4,3,4)
+  LoD(x1) = {{0,3,5}, {0,1,3,4,5}}; Dims(x1) = (5,3,4)
+  LoD(Out) = {{0,5,9}, {0,2,5,7,9}}; Dims(Out) = (9,3,4)
+NOTE: The levels of all the inputs should be the same.
 )DOC");
   }
 };
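In Case 2 (axis 0, level 0) the example recomputes the output's top-level LoD, {0,5,9}, from the inputs' {0,2,4} and {0,3,5}. A small sketch that reproduces those numbers under the assumption that the recomputation is a position-wise sum of the offset vectors; merge_lod_level0 is a hypothetical helper on plain vectors, not the operator's kernel:

#include <cassert>
#include <vector>

// Merge top-level LoD offsets when sequences are concatenated along time
// steps (axis = 0): the i-th output offset is the sum of the inputs' i-th offsets.
std::vector<int> merge_lod_level0(const std::vector<int>& a, const std::vector<int>& b) {
  std::vector<int> out(a.size());
  for (size_t i = 0; i < a.size(); ++i) out[i] = a[i] + b[i];
  return out;
}

int main() {
  // LoD(x0) top level = {0,2,4}, LoD(x1) top level = {0,3,5}  ->  LoD(Out) = {0,5,9}
  auto out = merge_lod_level0({0, 2, 4}, {0, 3, 5});
  assert((out == std::vector<int>{0, 5, 9}));
}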
paddle/operators/sgd_op.cc

@@ -45,15 +45,17 @@ class SGDOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   SGDOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("Param", "Input parameter");
-    AddInput("LearningRate", "Learning rate of SGD");
-    AddInput("Grad", "Input gradient");
-    AddOutput("ParamOut", "output parameter");
+    AddInput("Param", "(Tensor) Input parameter");
+    AddInput("LearningRate", "(Tensor) Learning rate of SGD");
+    AddInput("Grad", "(Tensor) Input gradient");
+    AddOutput("ParamOut", "(Tensor) Output parameter");
     AddComment(R"DOC(
-Simplest sgd algorithm.
-param_out = param - learning_rate * grad;
+SGD operator
+This operator implements one step of the stochastic gradient descent algorithm.
+$$param_out = param - learning_rate * grad$$
 )DOC");
   }
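The documented update, $$param_out = param - learning_rate * grad$$, is a single vector operation. A minimal sketch over flat float vectors, not the operator's actual kernel:

#include <cassert>
#include <vector>

// One SGD step: param_out = param - learning_rate * grad.
std::vector<float> sgd_step(const std::vector<float>& param,
                            const std::vector<float>& grad, float learning_rate) {
  std::vector<float> out(param.size());
  for (size_t i = 0; i < param.size(); ++i)
    out[i] = param[i] - learning_rate * grad[i];
  return out;
}

int main() {
  auto out = sgd_step({1.0f, 2.0f}, {0.5f, 1.0f}, /*learning_rate=*/0.5f);
  assert(out[0] == 0.75f && out[1] == 1.5f);  // values exact in binary floating point
}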
paddle/operators/sign_op.cc

@@ -38,9 +38,10 @@ class SignOpMaker : public framework::OpProtoAndCheckerMaker {
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X", "(Tensor) Input tensor of sign operator.");
     AddOutput("Out", "(Tensor) Output tensor of sign operator.");
-    AddComment(R"DOC(Sign operator
-The equation is: Out = X.sign()
+    AddComment(R"DOC(
+Sign operator
+$$Out = X.sign()$$
 )DOC");
   }
 };
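$$Out = X.sign()$$ maps every element to -1, 0, or +1. A minimal element-wise sketch of that behaviour:

#include <cassert>
#include <vector>

// Element-wise sign: -1 for negative, 0 for zero, +1 for positive.
std::vector<float> sign_op(const std::vector<float>& x) {
  std::vector<float> out(x.size());
  for (size_t i = 0; i < x.size(); ++i)
    out[i] = static_cast<float>((x[i] > 0.f) - (x[i] < 0.f));
  return out;
}

int main() {
  auto out = sign_op({-3.f, 0.f, 2.5f});
  assert(out[0] == -1.f && out[1] == 0.f && out[2] == 1.f);
}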
paddle/operators/split_op.cc

@@ -67,30 +67,38 @@ class SplitOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   SplitOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("X", "the input tensor of split operator.");
-    AddOutput("Out", "the output tensors of split operator.").AsDuplicable();
+    AddInput("X", "(Tensor) Input tensor of the split operator.");
+    AddOutput("Out", "(Tensor) Output tensors of the split operator.")
+        .AsDuplicable();
     AddComment(R"DOC(
-Split the input tensor into multiple sub-tensors.
-Example:
-  Input = [[1,2],
-           [3,4],
-           [5,6]]
-  sections = [2,1]
-  axis = 0
-  Output[0] = [[1,2],
-               [3,4]]
-  Output[1] = [[5,6]]
+Split operator
+This operator splits the input tensor into multiple sub-tensors.
+Example:
+  Input = [[1,2],
+           [3,4],
+           [5,6]]
+  sections = [2,1]
+  axis = 0
+  Output[0] = [[1,2],
+               [3,4]]
+  Output[1] = [[5,6]]
 )DOC");
     AddAttr<std::vector<int>>("sections",
-                              "the length for each"
-                              "output along with the specify axis.")
+                              "(vector<int>) "
+                              "the length of each output along the "
+                              "specified axis.")
         .SetDefault(std::vector<int>{});
     AddAttr<int>("num",
-                 "number of the sub-tensors, it must evenly divide "
+                 "(int, default 0)"
+                 "Number of sub-tensors. This must evenly divide "
                  "Input.dims()[axis]")
         .SetDefault(0);
-    AddAttr<int>("axis", "The axis which the input will be splited on.")
+    AddAttr<int>("axis",
+                 "(int, default 0) "
+                 "The axis which the input will be splited on.")
         .SetDefault(0);
   }
 };
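The Example in the new comment (a 3x2 input split with sections = [2,1] along axis 0) can be reproduced with plain row vectors. A hypothetical sketch of the sections semantics, not the operator's kernel:

#include <cassert>
#include <vector>

using Matrix = std::vector<std::vector<int>>;  // rows of a 2-D tensor

// Split along axis 0 into sub-tensors whose row counts are given by `sections`.
std::vector<Matrix> split_rows(const Matrix& input, const std::vector<int>& sections) {
  std::vector<Matrix> outputs;
  size_t offset = 0;
  for (int len : sections) {
    outputs.emplace_back(input.begin() + offset, input.begin() + offset + len);
    offset += len;
  }
  return outputs;
}

int main() {
  Matrix input = {{1, 2}, {3, 4}, {5, 6}};
  auto outs = split_rows(input, {2, 1});
  assert((outs[0] == Matrix{{1, 2}, {3, 4}}));
  assert((outs[1] == Matrix{{5, 6}}));
}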
paddle/operators/squared_l2_distance_op.cc

@@ -59,23 +59,26 @@ class SquaredL2DistanceOpMaker : public framework::OpProtoAndCheckerMaker {
   SquaredL2DistanceOpMaker(framework::OpProto *proto,
                            framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("X", "Input of SquaredL2DistanceOp.");
-    AddInput("Y", "Target of SquaredL2DistanceOp.");
+    AddInput("X", "(Tensor) Input of SquaredL2DistanceOp.");
+    AddInput("Y", "(Tensor) Target of SquaredL2DistanceOp.");
     AddOutput("sub_result",
-              "Buffering substraction result which "
+              "(Tensor) Buffering subtraction result which "
               "will be reused in backward.")
         .AsIntermediate();
-    AddOutput("Out", "Squared l2 distance between input and target.");
+    AddOutput("Out", "(Tensor) Squared l2 distance between input and target.");
     AddComment(R"DOC(
-SquaredL2DistanceOp will cacluate the squared L2 distance for
-input and target. Number of distance value equals to the
-first dimension of input. First dimension of target could be equal to
-input or to 1. If the first dimension of target is 1, SquaredL2DistanceOp
-will broadcast target's first dimension to input's first dimension.
-You can decide whether calculate the gradient of input and target.
-Both the input X and Y can carry the LoD (Level of Details) information,
-or not. But the output only shares the LoD with input X.
+SquaredL2Distance operator
+This operator will cacluate the squared L2 distance for the input and
+the target. Number of distance value will be equal to the first dimension
+of input. First dimension of the target could be equal to the input or to 1.
+If the first dimension of target is 1, the operator will broadcast target's
+first dimension to input's first dimension. During backward propagation,
+the user can decide whether to calculate the gradient of the input or
+the target or both.
+Both the input X and Y can carry the LoD (Level of Details) information.
+However, the output only shares the LoD information with input X.
 )DOC");
   }
 };
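The comment specifies one squared distance per row of X, with Y's first dimension broadcast when it equals 1. A minimal sketch of that forward contract only (gradient handling and the buffered sub_result are omitted); the helper name and plain row vectors are illustrative assumptions:

#include <cassert>
#include <vector>

using Matrix = std::vector<std::vector<float>>;

// Per-row squared L2 distance.  If Y has a single row, it is broadcast
// against every row of X, as the operator comment describes.
std::vector<float> squared_l2_distance(const Matrix& x, const Matrix& y) {
  std::vector<float> out(x.size(), 0.f);
  for (size_t i = 0; i < x.size(); ++i) {
    const auto& target = (y.size() == 1) ? y[0] : y[i];
    for (size_t j = 0; j < x[i].size(); ++j) {
      float d = x[i][j] - target[j];
      out[i] += d * d;
    }
  }
  return out;
}

int main() {
  auto out = squared_l2_distance({{1.f, 2.f}, {3.f, 4.f}}, {{1.f, 1.f}});  // Y broadcast
  assert(out[0] == 1.f && out[1] == 13.f);  // (0^2 + 1^2) and (2^2 + 3^2)
}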
paddle/operators/squared_l2_norm_op.cc

@@ -52,13 +52,13 @@ class SquaredL2NormOpMaker : public framework::OpProtoAndCheckerMaker {
                        framework::OpAttrChecker *op_checker)
       : framework::OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X", "(Tensor) The input of squared_l2_norm op.");
-    AddOutput("Out", "(Float) The output of squared_l2_norm op.");
+    AddOutput("Out", "(Scalar) The output of squared_l2_norm op.");
     AddComment(R"DOC(
 SquaredL2Norm Operator.
 Computes the squared L2 norm of a tensor.
-Out = sum (X ** 2)
+$$Out = \sum_{i} X_{i}^2$$
 )DOC");
   }
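The new formula $$Out = \sum_{i} X_{i}^2$$ is a full reduction of the tensor to one scalar, which also explains the (Float) to (Scalar) change above. A minimal sketch:

#include <cassert>
#include <vector>

// Out = sum_i X_i^2, reduced to a single scalar.
float squared_l2_norm(const std::vector<float>& x) {
  float out = 0.f;
  for (float v : x) out += v * v;
  return out;
}

int main() {
  assert(squared_l2_norm({1.f, 2.f, 3.f}) == 14.f);  // 1 + 4 + 9
}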
paddle/operators/sum_op.cc

@@ -45,13 +45,15 @@ class SumOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   SumOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("X", "the input tensors of sum operator.").AsDuplicable();
-    AddOutput("Out", "the output tensor of sum operator.");
+    AddInput("X", "(vector<Tensor>) The input tensors of sum operator.")
+        .AsDuplicable();
+    AddOutput("Out", "(Tensor) The output tensor of sum operator.");
     AddComment(R"DOC(
-Sum the input tensors.
-All the inputs can carry the LoD (Level of Details) information,
-or not. But the output only shares the LoD with the first input.
+Sum operator.
+This operators sums the input tensors. All the inputs can carry the
+LoD (Level of Details) information. However, the output only shares
+the LoD information with the first input.
 )DOC");
   }
 };
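The sum operator adds its duplicable inputs element-wise into one output tensor. A minimal sketch over same-shaped flat vectors (LoD propagation omitted); not the operator's actual kernel:

#include <cassert>
#include <vector>

// Element-wise sum of several same-shaped tensors (flattened to 1-D here).
std::vector<float> sum_op(const std::vector<std::vector<float>>& inputs) {
  std::vector<float> out(inputs.front().size(), 0.f);
  for (const auto& x : inputs)
    for (size_t i = 0; i < out.size(); ++i) out[i] += x[i];
  return out;
}

int main() {
  auto out = sum_op({{1.f, 2.f}, {3.f, 4.f}, {5.f, 6.f}});
  assert(out[0] == 9.f && out[1] == 12.f);
}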