Commit 6a07af06
Authored Nov 03, 2017 by Kexin Zhao

polish doc c to d

Parent 83c22816

Showing 8 changed files with 78 additions and 56 deletions (+78 -56)
paddle/operators/accuracy_op.cc             +13   -9
paddle/operators/conv_cudnn_op.cc            +1   -1
paddle/operators/cos_sim_op.cc               +7   -6
paddle/operators/crop_op.cc                 +22  -21
paddle/operators/cross_entropy_op.cc         +7   -6
paddle/operators/decayed_adagrad_op.cc      +10   -3
paddle/operators/dropout_op.cc               +8   -6
paddle/operators/dynamic_recurrent_op.cc    +10   -4
paddle/operators/accuracy_op.cc
@@ -33,7 +33,7 @@ class AccuracyOp : public framework::OperatorWithKernel {
     auto inference_dim = ctx->GetInputDim("Out");
     auto label_dim = ctx->GetInputDim("Label");
-    // Assume indices has same shape with infernece, because
+    // Assume indices has same shape as inference, because
     // it's the output of topk.
     PADDLE_ENFORCE_EQ(label_dim.size(), 2, "label's rank must be 2.");
@@ -60,20 +60,24 @@ class AccuracyOpMaker : public framework::OpProtoAndCheckerMaker {
                  framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
     // TODO(typhoonzero): support both inference value and indices.
-    AddInput("Out", "topk (inferences) the network output");
-    AddInput("Indices", "topk (indices) the network output");
+    AddInput("Out", "The network output of topk (inferences)");
+    AddInput("Indices", "The the network output of topk (indices)");
     AddInput("Label", "Label of the training data");
     // TODO(typhoonzero): AddInput("Weight", ...
     AddOutput("Accuracy", "The accuracy of current batch");

     AddComment(R"DOC(
-Accuracy. It will print accuracy rate for classification.
-The accuracy is:
-.. math::
-accuracy = \\frac{NumOfCorrectPredicts}{NumOfAllSamples})
-Both the input `Out` and `Label` can carry the LoD (Level of Details)
-information, or not. But the output only shares the LoD with input `Inference`.
+Accuracy Operator.
+
+It will print accuracy rate for classification.
+The accuracy is calculated as follows:
+
+$$accuracy = \frac{NumOfCorrectPredicts}{NumOfAllSamples}$$
+
+Both the input Out and Label can carry the LoD (Level of Details)
+information, or not. But the output only shares the LoD information
+with the input Out(Inference).
 )DOC");
   }
 };
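The polished comment describes accuracy as the number of correct predictions over the number of samples, with Out/Indices coming from a top-k operator. A minimal standalone sketch of that computation (plain C++, not Paddle's kernel; the row-major layout and names are illustrative):

```cpp
#include <cstdio>
#include <vector>

// Count a sample as correct if its label appears among its top-k indices,
// mirroring accuracy = NumOfCorrectPredicts / NumOfAllSamples.
float Accuracy(const std::vector<long>& indices,  // [N x K], row-major
               const std::vector<long>& labels,   // [N x 1]
               int N, int K) {
  int correct = 0;
  for (int i = 0; i < N; ++i) {
    for (int k = 0; k < K; ++k) {
      if (indices[i * K + k] == labels[i]) {
        ++correct;
        break;
      }
    }
  }
  return static_cast<float>(correct) / static_cast<float>(N);
}

int main() {
  // 3 samples, top-2 indices per sample.
  std::vector<long> indices = {1, 0,  2, 3,  0, 4};
  std::vector<long> labels = {1, 3, 5};
  std::printf("accuracy = %.4f\n", Accuracy(indices, labels, 3, 2));  // 0.6667
  return 0;
}
```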
paddle/operators/conv_cudnn_op.cc
@@ -29,7 +29,7 @@ class CudnnConvOpMaker : public Conv2DOpMaker {
              "workspace is a section of GPU memory which will be "
              "allocated/freed each time the operator runs, larger "
              "workspace size can increase performance but also requires "
-             "better hardward. This size should be carefully setted.")
+             "better hardware. This size should be chosen carefully.")
         .SetDefault(4096);
   }
 };
paddle/operators/cos_sim_op.cc
@@ -79,15 +79,16 @@ class CosSimOpMaker : public framework::OpProtoAndCheckerMaker {
     AddComment(R"DOC(
 Cosine Similarity Operator.

-The equation is: Out = X^T * Y / (sqrt(X^T * X) * sqrt(Y^T * Y)).
+$Out = X^T * Y / (\sqrt{X^T * X} * \sqrt{Y^T * Y})$

-The input `X` and `Y` must have the same shape, except that the 1st dimension
-of input `Y` could be just 1 (different from input `X`), which will be
-broadcasted to match the shape of input `X` before computing their cosine
+The input X and Y must have the same shape, except that the 1st dimension
+of input Y could be just 1 (different from input X), which will be
+broadcasted to match the shape of input X before computing their cosine
 similarity.

-Both the input `X` and `Y` can carry the LoD (Level of Details) information,
-or not. But the output only shares the LoD with input `X`.
+Both the input X and Y can carry the LoD (Level of Details) information,
+or not. But the output only shares the LoD information with input X.
 )DOC");
   }
 };
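The formula being converted to inline math here is ordinary row-wise cosine similarity. A minimal sketch for a single pair of vectors (plain C++, illustrative names only; the Paddle operator additionally broadcasts Y when its first dimension is 1):

```cpp
#include <cmath>
#include <cstdio>
#include <vector>

// Cosine similarity of two equally sized vectors: dot(x, y) / (||x|| * ||y||).
double CosSim(const std::vector<double>& x, const std::vector<double>& y) {
  double dot = 0.0, xx = 0.0, yy = 0.0;
  for (size_t i = 0; i < x.size(); ++i) {
    dot += x[i] * y[i];
    xx += x[i] * x[i];
    yy += y[i] * y[i];
  }
  return dot / (std::sqrt(xx) * std::sqrt(yy));
}

int main() {
  std::vector<double> x = {1.0, 2.0, 3.0};
  std::vector<double> y = {2.0, 4.0, 6.0};
  std::printf("cos_sim = %.4f\n", CosSim(x, y));  // 1.0000 for parallel vectors
  return 0;
}
```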
paddle/operators/crop_op.cc
@@ -56,34 +56,35 @@ class CropOpMaker : public framework::OpProtoAndCheckerMaker {
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X",
              "The input of pad op. "
-             "The input should be a k-D tensor(k > 0 and k < 7)");
+             "The input should be a k-D tensor(k > 0 and k < 7).");
     AddInput("Y",
-             "The input used as reference for cropping"
-             " with the same dimension as X.")
+             "The input used as reference for cropping, "
+             "which is of the same dimensions as X.")
         .AsDispensable();
     AddOutput("Out",
-              "The output of crop op "
-              "with the same dimension as X.");
+              "The output of crop op, "
+              "which is of the same dimensions as X.");
     AddAttr<std::vector<int>>("offsets",
-                              "A list<int> describing offsets to be cropped."
-                              "The size of offsets list should be as same as "
-                              "dimension size of input X.");
+                              "A list<int> describing offsets to be cropped. "
+                              "The size of offsets list should be the same as "
+                              "the dimension size of input X.");
     AddAttr<std::vector<int>>("shape",
-                              "A list<int> describing the shape of output."
-                              "The size of shape list should be as same as "
-                              "dimension size of input X.")
+                              "A list<int> describing the shape of output. "
+                              "The size of shape list should be the same as "
+                              "the dimension size of input X.")
         .SetDefault(std::vector<int>());
     AddComment(R"DOC(
 Crop Operator.

 Crop input into output, as specified by offsets and shape.

 There are two ways to set shape:
-1. referenc input: crop input X as shape as reference input.
-            The dimension of reference input should
-            be as same as input X.
-2. shape list: crop input X by shape described by a list<int>.
-            The size of shape list should be as same as
-            dimension size of input X.
+1. reference input: crop input X into the same shape as reference input.
+                    The dimension of reference input should
+                    be the same as the dimension of input X.
+2. shape list: crop input X into the shape described by a list<int>.
+               The size of shape list should be the same as
+               the dimension size of input X.

 The input should be a k-D tensor(k > 0 and k < 7). As an example:
@@ -91,20 +92,20 @@ Given:
     X = [[0, 1, 2, 0, 0]
          [0, 3, 4, 0, 0]
-         [0, 0, 0, 0, 0]]
+         [0, 0, 0, 0, 0]],

 and

-    offsets = [0, 1]
+    offsets = [0, 1],

 and

-    shape = [2, 2]
+    shape = [2, 2],

-then we get
+we get:

     Out = [[1, 2],
-           [3, 4]]
+           [3, 4]].
 )DOC");
   }
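The worked example in the doc string (X with offsets = [0, 1] and shape = [2, 2]) amounts to copying a shape-sized window starting at the given offsets. A minimal 2-D sketch (plain C++, not the Paddle kernel; row-major layout and helper names are assumptions):

```cpp
#include <cstdio>
#include <vector>

// Copy a (shape[0] x shape[1]) window of X starting at (offsets[0], offsets[1]).
std::vector<int> Crop2D(const std::vector<int>& x, int rows, int cols,
                        const std::vector<int>& offsets,
                        const std::vector<int>& shape) {
  std::vector<int> out(shape[0] * shape[1]);
  for (int i = 0; i < shape[0]; ++i)
    for (int j = 0; j < shape[1]; ++j)
      out[i * shape[1] + j] = x[(i + offsets[0]) * cols + (j + offsets[1])];
  return out;
}

int main() {
  // The X, offsets and shape from the operator comment above.
  std::vector<int> X = {0, 1, 2, 0, 0,
                        0, 3, 4, 0, 0,
                        0, 0, 0, 0, 0};
  std::vector<int> out = Crop2D(X, 3, 5, /*offsets=*/{0, 1}, /*shape=*/{2, 2});
  std::printf("Out = [[%d, %d], [%d, %d]]\n", out[0], out[1], out[2], out[3]);
  // Prints Out = [[1, 2], [3, 4]], matching the example in the doc string.
  return 0;
}
```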
paddle/operators/cross_entropy_op.cc
@@ -117,9 +117,9 @@ class CrossEntropyOpMaker : public framework::OpProtoAndCheckerMaker {
              "Label",
              "(Tensor, default Tensor<int>), the ground truth which is "
              "a 2-D tensor. "
-             "When soft_label is set to false, `Label` is a Tensor<int> with shape "
+             "When soft_label is set to false, Label is a Tensor<int> with shape "
              "[N x 1]. "
-             "When soft_label is set to true, `Label` is a Tensor<float/double> "
+             "When soft_label is set to true, Label is a Tensor<float/double> "
              "with shape [N x K].");
     AddOutput("Y",
               "(Tensor, default Tensor<float>), a 2-D tensor "
@@ -137,13 +137,13 @@ computation.
 1) One-hot cross-entropy:
     soft_label = false, Label[i, 0] indicates the class index for sample i:

-                Y[i] = -log(X[i, Label[i]])
+                $Y[i] = -\log(X[i, Label[i]])$

 2) Soft-label cross-entropy:
     soft_label = true, Label[i, j] indicates the soft label of class j
     for sample i:

-                Y[i] = \sum_j{-Label[i, j] * log(X[i, j])}
+                $Y[i] = \sum_j{-Label[i, j] * log(X[i, j])}$

    Please make sure that in this case the summuation of each row of Label
    equals one.
@@ -153,8 +153,9 @@ computation.
    non-zero element (equals 1), soft-label cross-entropy degenerates to a
    one-hot cross-entropy with one-hot label representation.

-Both the input `X` and `Label` can carry the LoD (Level of Details) information,
-or not. But the output only shares the LoD with input `X`.
+Both the input X and Label can carry the LoD (Level of Details) information,
+or not. But the output only shares the LoD information with input X.
 )DOC");
   }
 };
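The two formulas in the comment cover the one-hot case, $Y[i] = -\log(X[i, Label[i]])$, and the soft-label case, $Y[i] = -\sum_j Label[i, j] \log(X[i, j])$. A small per-sample sketch of both losses (plain C++, illustrative names; X is assumed to hold probabilities, not logits):

```cpp
#include <cmath>
#include <cstdio>
#include <vector>

// One-hot cross-entropy: Y[i] = -log(X[i, Label[i]]).
double HardCE(const std::vector<double>& x_row, int label) {
  return -std::log(x_row[label]);
}

// Soft-label cross-entropy: Y[i] = -sum_j Label[i, j] * log(X[i, j]).
double SoftCE(const std::vector<double>& x_row,
              const std::vector<double>& label_row) {
  double loss = 0.0;
  for (size_t j = 0; j < x_row.size(); ++j)
    loss -= label_row[j] * std::log(x_row[j]);
  return loss;
}

int main() {
  std::vector<double> probs = {0.1, 0.7, 0.2};  // one row of X, sums to 1
  std::vector<double> soft = {0.0, 0.9, 0.1};   // one row of soft labels, sums to 1
  std::printf("hard CE = %.4f\n", HardCE(probs, 1));     // -log(0.7) ~= 0.3567
  std::printf("soft CE = %.4f\n", SoftCE(probs, soft));  // ~= 0.4819
  return 0;
}
```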
paddle/operators/decayed_adagrad_op.cc
@@ -75,11 +75,18 @@ class DecayedAdagradOpMaker : public framework::OpProtoAndCheckerMaker {
                    "Constant for numerical stability")
         .SetDefault(1.0e-6f);
     AddComment(R"DOC(
-Decayed Adagrad
+Decayed Adagrad Optimizer.

-moment_out = decay * moment + (1 - decay) * grad * grad
-param_out = param - learning_rate * grad / (sqrt(moment_out) + epsilon)
+The update is done as follows:
+
+$$
+moment\_out = decay * moment + (1 - decay) * grad * grad \\
+param\_out = param - \frac{learning\_rate * grad}{\sqrt{moment\_out} + epsilon}
+$$
+
+The original paper(http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)
+does not have an epsilon attribute. It is added here for numerical
+stability to avoid the division by zero error.
 )DOC");
   }
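The added $$...$$ block spells out the update rule; a minimal element-wise sketch of the same two assignments (plain C++, not the Paddle kernel; variable names mirror the doc string):

```cpp
#include <cmath>
#include <cstdio>
#include <vector>

// Decayed Adagrad update, element by element:
//   moment_out = decay * moment + (1 - decay) * grad * grad
//   param_out  = param - learning_rate * grad / (sqrt(moment_out) + epsilon)
void DecayedAdagradUpdate(std::vector<double>& param, std::vector<double>& moment,
                          const std::vector<double>& grad, double learning_rate,
                          double decay, double epsilon) {
  for (size_t i = 0; i < param.size(); ++i) {
    moment[i] = decay * moment[i] + (1.0 - decay) * grad[i] * grad[i];
    param[i] -= learning_rate * grad[i] / (std::sqrt(moment[i]) + epsilon);
  }
}

int main() {
  std::vector<double> param = {1.0, -0.5};
  std::vector<double> moment = {0.0, 0.0};
  std::vector<double> grad = {0.2, -0.1};
  DecayedAdagradUpdate(param, moment, grad, /*learning_rate=*/0.01,
                       /*decay=*/0.95, /*epsilon=*/1.0e-6);
  std::printf("param = [%.6f, %.6f]\n", param[0], param[1]);
  return 0;
}
```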
paddle/operators/dropout_op.cc
@@ -43,22 +43,24 @@ class DropoutOpMaker : public framework::OpProtoAndCheckerMaker {
   DropoutOpMaker(framework::OpProto *proto,
                  framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddAttr<float>("dropout_prob", "Probability of setting units to zero.")
-        .SetDefault(.5f);
-    AddAttr<bool>("is_training", "Whether in training phase.").SetDefault(true);
-    AddAttr<int>("seed", "Dropout random seed.").SetDefault(0);
     AddInput("X", "The input of dropout op.");
     AddOutput("Out", "The output of dropout op.");
     AddOutput("Mask", "The random sampled dropout mask.").AsIntermediate();

+    AddAttr<float>("dropout_prob", "Probability of setting units to zero.")
+        .SetDefault(.5f);
+    AddAttr<bool>("is_training", "True if in training phase.").SetDefault(true);
+    AddAttr<int>("seed", "Dropout random seed.").SetDefault(0);
+
     AddComment(R"DOC(
 Dropout Operator.

-'Dropout' refers to randomly dropping out units in a nerual network. It is a
+Dropout refers to randomly dropping out units in a nerual network. It is a
 regularization technique for reducing overfitting by preventing neuron
 co-adaption during training. The dropout operator randomly set (according to
 the given dropout probability) the outputs of some units to zero, while others
-being set to their inputs.
+are set equal to their corresponding inputs.
 )DOC");
   }
 };
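As described in the comment, the operator zeroes each unit with probability dropout_prob, passes the others through unchanged, and records the sampled mask as an intermediate output. A minimal training-time forward-pass sketch (plain C++; the seeding scheme and the absence of 1/(1-p) rescaling simply follow the doc string above and are not a statement about Paddle's kernel):

```cpp
#include <cstdio>
#include <random>
#include <vector>

// Dropout forward pass: zero each element with probability dropout_prob,
// keep the rest unchanged, and record the sampled 0/1 mask.
void DropoutForward(const std::vector<float>& x, float dropout_prob, int seed,
                    std::vector<float>* out, std::vector<float>* mask) {
  std::mt19937 rng(seed);
  std::uniform_real_distribution<float> uniform(0.0f, 1.0f);
  out->resize(x.size());
  mask->resize(x.size());
  for (size_t i = 0; i < x.size(); ++i) {
    (*mask)[i] = uniform(rng) < dropout_prob ? 0.0f : 1.0f;
    (*out)[i] = x[i] * (*mask)[i];
  }
}

int main() {
  std::vector<float> x = {1.0f, 2.0f, 3.0f, 4.0f}, out, mask;
  DropoutForward(x, /*dropout_prob=*/0.5f, /*seed=*/0, &out, &mask);
  for (float v : out) std::printf("%.1f ", v);  // kept units pass through unchanged
  std::printf("\n");
  return 0;
}
```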
paddle/operators/dynamic_recurrent_op.cc
@@ -386,12 +386,13 @@ class DynamicRecurrentOpProtoAndCheckerMaker
         RNNAlgorithm::kArgNames[RNNAlgorithm::ComputeMode::kForward];
     // inputs and outputs stored in proto
     AddInput(name.inlinks,
-             "the inputs that need to be segmented for each step.")
+             "The inputs that need to be segmented for each step.")
         .AsDuplicable();
-    AddInput(name.initial_states, "variables to initialize states.")
+    AddInput(name.initial_states, "Variables to initialize the states.")
         .AsDuplicable();

-    AddOutput(name.outlinks, "the outputs that need to concated for all steps.")
+    AddOutput(name.outlinks,
+              "The outputs that need to be concatenated for all steps.")
         .AsDuplicable();
     AddOutput(name.step_scopes, "step scopes");
@@ -399,7 +400,12 @@ class DynamicRecurrentOpProtoAndCheckerMaker
     AddAttr<std::vector<std::string>>(name.ex_states, "names of ex_states");
     AddAttr<std::vector<std::string>>(name.states, "names of states");
-    AddComment("This is a RNN operator for varience-length sequences.");
+    AddComment(R"DOC(
+Dynamic Recurrent Operator.
+
+This is a RNN operator for varience-length sequences.
+)DOC");
   }
 };