Commit 11f189ba
Authored Oct 24, 2018 by nhzlx

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into fix_demo_ci_trt

test=develop

Parents: ae8f2607, 88376697

Showing 16 changed files with 20 additions and 217 deletions (+20 −217):

paddle/fluid/framework/framework.proto           +0   −1
paddle/fluid/framework/op_proto_maker.cc         +0   −53
paddle/fluid/framework/op_proto_maker.h          +0   −11
paddle/fluid/framework/op_proto_maker_test.cc    +0   −117
paddle/fluid/framework/parallel_executor.cc      +4   −6
paddle/fluid/operators/activation_op.cc          +1   −1
paddle/fluid/operators/adam_op.cc                +3   −3
paddle/fluid/operators/batch_norm_op.cc          +3   −5
paddle/fluid/operators/conv_op.cc                +2   −4
paddle/fluid/operators/elementwise_op.h          +0   −5
paddle/fluid/operators/mean_op.cc                +1   −1
paddle/fluid/operators/pool_op.cc                +2   −4
paddle/fluid/operators/sgd_op.cc                 +1   −2
paddle/fluid/operators/softmax_op.cc             +1   −2
paddle/fluid/operators/sum_op.cc                 +1   −1
paddle/fluid/operators/top_k_op.cc               +1   −1
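
As the per-file diffs below show, the develop changes pulled in by this merge remove the in-place "Reuse" marker: the `reuse` field in framework.proto, the `Reuse`/`CheckReuseVars` members of `OpProtoAndCheckerMaker` and their tests, and the `.Reuse(...)` calls in the individual operator makers. For orientation, a minimal sketch of the pattern being deleted; `FooOpMaker` and its comments are made up for illustration, while AddInput/AddOutput/Reuse are the builder calls that the diffs actually touch.

// Sketch only (hypothetical FooOpMaker): the shape of an operator maker
// with respect to the Reuse marker, before and after this merge.
class FooOpMaker : public paddle::framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "(Tensor) input of foo op");
    // Before this merge: the output declared that it reuses X's memory.
    //   AddOutput("Out", "(Tensor) output of foo op").Reuse("X");
    // After this merge the reuse annotation is gone:
    AddOutput("Out", "(Tensor) output of foo op");
  }
};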

paddle/fluid/framework/framework.proto

@@ -80,7 +80,6 @@ message OpProto {
     optional bool duplicable = 3 [ default = false ];
     optional bool intermediate = 4 [ default = false ];
     optional bool dispensable = 5 [ default = false ];
-    optional string reuse = 6;
   }

   // AttrProto describes the C++ type Attribute.

paddle/fluid/framework/op_proto_maker.cc

@@ -21,7 +21,6 @@ namespace framework {
 void OpProtoAndCheckerMaker::Validate() {
   validated_ = true;
   CheckNoDuplicatedInOutAttrs();
-  CheckReuseVars();
 }

 OpProtoAndCheckerMaker::VariableBuilder OpProtoAndCheckerMaker::AddInput(
@@ -40,40 +39,6 @@ OpProtoAndCheckerMaker::VariableBuilder OpProtoAndCheckerMaker::AddOutput(
   return OpProtoAndCheckerMaker::VariableBuilder{output};
 }

-void OpProtoAndCheckerMaker::Reuse(const std::string& name,
-                                   const std::string& reused_name) {
-  bool found = false;
-  proto::OpProto::Var* var;
-
-  for (auto& var : proto_->inputs()) {
-    if (var.name() == reused_name) {
-      found = true;
-      break;
-    }
-  }
-  PADDLE_ENFORCE(found == true,
-                 "Input/Output name: %s reused_name: %s, one of them is not "
-                 "exists or not matched.",
-                 name, reused_name);
-
-  found = false;
-  for (int i = 0; i < proto_->outputs().size(); ++i) {
-    var = proto_->mutable_outputs()->Mutable(i);
-    if (var->name() == name) {
-      PADDLE_ENFORCE(!var->has_reuse(),
-                     "Output(%s) has been set reused var of %s", name,
-                     var->reuse());
-      found = true;
-      var->set_reuse(reused_name);
-      break;
-    }
-  }
-  PADDLE_ENFORCE(found == true,
-                 "Input/Output name: %s reused_name: %s, one of them is not "
-                 "exists or not matched.",
-                 name, reused_name);
-}
-
 void OpProtoAndCheckerMaker::CheckNoDuplicatedInOutAttrs() {
   std::unordered_set<std::string> names;
   auto checker = [&](const std::string& name) {
@@ -91,24 +56,6 @@ void OpProtoAndCheckerMaker::CheckNoDuplicatedInOutAttrs() {
   }
 }

-void OpProtoAndCheckerMaker::CheckReuseVars() {
-  std::unordered_set<std::string> names;
-  for (auto& input : proto_->inputs()) {
-    names.insert(input.name());
-  }
-  auto checker = [&](const std::string& name, const std::string& reused) {
-    PADDLE_ENFORCE(
-        names.count(reused),
-        "Output [%s] reuse Input [%s], but the input is not registered.", name,
-        reused);
-  };
-  for (auto& output : proto_->outputs()) {
-    if (output.has_reuse()) {
-      checker(output.name(), output.reuse());
-    }
-  }
-}
-
 void OpProtoAndCheckerMaker::operator()(proto::OpProto* proto,
                                         OpAttrChecker* attr_checker) {
   proto_ = proto;

paddle/fluid/framework/op_proto_maker.h

@@ -14,8 +14,6 @@ limitations under the License. */

 #pragma once
 #include <string>
-#include <unordered_set>
-
 #include "glog/logging.h"
 #include "paddle/fluid/framework/attribute.h"
 #include "paddle/fluid/framework/framework.pb.h"
@@ -73,11 +71,6 @@ class OpProtoAndCheckerMaker {
       var_->set_dispensable(true);
       return *this;
     }
-
-    VariableBuilder& Reuse(const std::string& name) {
-      var_->set_reuse(name);
-      return *this;
-    }
   };

   VariableBuilder AddInput(const std::string& name, const std::string& comment);
@@ -85,8 +78,6 @@ class OpProtoAndCheckerMaker {
   VariableBuilder AddOutput(const std::string& name,
                             const std::string& comment);

-  void Reuse(const std::string& name, const std::string& reused_name);
-
   template <typename T>
   TypedAttrChecker<T>& AddAttr(const std::string& name,
                                const std::string& comment,
@@ -105,8 +96,6 @@ class OpProtoAndCheckerMaker {
   void CheckNoDuplicatedInOutAttrs();
   void Validate();

-  void CheckReuseVars();
-
   proto::OpProto* proto_;
   OpAttrChecker* op_checker_;
   bool validated_{false};

paddle/fluid/framework/op_proto_maker_test.cc

@@ -47,120 +47,3 @@ TEST(ProtoMaker, DuplicatedInOut) {
   ASSERT_THROW(proto_maker(&op_proto, &op_checker),
                paddle::platform::EnforceNotMet);
 }
-
-class TestInplaceProtoMaker : public paddle::framework::OpProtoAndCheckerMaker {
- public:
-  void Make() {
-    AddInput("X", "input of test op");
-    AddOutput("XOut", "output of test op").Reuse("X");
-  }
-};
-
-class TestInplaceProtoMaker2
-    : public paddle::framework::OpProtoAndCheckerMaker {
- public:
-  void Make() {
-    AddInput("X", "input of test op");
-    AddOutput("XOut", "output of test op").Reuse("X");
-    AddOutput("NoOut", "output of test op").Reuse("NotExists");
-  }
-};
-
-TEST(ProtoMaker, InplaceOutput) {
-  paddle::framework::proto::OpProto op_proto, op_proto2;
-  paddle::framework::OpAttrChecker op_checker;
-  TestInplaceProtoMaker proto_maker;
-  TestInplaceProtoMaker2 proto_maker2;
-  proto_maker(&op_proto, &op_checker);
-  ASSERT_THROW(proto_maker2(&op_proto2, &op_checker),
-               paddle::platform::EnforceNotMet);
-}
-
-// normal reuse
-class TestReuseProtoMaker : public paddle::framework::OpProtoAndCheckerMaker {
- public:
-  void Make() {
-    AddInput("X", "input of test op");
-    AddInput("Y", "input of test op");
-    AddOutput("Out", "output of test op");
-    AddOutput("XOut", "output of test op");
-    // avoid destructor exception.
-    // Validate();
-    TestReuse();
-  }
-
-  virtual void TestReuse() {}
-};
-
-// test duplicate reuse error
-class TestReuseProtoMaker2 : public TestReuseProtoMaker {
- public:
-  void TestReuse() {
-    Reuse("Out", "X");
-    Reuse("Out", "Y");
-  }
-};
-
-// NotExists Input
-class TestReuseProtoMaker3 : public TestReuseProtoMaker {
- public:
-  void TestReuse() {
-    Reuse("Out", "NotExists");
-    Reuse("XOut", "X");
-  }
-};
-
-// NotExists Output
-class TestReuseProtoMaker4 : public TestReuseProtoMaker {
- public:
-  void TestReuse() { Reuse("NotExists", "X"); }
-};
-
-TEST(ProtoMaker, Reuse) {
-  paddle::framework::proto::OpProto op_proto;
-  paddle::framework::OpAttrChecker op_checker;
-  TestReuseProtoMaker proto_maker;
-  proto_maker(&op_proto, &op_checker);
-}
-
-// NOTE(dzhwinter):
-// There is a Fatal CHECK on base class destructor, which will call abort inside
-// instead of
-// throw an exception. If we throw an exception in Make(), we will trigger the
-// CHECK and terminate the tests.
-//
-// I had tried to replace the default CHECK with a exception, however, it's
-// still not supported by glog.
-// the details:
-// https://github.com/google/glog/issues/249
-// https://github.com/facebookresearch/TensorComprehensions/issues/351
-/*
-TEST(ProtoMaker, ReuseWithException) {
-  paddle::framework::proto::OpProto op_proto2, op_proto3, op_proto4;
-  paddle::framework::OpAttrChecker op_checker;
-  TestReuseProtoMaker2 proto_maker2;
-  TestReuseProtoMaker3 proto_maker3;
-  TestReuseProtoMaker4 proto_maker4;
-
-  EXPECT_THROW(proto_maker2(&op_proto2, &op_checker),
-               paddle::platform::EnforceNotMet);
-
-  EXPECT_THROW(proto_maker3(&op_proto3, &op_checker),
-               paddle::platform::EnforceNotMet);
-
-  EXPECT_THROW(proto_maker4(&op_proto4, &op_checker),
-               paddle::platform::EnforceNotMet);
-}
-
-void FailureFunction() {
-  throw std::runtime_error("Check failed in destructor.");
-  // return 0;
-}
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  google::InstallFailureFunction(&FailureFunction);
-  return RUN_ALL_TESTS();
-}
-*/

paddle/fluid/framework/parallel_executor.cc

@@ -156,12 +156,10 @@ ParallelExecutor::ParallelExecutor(
         params, member_->local_scopes_, member_->use_cuda_);
 #endif

-  if (VLOG_IS_ON(5)) {
-    // If the loss_var_name is given, the number of graph should be only one.
-    if (loss_var_name.size()) {
-      PADDLE_ENFORCE_EQ(ir::GraphNum(*graph), 1,
-                        "The number of graph should be only one");
-    }
+  // If the loss_var_name is given, the number of graph should be only one.
+  if (loss_var_name.size()) {
+    PADDLE_ENFORCE_EQ(ir::GraphNum(*graph), 1,
+                      "The number of graph should be only one");
   }

   if (exec_strategy.type_ == ExecutionStrategy::kDefault) {

paddle/fluid/operators/activation_op.cc

@@ -28,7 +28,7 @@ using paddle::framework::Tensor;
  public:                                                              \
   void Make() override {                                              \
     AddInput("X", "Input of " #OP_NAME " operator");                  \
-    AddOutput("Out", "Output of " #OP_NAME " operator").Reuse("X");   \
+    AddOutput("Out", "Output of " #OP_NAME " operator");              \
     AddAttr<bool>("use_mkldnn",                                       \
                   "(bool, default false) Only used in mkldnn kernel") \
         .SetDefault(false);                                           \

paddle/fluid/operators/adam_op.cc

@@ -92,9 +92,9 @@ class AdamOpMaker : public framework::OpProtoAndCheckerMaker {
     AddInput("Beta1Pow", "(Tensor) Input beta1 power accumulator");
     AddInput("Beta2Pow", "(Tensor) Input beta2 power accumulator");

-    AddOutput("ParamOut", "(Tensor) Output parameter").Reuse("Param");
-    AddOutput("Moment1Out", "(Tensor) Output first moment").Reuse("Moment1");
-    AddOutput("Moment2Out", "(Tensor) Output second moment").Reuse("Moment2");
+    AddOutput("ParamOut", "(Tensor) Output parameter");
+    AddOutput("Moment1Out", "(Tensor) Output first moment");
+    AddOutput("Moment2Out", "(Tensor) Output second moment");

     AddAttr<float>("beta1",
                    "(float, default 0.9) "

paddle/fluid/operators/batch_norm_op.cc

@@ -135,15 +135,13 @@ class BatchNormOpMaker : public framework::OpProtoAndCheckerMaker {
     AddInput("Variance",
              "The global variance (for training) "
              "or estimated Variance (for testing)");
-    AddOutput("Y", "result after normalization").Reuse("X");
+    AddOutput("Y", "result after normalization");
     AddOutput("MeanOut",
               "Share memory with Mean. "
-              "Store the global mean when training")
-        .Reuse("Mean");
+              "Store the global mean when training");
     AddOutput("VarianceOut",
               "Share memory with Variance. "
-              "Store the global Variance when training")
-        .Reuse("Variance");
+              "Store the global Variance when training");
     AddOutput("SavedMean",
               "Mean of the current mini batch, "
               "will apply to output when training")

paddle/fluid/operators/conv_op.cc

@@ -130,8 +130,7 @@ void Conv2DOpMaker::Make() {
       .AsDispensable();
   AddOutput("Output",
             "(Tensor) The output tensor of convolution operator. "
-            "The format of output tensor is also NCHW.")
-      .Reuse("Input");
+            "The format of output tensor is also NCHW.");
   AddInput("ResidualData",
            "(Tensor) Tensor with residual data "
            "to which convolution output will be added."
@@ -238,8 +237,7 @@ void Conv3DOpMaker::Make() {
            "input image channels divided by the groups.");
   AddOutput("Output",
             "(Tensor) The output tensor of convolution operator."
-            "The format of output tensor is also NCDHW.")
-      .Reuse("Input");
+            "The format of output tensor is also NCDHW.");
   AddAttr<std::vector<int>>("strides",
                             "(vector<int>, default:{1, 1, 1}), the "
                             "strides(d_stride, h_stride, w_stride) of "

paddle/fluid/operators/elementwise_op.h

@@ -80,8 +80,6 @@ class ElementwiseOpMaker : public framework::OpProtoAndCheckerMaker {
   void Make() final {
     AddInput("X", "(Tensor), The first input tensor of elementwise op.");
     AddInput("Y", "(Tensor), The second input tensor of elementwise op.");
-    // AddOutput("SavedShape", "(Tensor), save X, Y shape for grad to save
-    // memory.").AsIntermediate();
     AddOutput("Out", "The output of elementwise op.");
     AddAttr<int>("axis",
                  "(int, default -1). The start dimension index "
@@ -129,13 +127,11 @@ But the output only shares the LoD information with the input $X$.
 )DOC",
                                GetName(), GetEquation()));
-    SetReuse();
   }

  protected:
   virtual std::string GetName() const = 0;
   virtual std::string GetEquation() const = 0;
-  virtual void SetReuse() {}
 };

 class ElementwiseOpGrad : public framework::OperatorWithKernel {
@@ -269,7 +265,6 @@ class ElemwiseGradKernel : public framework::OpKernel<T> {
  protected:                                                        \
   virtual std::string GetName() const { return op_name; }          \
   virtual std::string GetEquation() const { return equation; }     \
-  virtual void SetReuse() { Reuse(__VA_ARGS__); }                  \
 };                                                                 \
 REGISTER_OPERATOR(op_type, ::paddle::operators::ElementwiseOp,     \
                   __ElemwiseOp##op_type##Maker__,                  \

paddle/fluid/operators/mean_op.cc

@@ -34,7 +34,7 @@ class MeanOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
     AddInput("X", "(Tensor) The input of mean op");
-    AddOutput("Out", "(Tensor) The output of mean op").Reuse("X");
+    AddOutput("Out", "(Tensor) The output of mean op");
     AddComment(R"DOC(
 Mean Operator calculates the mean of all elements in X.

paddle/fluid/operators/pool_op.cc

@@ -151,8 +151,7 @@ void Pool2dOpMaker::Make() {
             "The format of output tensor is also NCHW, "
             "where N is batch size, C is the number of channels, "
             "H is the height of the feature, "
-            "and W is the width of the feature.")
-      .Reuse("X");
+            "and W is the width of the feature.");
   AddAttr<std::string>("pooling_type",
                        "(string), pooling type, can be \"max\" for max-pooling "
@@ -252,8 +251,7 @@ void Pool3dOpMaker::Make() {
             "The format of output tensor is also NCDHW, "
             "where N is batch size, C is "
             "the number of channels, and D, H and W is the depth, height and "
-            "width of the feature, respectively.")
-      .Reuse("X");
+            "width of the feature, respectively.");
   AddAttr<std::string>("pooling_type",
                        "(string) Pooling type, can be \"max\" for max-pooling "

paddle/fluid/operators/sgd_op.cc

@@ -77,8 +77,7 @@ class SGDOpMaker : public framework::OpProtoAndCheckerMaker {
     AddInput("Grad", "(Tensor or SelectedRows) Input gradient");
     AddOutput("ParamOut",
               "(Tensor or SelectedRows, same with Param) "
-              "Output parameter, should share the same memory with Param")
-        .Reuse("Param");
+              "Output parameter, should share the same memory with Param");

     AddComment(R"DOC(
 SGD operator

paddle/fluid/operators/softmax_op.cc

@@ -80,8 +80,7 @@ class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker {
     AddInput("X",
              "The input tensor of softmax, "
              "whose last dimension is the input_feature_dimensions.");
-    AddOutput("Out", "The normalized values with the same shape as X.")
-        .Reuse("X");
+    AddOutput("Out", "The normalized values with the same shape as X.");
     AddAttr<bool>(
         "use_cudnn",
         "(bool, default false) Only used in cudnn kernel, need install cudnn")

paddle/fluid/operators/sum_op.cc

@@ -132,7 +132,7 @@ class SumOpMaker : public framework::OpProtoAndCheckerMaker {
   void Make() override {
     AddInput("X", "(vector<Tensor>) The input tensors of sum operator.")
         .AsDuplicable();
-    AddOutput("Out", "(Tensor) The output tensor of sum operator.").Reuse("X");
+    AddOutput("Out", "(Tensor) The output tensor of sum operator.");
     AddAttr<bool>("use_mkldnn",
                   "(bool, default false) Only used in mkldnn kernel")
         .SetDefault(false);

paddle/fluid/operators/top_k_op.cc

@@ -50,7 +50,7 @@ class TopkOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
     AddInput("X", "(Tensor) The input of Topk op");
-    AddOutput("Out", "(Tensor) The output tensor of Topk op").Reuse("X");
+    AddOutput("Out", "(Tensor) The output tensor of Topk op");
     AddOutput("Indices", "(Tensor) The indices of Topk elements of input");
     AddComment(R"DOC(
 Top K operator
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录