PaddlePaddle / Paddle
Commit 18cf0786

Authored Aug 08, 2017 by Yi Wang; committed via GitHub on Aug 08, 2017.

Merge pull request #3300 from dzhwinter/type_alias

"remove type alias"

Parents: e31a469e, 22f03c39

Showing 42 changed files with 283 additions and 244 deletions (+283, -244).
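What "remove type alias" means in practice can be read off the per-file diffs below: the shared header paddle/operators/type_alias.h, which injected framework:: and platform:: names into every operator file, is deleted; each operator now spells out framework::-qualified types (or declares the few aliases it actually needs locally), and each .cc/.cu file declares its own `namespace ops = paddle::operators;` right before its REGISTER_* macros. The following is a minimal, self-contained sketch of that pattern, not Paddle's real API: the simplified `framework` namespace and `DemoKernel` class are hypothetical stand-ins used only for illustration.

// Hypothetical, compilable illustration of the refactoring pattern in this
// commit: instead of importing bare names (OpKernel, ExecutionContext, ...)
// from a shared type_alias.h, each operator file qualifies them with
// framework:: or declares the aliases it needs locally.
#include <iostream>

namespace framework {  // stand-in for paddle::framework
class OpKernel {
 public:
  virtual ~OpKernel() = default;
  virtual void Compute() const = 0;
};
}  // namespace framework

namespace operators {  // stand-in for paddle::operators
// Old style (removed): a shared header declared
//   using OpKernel = framework::OpKernel;
// so operators inherited from the bare name OpKernel.
//
// New style: qualify explicitly, file by file.
class DemoKernel : public framework::OpKernel {
 public:
  void Compute() const override { std::cout << "demo kernel\n"; }
};
}  // namespace operators

// Registration code keeps the short spelling by re-declaring the alias
// locally in each .cc/.cu file, e.g. "namespace ops = paddle::operators;".
namespace ops = operators;

int main() {
  ops::DemoKernel k;
  k.Compute();
  return 0;
}

The trade-off visible throughout the diff: a few more characters per file in exchange for dropping a header that silently widened every operator's name lookup.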
Changed files:

paddle/framework/backward.cc                  +1   -0
paddle/framework/backward_test.cc             +10  -4
paddle/framework/pybind.cc                    +18  -17
paddle/operators/CMakeLists.txt               +1   -0
paddle/operators/add_op.cc                    +10  -7
paddle/operators/add_op.cu                    +3   -1
paddle/operators/add_op.h                     +9   -3
paddle/operators/add_op_test.cc               +2   -2
paddle/operators/cross_entropy_op.cc          +12  -9
paddle/operators/cross_entropy_op.cu          +5   -0
paddle/operators/cross_entropy_op.h           +7   -5
paddle/operators/fc_op.cc                     +10  -3
paddle/operators/fill_zeros_like_op.cc        +3   -3
paddle/operators/fill_zeros_like_op.cu        +2   -1
paddle/operators/fill_zeros_like_op.h         +2   -1
paddle/operators/mean_op.cc                   +11  -8
paddle/operators/mean_op.cu                   +5   -2
paddle/operators/mean_op.h                    +14  -5
paddle/operators/mul_op.cc                    +8   -7
paddle/operators/mul_op.cu                    +3   -1
paddle/operators/mul_op.h                     +10  -4
paddle/operators/net_op.cc                    +0   -1
paddle/operators/net_op.h                     +0   -6
paddle/operators/net_op_test.cc               +9   -13
paddle/operators/recurrent_op.cc              +8   -5
paddle/operators/recurrent_op_test.cc         +5   -1
paddle/operators/rnn/recurrent_op_utils.cc    +8   -7
paddle/operators/rnn/recurrent_op_utils.h     +3   -2
paddle/operators/rowwise_add_op.cc            +9   -6
paddle/operators/rowwise_add_op.cu            +3   -2
paddle/operators/rowwise_add_op.h             +12  -3
paddle/operators/sgd_op.cc                    +7   -5
paddle/operators/sgd_op.cu                    +3   -1
paddle/operators/sgd_op.h                     +9   -3
paddle/operators/sigmoid_op.cc                +13  -9
paddle/operators/sigmoid_op.cu                +6   -3
paddle/operators/sigmoid_op.h                 +11  -6
paddle/operators/softmax_op.cc                +13  -9
paddle/operators/softmax_op.cu                +7   -5
paddle/operators/softmax_op.h                 +11  -9
paddle/operators/type_alias.h                 +0   -55
paddle/pybind/CMakeLists.txt                  +0   -10
paddle/framework/backward.cc
@@ -13,6 +13,7 @@
    limitations under the License. */
 #include "paddle/framework/backward.h"
+#include <list>
 #include "paddle/framework/op_registry.h"
 #include "paddle/operators/net_op.h"

paddle/framework/backward_test.cc
@@ -17,16 +17,21 @@
 #include <gtest/gtest.h>
 #include "paddle/framework/op_registry.h"
 #include "paddle/operators/net_op.h"
-#include "paddle/operators/type_alias.h"
 namespace paddle {
 namespace framework {
+using OperatorBase = framework::OperatorBase;
+using OpProtoAndCheckerMaker = framework::OpProtoAndCheckerMaker;
+using OpProto = framework::OpProto;
+using OpAttrChecker = framework::OpAttrChecker;
+using Scope = framework::Scope;
+using DeviceContext = platform::DeviceContext;
 class EmptyOp : public OperatorBase {
  public:
   void InferShape(const Scope &scope) const override {}
-  void Run(const Scope &scope,
-           const platform::DeviceContext &dev_ctx) const override {}
+  void Run(const Scope &scope, const DeviceContext &dev_ctx) const override {}
 };
 class RowWiseAddOpMaker : public OpProtoAndCheckerMaker {
@@ -71,7 +76,7 @@ class NoGradOpMaker : public OpProtoAndCheckerMaker {
   }
 };
-class FcOp : public ops::NetOp {
+class FcOp : public operators::NetOp {
  public:
   void Init() override {
     AddOp(OpRegistry::CreateOp("mul", {Input("X"), Input("W")},
@@ -143,6 +148,7 @@ class AddOpMaker : public OpProtoAndCheckerMaker {
 }  // namespace paddle
 namespace f = paddle::framework;
+namespace ops = paddle::operators;
 using EnforceNotMet = paddle::platform::EnforceNotMet;
 REGISTER_OP(rowwise_add, f::EmptyOp, f::RowWiseAddOpMaker);
 REGISTER_GRADIENT_OP(rowwise_add, rowwise_add_grad, f::EmptyOp);

paddle/framework/pybind.cc
@@ -18,11 +18,8 @@ limitations under the License. */
 #include "paddle/framework/backward.h"
 #include "paddle/framework/op_registry.h"
 #include "paddle/framework/operator.h"
-#include "paddle/framework/scope.h"
 #include "paddle/framework/tensor_py.h"
 #include "paddle/operators/net_op.h"
-#include "paddle/operators/type_alias.h"
-#include "paddle/platform/enforce.h"
 #include "paddle/platform/place.h"
 #include "pybind11/numpy.h"
@@ -45,6 +42,9 @@ USE_OP_WITHOUT_KERNEL(recurrent_op);
 USE_OP(uniform_random);
 namespace paddle {
 namespace framework {
+using Tensor = framework::Tensor;
 template <typename ClassType>
 void ExposeOperator(ClassType &m) {
   m.def("infer_shape", &ClassType::type::InferShape)
@@ -150,8 +150,8 @@ All parameter, weight, gradient are variables in Paddle.
            [](Variable &self) -> Tensor * { return self.GetMutable<Tensor>(); },
            py::return_value_policy::reference)
       .def("get_net",
-           [](Variable &self) -> ops::NetOp * {
-             return self.GetMutable<ops::NetOp>();
+           [](Variable &self) -> operators::NetOp * {
+             return self.GetMutable<operators::NetOp>();
            },
            py::return_value_policy::reference);
@@ -230,23 +230,24 @@ All parameter, weight, gradient are variables in Paddle.
   ExposeOperator(operator_base);
-  py::class_<ops::NetOp, std::shared_ptr<ops::NetOp>> net(m, "Net");
+  py::class_<operators::NetOp, std::shared_ptr<operators::NetOp>> net(m, "Net");
   net.def_static("create",
-                 []() -> std::shared_ptr<ops::NetOp> {
-                   auto retv = std::make_shared<ops::NetOp>();
+                 []() -> std::shared_ptr<operators::NetOp> {
+                   auto retv = std::make_shared<operators::NetOp>();
                    retv->type_ = "plain_net";
                    return retv;
                  })
-      .def("add_op", &ops::NetOp::AddOp)
-      .def("add_op",
-           [](ops::NetOp &self, const std::shared_ptr<ops::NetOp> &net) -> void {
+      .def("add_op", &operators::NetOp::AddOp)
+      .def("add_op",
+           [](operators::NetOp &self,
+              const std::shared_ptr<operators::NetOp> &net) -> void {
             self.AddOp(std::static_pointer_cast<OperatorBase>(net));
           })
-      .def("complete_add_op", &ops::NetOp::CompleteAddOp)
-      .def("complete_add_op",
-           [](std::shared_ptr<ops::NetOp> &self) { self->CompleteAddOp(); });
+      .def("complete_add_op", &operators::NetOp::CompleteAddOp)
+      .def("complete_add_op", [](std::shared_ptr<operators::NetOp> &self) {
+        self->CompleteAddOp();
+      });
   ExposeOperator(net);

paddle/operators/CMakeLists.txt
@@ -59,6 +59,7 @@ op_library(cross_entropy_op SRCS cross_entropy_op.cc cross_entropy_op.cu)
 op_library(fill_zeros_like_op SRCS fill_zeros_like_op.cc fill_zeros_like_op.cu)
 op_library(sgd_op SRCS sgd_op.cc sgd_op.cu)
 cc_test(sgd_op_test SRCS sgd_op_test.cc DEPS sgd_op)
 op_library(fc_op
     SRCS fc_op.cc

paddle/operators/add_op.cc
@@ -17,9 +17,9 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
-class AddOp : public OperatorWithKernel {
+class AddOp : public framework::OperatorWithKernel {
  protected:
-  void InferShape(const InferShapeContext &ctx) const override {
+  void InferShape(const framework::InferShapeContext &ctx) const override {
     PADDLE_ENFORCE_EQ(ctx.InputSize(), 2);
     PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1);
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(0), "Inputs of AddOp must all be set");
@@ -31,9 +31,9 @@ class AddOp : public OperatorWithKernel {
   }
 };
-class AddOpMaker : public OpProtoAndCheckerMaker {
+class AddOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  AddOpMaker(OpProto *proto, OpAttrChecker *op_checker)
+  AddOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X", "The first input of add op");
     AddInput("Y", "The second input of add op");
@@ -46,14 +46,17 @@ The equation is: Out = X + Y
   }
 };
-class AddOpGrad : public OperatorWithKernel {
+class AddOpGrad : public framework::OperatorWithKernel {
  protected:
-  void InferShape(const InferShapeContext &ctx) const override {}
+  void InferShape(const framework::InferShapeContext &ctx) const override {}
 };
 }  // namespace operators
 }  // namespace paddle
+namespace ops = paddle::operators;
 REGISTER_OP(add_two, ops::AddOp, ops::AddOpMaker);
 REGISTER_GRADIENT_OP(add_two, add_two_grad, ops::AddOpGrad);
-REGISTER_OP_CPU_KERNEL(add_two, ops::AddKernel<ops::CPUPlace, float>);
+REGISTER_OP_CPU_KERNEL(add_two,
+                       ops::AddKernel<paddle::platform::CPUPlace, float>);

paddle/operators/add_op.cu
@@ -16,4 +16,6 @@
 #include "paddle/framework/op_registry.h"
 #include "paddle/operators/add_op.h"
-REGISTER_OP_GPU_KERNEL(add_two, ops::AddKernel<ops::GPUPlace, float>);
+namespace ops = paddle::operators;
+REGISTER_OP_GPU_KERNEL(add_two,
+                       ops::AddKernel<paddle::platform::GPUPlace, float>);

paddle/operators/add_op.h
@@ -13,15 +13,21 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #pragma once
-#include "paddle/operators/type_alias.h"
+#include "paddle/framework/eigen.h"
+#include "paddle/framework/op_registry.h"
 namespace paddle {
 namespace operators {
+using Tensor = framework::Tensor;
+template <typename T, int MajorType = Eigen::RowMajor,
+          typename IndexType = Eigen::DenseIndex>
+using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
 template <typename Place, typename T>
-class AddKernel : public OpKernel {
+class AddKernel : public framework::OpKernel {
  public:
-  void Compute(const ExecutionContext &context) const override {
+  void Compute(const framework::ExecutionContext &context) const override {
     auto input0 = context.Input<Tensor>(0);
     auto input1 = context.Input<Tensor>(1);
     auto output = context.Output<Tensor>(0);

paddle/operators/add_op_test.cc
@@ -14,9 +14,9 @@ limitations under the License. */
 #include <gtest/gtest.h>
 #define private public
-#include <paddle/framework/op_registry.h>
+#include "paddle/framework/op_registry.h"
 USE_OP(add_two);
 // USE_OP(add_two_grad);
 TEST(AddOp, GetOpProto) {
   auto &protos = paddle::framework::OpRegistry::protos();

paddle/operators/cross_entropy_op.cc
@@ -17,9 +17,9 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
-class OnehotCrossEntropyOp : public OperatorWithKernel {
+class OnehotCrossEntropyOp : public framework::OperatorWithKernel {
  protected:
-  void InferShape(const InferShapeContext &ctx) const override {
+  void InferShape(const framework::InferShapeContext &ctx) const override {
     PADDLE_ENFORCE_EQ(ctx.InputSize(), 2,
                       "Input size of OnehotCrossEntropyOp must be two");
     PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1,
@@ -37,9 +37,9 @@ class OnehotCrossEntropyOp : public OperatorWithKernel {
   }
 };
-class OnehotCrossEntropyGradientOp : public OperatorWithKernel {
+class OnehotCrossEntropyGradientOp : public framework::OperatorWithKernel {
  protected:
-  void InferShape(const InferShapeContext &ctx) const override {
+  void InferShape(const framework::InferShapeContext &ctx) const override {
     auto X_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
     auto X = ctx.Input<Tensor>("X");
@@ -48,9 +48,10 @@ class OnehotCrossEntropyGradientOp : public OperatorWithKernel {
   }
 };
-class OnehotCrossEntropyOpMaker : public OpProtoAndCheckerMaker {
+class OnehotCrossEntropyOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  OnehotCrossEntropyOpMaker(OpProto *proto, OpAttrChecker *op_checker)
+  OnehotCrossEntropyOpMaker(framework::OpProto *proto,
+                            framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X", "The first input of OnehotCrossEntropyOp");
     AddInput("label", "The second input of OnehotCrossEntropyOp");
@@ -66,12 +67,14 @@ OnehotCrossEntropy Operator.
 }  // namespace operators
 }  // namespace paddle
+namespace ops = paddle::operators;
 REGISTER_OP(onehot_cross_entropy, ops::OnehotCrossEntropyOp,
             ops::OnehotCrossEntropyOpMaker);
-REGISTER_OP_CPU_KERNEL(onehot_cross_entropy,
-                       ops::OnehotCrossEntropyOpKernel<ops::CPUPlace, float>);
+REGISTER_OP_CPU_KERNEL(
+    onehot_cross_entropy,
+    ops::OnehotCrossEntropyOpKernel<paddle::platform::CPUPlace, float>);
 REGISTER_GRADIENT_OP(onehot_cross_entropy, onehot_cross_entropy_grad,
                      ops::OnehotCrossEntropyGradientOp);
 REGISTER_OP_CPU_KERNEL(
     onehot_cross_entropy_grad,
-    ops::OnehotCrossEntropyGradientOpKernel<ops::CPUPlace, float>);
+    ops::OnehotCrossEntropyGradientOpKernel<paddle::platform::CPUPlace, float>);

paddle/operators/cross_entropy_op.cu
@@ -14,3 +14,8 @@
 #define EIGEN_USE_GPU
 #include "paddle/operators/cross_entropy_op.h"
+namespace ops = paddle::operators;
+REGISTER_OP_GPU_KERNEL(
+    onehot_cross_entropy,
+    ops::OnehotCrossEntropyOpKernel<paddle::platform::GPUPlace, float>);

paddle/operators/cross_entropy_op.h
@@ -13,11 +13,13 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #pragma once
-#include "paddle/operators/type_alias.h"
+#include "paddle/framework/op_registry.h"
 namespace paddle {
 namespace operators {
+using Tensor = framework::Tensor;
 template <typename T>
 T tolerable_value(T x) {
   static_assert(std::is_floating_point<T>::value,
@@ -38,9 +40,9 @@ T tolerable_value(T x) {
 }
 template <typename Place, typename T>
-class OnehotCrossEntropyOpKernel : public OpKernel {
+class OnehotCrossEntropyOpKernel : public framework::OpKernel {
  public:
-  void Compute(const ExecutionContext& ctx) const override {
+  void Compute(const framework::ExecutionContext& ctx) const override {
     auto X = ctx.Input<Tensor>("X");
     const T* Xdata = X->data<T>();
     const int* label_data = ctx.Input<Tensor>(1)->data<int>();
@@ -61,9 +63,9 @@ class OnehotCrossEntropyOpKernel : public OpKernel {
 };
 template <typename Place, typename T>
-class OnehotCrossEntropyGradientOpKernel : public OpKernel {
+class OnehotCrossEntropyGradientOpKernel : public framework::OpKernel {
  public:
-  void Compute(const ExecutionContext& ctx) const override {
+  void Compute(const framework::ExecutionContext& ctx) const override {
     auto X = ctx.Input<Tensor>("X");
     auto dX = ctx.Output<Tensor>(framework::GradVarName("X"));
     auto dY = ctx.Input<Tensor>(framework::GradVarName("Y"));

paddle/operators/fc_op.cc
@@ -12,11 +12,16 @@
    See the License for the specific language governing permissions and
    limitations under the License. */
-#include "type_alias.h"
+#include "paddle/operators/net_op.h"
+#include "paddle/framework/eigen.h"
+#include "paddle/framework/op_registry.h"
 namespace paddle {
 namespace operators {
+using OpRegistry = framework::OpRegistry;
 class FullyConnectedOp : public NetOp {
  public:
   void Init() override {
@@ -39,9 +44,10 @@ class FullyConnectedOp : public NetOp {
   }
 };
-class FullyConnectedOpMaker : public OpProtoAndCheckerMaker {
+class FullyConnectedOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  FullyConnectedOpMaker(OpProto *proto, OpAttrChecker *op_checker)
+  FullyConnectedOpMaker(framework::OpProto *proto,
+                        framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X", "the input of fc operator");
     AddInput("W", "the weight of fc operator");
@@ -66,4 +72,5 @@ USE_OP(rowwise_add);
 USE_OP(sigmoid);
 USE_OP(softmax);
+namespace ops = paddle::operators;
 REGISTER_OP(fc, ops::FullyConnectedOp, ops::FullyConnectedOpMaker);

paddle/operators/fill_zeros_like_op.cc
@@ -50,8 +50,8 @@ The output will have the same size with input.
 }  // namespace operators
 }  // namespace paddle
-REGISTER_OP(fill_zeros_like, paddle::operators::FillZerosLikeOp,
-            paddle::operators::FillZerosLikeOpMaker);
+namespace ops = paddle::operators;
+REGISTER_OP(fill_zeros_like, ops::FillZerosLikeOp, ops::FillZerosLikeOpMaker);
 REGISTER_OP_CPU_KERNEL(
     fill_zeros_like,
-    paddle::operators::FillZerosLikeKernel<paddle::platform::CPUPlace, float>);
+    ops::FillZerosLikeKernel<paddle::platform::CPUPlace, float>);

paddle/operators/fill_zeros_like_op.cu
@@ -16,6 +16,7 @@
 #include "paddle/framework/op_registry.h"
 #include "paddle/operators/fill_zeros_like_op.h"
+namespace ops = paddle::operators;
 REGISTER_OP_GPU_KERNEL(
     fill_zeros_like,
-    paddle::operators::FillZerosLikeKernel<paddle::platform::GPUPlace, float>);
+    ops::FillZerosLikeKernel<paddle::platform::GPUPlace, float>);

paddle/operators/fill_zeros_like_op.h
@@ -13,7 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #pragma once
-#include "paddle/operators/type_alias.h"
+#include "paddle/framework/eigen.h"
+#include "paddle/framework/op_registry.h"
 namespace paddle {
 namespace operators {

paddle/operators/mean_op.cc
@@ -17,9 +17,9 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
-class MeanOp : public OperatorWithKernel {
+class MeanOp : public framework::OperatorWithKernel {
  protected:
-  void InferShape(const InferShapeContext &ctx) const override {
+  void InferShape(const framework::InferShapeContext &ctx) const override {
     PADDLE_ENFORCE_EQ(ctx.InputSize(), 1, "Input size of AddOp must be one");
     PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1, "Output size of AddOp must be one");
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(0), "input should be set");
@@ -28,9 +28,9 @@ class MeanOp : public OperatorWithKernel {
   }
 };
-class MeanOpMaker : public OpProtoAndCheckerMaker {
+class MeanOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  MeanOpMaker(OpProto *proto, OpAttrChecker *op_checker)
+  MeanOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X", "The input of mean op");
     AddOutput("Out", "The output of mean op").IgnoreGradient();
@@ -38,9 +38,9 @@ class MeanOpMaker : public OpProtoAndCheckerMaker {
   }
 };
-class MeanGradOp : public OperatorWithKernel {
+class MeanGradOp : public framework::OperatorWithKernel {
  protected:
-  void InferShape(const InferShapeContext &ctx) const override {
+  void InferShape(const framework::InferShapeContext &ctx) const override {
     ctx.Output<Tensor>("X" + framework::kGradVarSuffix)
         ->Resize(ctx.Input<Tensor>("X")->dims());
   }
@@ -49,7 +49,10 @@ class MeanGradOp : public OperatorWithKernel {
 }  // namespace operators
 }  // namespace paddle
+namespace ops = paddle::operators;
 REGISTER_OP(mean, ops::MeanOp, ops::MeanOpMaker);
-REGISTER_OP_CPU_KERNEL(mean, ops::MeanKernel<ops::CPUPlace, float>);
+REGISTER_OP_CPU_KERNEL(mean,
+                       ops::MeanKernel<paddle::platform::CPUPlace, float>);
 REGISTER_GRADIENT_OP(mean, mean_grad, ops::MeanGradOp);
-REGISTER_OP_CPU_KERNEL(mean_grad, ops::MeanGradKernel<ops::CPUPlace, float>);
+REGISTER_OP_CPU_KERNEL(mean_grad,
+                       ops::MeanGradKernel<paddle::platform::CPUPlace, float>);

paddle/operators/mean_op.cu
@@ -16,5 +16,8 @@
 #include "paddle/operators/mean_op.h"
-REGISTER_OP_GPU_KERNEL(mean, ops::MeanKernel<ops::GPUPlace, float>);
-REGISTER_OP_GPU_KERNEL(mean_grad, ops::MeanGradKernel<ops::GPUPlace, float>);
+namespace ops = paddle::operators;
+REGISTER_OP_GPU_KERNEL(mean,
+                       ops::MeanKernel<paddle::platform::GPUPlace, float>);
+REGISTER_OP_GPU_KERNEL(mean_grad,
+                       ops::MeanGradKernel<paddle::platform::GPUPlace, float>);

paddle/operators/mean_op.h
@@ -13,15 +13,24 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #pragma once
-#include "paddle/operators/type_alias.h"
+#include "paddle/framework/eigen.h"
+#include "paddle/framework/op_registry.h"
 namespace paddle {
 namespace operators {
+using Tensor = framework::Tensor;
+template <typename T, int MajorType = Eigen::RowMajor,
+          typename IndexType = Eigen::DenseIndex>
+using EigenScalar = framework::EigenScalar<T, MajorType, IndexType>;
+template <typename T, int MajorType = Eigen::RowMajor,
+          typename IndexType = Eigen::DenseIndex>
+using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
 template <typename Place, typename T>
-class MeanKernel : public OpKernel {
+class MeanKernel : public framework::OpKernel {
  public:
-  void Compute(const ExecutionContext& context) const override {
+  void Compute(const framework::ExecutionContext& context) const override {
     auto input = context.Input<Tensor>(0);
     auto output = context.Output<Tensor>(0);
@@ -36,9 +45,9 @@ class MeanKernel : public OpKernel {
 };
 template <typename Place, typename T>
-class MeanGradKernel : public OpKernel {
+class MeanGradKernel : public framework::OpKernel {
  public:
-  void Compute(const ExecutionContext& context) const override {
+  void Compute(const framework::ExecutionContext& context) const override {
     auto OG = context.Input<Tensor>("Out" + framework::kGradVarSuffix);
     PADDLE_ENFORCE(framework::product(OG->dims()) == 1,
                    "Mean Gradient should be scalar");

paddle/operators/mul_op.cc
@@ -17,9 +17,9 @@
 namespace paddle {
 namespace operators {
-class MulOp : public OperatorWithKernel {
+class MulOp : public framework::OperatorWithKernel {
  protected:
-  void InferShape(const InferShapeContext &ctx) const override {
+  void InferShape(const framework::InferShapeContext &ctx) const override {
     PADDLE_ENFORCE(ctx.InputSize() == 2, "The mul op must take two inputs");
     auto dim0 = ctx.Input<Tensor>(0)->dims();
     auto dim1 = ctx.Input<Tensor>(1)->dims();
@@ -37,9 +37,9 @@ class MulOp : public OperatorWithKernel {
   }
 };
-class MulOpMaker : public OpProtoAndCheckerMaker {
+class MulOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  MulOpMaker(OpProto *proto, OpAttrChecker *op_checker)
+  MulOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X", "The first input of mul op");
     AddInput("Y", "The second input of mul op");
@@ -52,9 +52,9 @@ The equation is: Out = X * Y
   }
 };
-class MulOpGrad : public OperatorWithKernel {
+class MulOpGrad : public framework::OperatorWithKernel {
  protected:
-  void InferShape(const InferShapeContext &ctx) const override {}
+  void InferShape(const framework::InferShapeContext &ctx) const override {}
   std::string DebugString() const override {
     LOG(INFO) << "MulGrad";
     return "";
@@ -64,7 +64,8 @@ class MulOpGrad : public OperatorWithKernel {
 }  // namespace operators
 }  // namespace paddle
+namespace ops = paddle::operators;
 REGISTER_OP(mul, ops::MulOp, ops::MulOpMaker);
 REGISTER_GRADIENT_OP(mul, mul_grad, ops::MulOpGrad);
-REGISTER_OP_CPU_KERNEL(mul, ops::MulKernel<ops::CPUPlace, float>);
+REGISTER_OP_CPU_KERNEL(mul, ops::MulKernel<paddle::platform::CPUPlace, float>);

paddle/operators/mul_op.cu
@@ -15,4 +15,6 @@
 #define EIGEN_USE_GPU
 #include "paddle/operators/mul_op.h"
-REGISTER_OP_GPU_KERNEL(mul, ops::MulKernel<ops::GPUPlace, float>);
+namespace ops = paddle::operators;
+REGISTER_OP_GPU_KERNEL(mul,
+                       ops::MulKernel<paddle::platform::GPUPlace, float>);

paddle/operators/mul_op.h
@@ -13,16 +13,21 @@
 limitations under the License. */
 #pragma once
-#include "paddle/operators/type_alias.h"
+#include "paddle/framework/eigen.h"
+#include "paddle/framework/op_registry.h"
 namespace paddle {
 namespace operators {
+using Tensor = framework::Tensor;
+template <typename T, int MajorType = Eigen::RowMajor,
+          typename IndexType = Eigen::DenseIndex>
+using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
 template <typename Place, typename T>
-class MulKernel : public OpKernel {
+class MulKernel : public framework::OpKernel {
  public:
-  void Compute(const ExecutionContext& context) const override {
+  void Compute(const framework::ExecutionContext& context) const override {
     Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1> dim_pair = {
         {Eigen::IndexPair<Eigen::DenseIndex>(1, 0)}};
@@ -40,5 +45,6 @@ class MulKernel : public OpKernel {
     Z.device(place) = X.contract(Y, dim_pair);
   }
 };
 }  // namespace operators
 }  // namespace paddle

paddle/operators/net_op.cc
@@ -15,7 +15,6 @@
 */
 #include "paddle/operators/net_op.h"
-#include "paddle/framework/op_registry.h"
 namespace paddle {
 namespace operators {

paddle/operators/net_op.h
@@ -14,13 +14,7 @@ limitations under the License. */
 #pragma once
-#include "paddle/framework/op_desc.pb.h"
-#include "paddle/framework/op_proto.pb.h"
 #include "paddle/framework/op_registry.h"
-#include "paddle/framework/operator.h"
-#include "paddle/framework/scope.h"
-#include "paddle/operators/type_alias.h"
-#include "paddle/platform/device_context.h"
 namespace paddle {
 namespace operators {

paddle/operators/net_op_test.cc
@@ -2,31 +2,27 @@
 #include <gtest/gtest.h>
 #include "paddle/framework/op_registry.h"
 #include "paddle/framework/operator.h"
 namespace paddle {
 namespace operators {
+using Scope = framework::Scope;
+using DeviceContext = platform::DeviceContext;
 static int infer_shape_cnt = 0;
 static int run_cnt = 0;
-class TestOp : public OperatorBase {
+class TestOp : public framework::OperatorBase {
  public:
-  void InferShape(const framework::Scope& scope) const override {
-    ++infer_shape_cnt;
-  }
-  void Run(const framework::Scope& scope,
-           const paddle::platform::DeviceContext& dev_ctx) const override {
+  void InferShape(const Scope& scope) const override { ++infer_shape_cnt; }
+  void Run(const Scope& scope,
+           const platform::DeviceContext& dev_ctx) const override {
     ++run_cnt;
   }
 };
-class EmptyOp : public OperatorBase {
+class EmptyOp : public framework::OperatorBase {
  public:
   void InferShape(const Scope& scope) const override {}
-  void Run(const Scope& scope,
-           const platform::DeviceContext& dev_ctx) const override {}
+  void Run(const Scope& scope, const DeviceContext& dev_ctx) const override {}
 };
 template <typename T>
@@ -72,7 +68,7 @@ TEST(OpKernel, all) {
   net->Run(scope, dev_ctx);
   ASSERT_EQ(2, infer_shape_cnt);
   ASSERT_EQ(2, run_cnt);
-  ASSERT_THROW(net->AddOp(op2), paddle::platform::EnforceNotMet);
+  ASSERT_THROW(net->AddOp(op2), platform::EnforceNotMet);
 }
 TEST(NetOp, insert_op) {

paddle/operators/recurrent_op.cc
@@ -14,17 +14,19 @@
 #include "paddle/operators/recurrent_op.h"
 #include <glog/logging.h>
 #include <cstring>
 #include <sstream>
-#include "paddle/framework/op_registry.h"
 #include "paddle/operators/net_op.h"
-#include "paddle/platform/enforce.h"
 namespace paddle {
 namespace operators {
+using Scope = framework::Scope;
+using Variable = framework::Variable;
+using Tensor = framework::Tensor;
 void RecurrentAlgorithm::InferShape(const Scope& scope) const {
   seq_len_ = scope.FindVar((arg_->inlinks[0]).external)
                  ->GetMutable<Tensor>()
@@ -135,10 +137,11 @@ void RecurrentOp::Init() {
   alg_.Init(std::move(arg));
 }
-class RecurrentAlgorithmProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
+class RecurrentAlgorithmProtoAndCheckerMaker
+    : public framework::OpProtoAndCheckerMaker {
  public:
-  RecurrentAlgorithmProtoAndCheckerMaker(OpProto* proto,
-                                         OpAttrChecker* op_checker)
+  RecurrentAlgorithmProtoAndCheckerMaker(framework::OpProto* proto,
+                                         framework::OpAttrChecker* op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
     const auto& name = RecurrentOp::kArgName;
     // inputs and outputs stored in proto

paddle/operators/recurrent_op_test.cc
@@ -27,6 +27,10 @@ namespace operators {
 using framework::make_ddim;
 using framework::DDim;
+using framework::Tensor;
+using framework::Variable;
+using framework::Scope;
+using framework::OpRegistry;
 class RecurrentOpTest : public ::testing::Test {
  protected:
@@ -164,7 +168,7 @@ class RecurrentOpTest : public ::testing::Test {
   // father scope
   Scope scope_;
-  std::shared_ptr<OperatorBase> rnn_op_;
+  std::shared_ptr<framework::OperatorBase> rnn_op_;
 };
 TEST_F(RecurrentOpTest, Run) {

paddle/operators/rnn/recurrent_op_utils.cc
@@ -18,7 +18,9 @@ namespace paddle {
 namespace operators {
 namespace rnn {
-namespace fmw = paddle::framework;
+namespace f = paddle::framework;
+
+using Tensor = framework::Tensor;
 void SegmentInputs(const std::vector<Scope*>& step_scopes,
                    const std::vector<Link>& inlinks, const size_t seq_len,
@@ -30,10 +32,10 @@ void SegmentInputs(const std::vector<Scope*>& step_scopes,
                                     inlinks[i].external);
     Tensor* input = input_var->GetMutable<Tensor>();
-    fmw::DDim dims = input->dims();
+    f::DDim dims = input->dims();
     PADDLE_ENFORCE(static_cast<size_t>(dims[0]) == seq_len,
                    "all the inlinks must have same length");
-    fmw::DDim step_dims = slice_ddim(dims, 1, dims.size());
+    f::DDim step_dims = slice_ddim(dims, 1, dims.size());
     for (size_t j = 0; j < seq_len; j++) {
       Tensor* step_input =
           step_scopes[j]->NewVar(inlinks[i].internal)->GetMutable<Tensor>();
@@ -58,11 +60,10 @@ void ConcatOutputs(const std::vector<Scope*>& step_scopes,
       auto step_scope_var = step_scopes[0]->FindVar(outlinks[i].internal);
       PADDLE_ENFORCE(step_scope_var != nullptr, "%s not in scope",
                      outlinks[i].internal);
-      fmw::DDim step_dims =
-          step_scope_var->template GetMutable<Tensor>()->dims();
+      f::DDim step_dims = step_scope_var->template GetMutable<Tensor>()->dims();
       std::vector<int> dims_vec = vectorize(step_dims);
       dims_vec.insert(dims_vec.begin(), seq_len);
-      output->Resize(fmw::make_ddim(dims_vec));
+      output->Resize(f::make_ddim(dims_vec));
     } else {
       output->mutable_data<float>(platform::CPUPlace());
       for (size_t j = 0; j < seq_len; j++) {
@@ -104,7 +105,7 @@ void LinkMemories(const std::vector<Scope*>& scopes,
 }
 void InitArgument(const ArgumentName& name, Argument* arg,
-                  const OperatorBase& op) {
+                  const framework::OperatorBase& op) {
   arg->step_net = op.Input(name.step_net);
   arg->step_scopes = op.Output(name.step_scopes);

paddle/operators/rnn/recurrent_op_utils.h
@@ -17,12 +17,13 @@
 #include <string>
 #include "paddle/framework/operator.h"
-#include "paddle/operators/type_alias.h"
 namespace paddle {
 namespace operators {
 namespace rnn {
+using Scope = framework::Scope;
 /**
  * Memory of a RNN (same as the role of `Momory` in PaddlePaddle).
  *
@@ -86,7 +87,7 @@ void LinkMemories(const std::vector<Scope*>& step_scopes,
                   const int offset, bool infer_shape_mode);
 void InitArgument(const ArgumentName& name, Argument* arg,
-                  const OperatorBase& op);
+                  const framework::OperatorBase& op);
 }  // namespace rnn
 }  // namespace operators

paddle/operators/rowwise_add_op.cc
@@ -13,12 +13,13 @@
 limitations under the License. */
 #include "paddle/operators/rowwise_add_op.h"
 namespace paddle {
 namespace operators {
-class RowWiseAddOp : public OperatorWithKernel {
+class RowWiseAddOp : public framework::OperatorWithKernel {
  protected:
-  void InferShape(const InferShapeContext &ctx) const override {
+  void InferShape(const framework::InferShapeContext &ctx) const override {
     PADDLE_ENFORCE(ctx.InputSize() == 2UL,
                    "Two inputs is needed by rowwise add");
     auto dim0 = ctx.Input<Tensor>(0)->dims();
@@ -32,9 +33,10 @@ class RowWiseAddOp : public OperatorWithKernel {
   }
 };
-class RowWiseAddOpMaker : public OpProtoAndCheckerMaker {
+class RowWiseAddOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  RowWiseAddOpMaker(OpProto *proto, OpAttrChecker *op_checker)
+  RowWiseAddOpMaker(framework::OpProto *proto,
+                    framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X", "The left input of row-wise add op, must be matrix");
     AddInput("b", "The right input of row-wise add op, must be vector");
@@ -50,6 +52,7 @@ for i in xrange(X.shape[0]):
 }  // namespace operators
 }  // namespace paddle
+namespace ops = paddle::operators;
 REGISTER_OP(rowwise_add, ops::RowWiseAddOp, ops::RowWiseAddOpMaker);
-REGISTER_OP_CPU_KERNEL(rowwise_add, ops::RowWiseAddKernel<ops::CPUPlace, float>);
+REGISTER_OP_CPU_KERNEL(
+    rowwise_add, ops::RowWiseAddKernel<paddle::platform::CPUPlace, float>);

paddle/operators/rowwise_add_op.cu
@@ -15,5 +15,6 @@
 #define EIGEN_USE_GPU
 #include "paddle/operators/rowwise_add_op.h"
-REGISTER_OP_GPU_KERNEL(rowwise_add,
-                       ops::RowWiseAddKernel<ops::GPUPlace, float>);
+namespace ops = paddle::operators;
+REGISTER_OP_GPU_KERNEL(
+    rowwise_add, ops::RowWiseAddKernel<paddle::platform::GPUPlace, float>);

paddle/operators/rowwise_add_op.h
@@ -13,15 +13,24 @@
 limitations under the License. */
 #pragma once
-#include "paddle/operators/type_alias.h"
+#include "paddle/framework/eigen.h"
+#include "paddle/framework/op_registry.h"
 namespace paddle {
 namespace operators {
+using Tensor = framework::Tensor;
+template <typename T, int MajorType = Eigen::RowMajor,
+          typename IndexType = Eigen::DenseIndex>
+using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
+template <typename T, int MajorType = Eigen::RowMajor,
+          typename IndexType = Eigen::DenseIndex>
+using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
 template <typename Place, typename T>
-class RowWiseAddKernel : public OpKernel {
+class RowWiseAddKernel : public framework::OpKernel {
  public:
-  void Compute(const ExecutionContext& context) const override {
+  void Compute(const framework::ExecutionContext& context) const override {
     auto out = context.Output<Tensor>(0);
     out->mutable_data<T>(context.GetPlace());

paddle/operators/sgd_op.cc
@@ -17,9 +17,9 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
-class SGDOp : public OperatorWithKernel {
+class SGDOp : public framework::OperatorWithKernel {
  protected:
-  void InferShape(const InferShapeContext &ctx) const override {
+  void InferShape(const framework::InferShapeContext &ctx) const override {
     PADDLE_ENFORCE_EQ(ctx.InputSize(), 2, "Input size of SGDOp must be two");
     PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1, "Output size of SGDOp must be one");
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(0), "inputs[0] mast be set");
@@ -31,9 +31,9 @@ class SGDOp : public OperatorWithKernel {
   }
 };
-class SGDOpMaker : public OpProtoAndCheckerMaker {
+class SGDOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  SGDOpMaker(OpProto *proto, OpAttrChecker *op_checker)
+  SGDOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("param", "input parameter");
     AddInput("grad", "input gradient");
@@ -51,5 +51,7 @@ param_out = param - learning_rate * grad;
 }  // namespace operators
 }  // namespace paddle
+namespace ops = paddle::operators;
 REGISTER_OP(sgd, ops::SGDOp, ops::SGDOpMaker);
-REGISTER_OP_CPU_KERNEL(sgd, ops::SGDOpKernel<ops::CPUPlace, float>);
+REGISTER_OP_CPU_KERNEL(sgd,
+                       ops::SGDOpKernel<paddle::platform::CPUPlace, float>);

paddle/operators/sgd_op.cu
@@ -15,4 +15,6 @@
 #define EIGEN_USE_GPU
 #include "paddle/operators/sgd_op.h"
-REGISTER_OP_GPU_KERNEL(sgd, ops::SGDOpKernel<ops::GPUPlace, float>);
+namespace ops = paddle::operators;
+REGISTER_OP_GPU_KERNEL(sgd,
+                       ops::SGDOpKernel<paddle::platform::GPUPlace, float>);

paddle/operators/sgd_op.h
@@ -13,15 +13,21 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #pragma once
-#include "paddle/operators/type_alias.h"
+#include "paddle/framework/eigen.h"
+#include "paddle/framework/op_registry.h"
 namespace paddle {
 namespace operators {
+using Tensor = framework::Tensor;
+template <typename T, int MajorType = Eigen::RowMajor,
+          typename IndexType = Eigen::DenseIndex>
+using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
 template <typename Place, typename T>
-class SGDOpKernel : public OpKernel {
+class SGDOpKernel : public framework::OpKernel {
  public:
-  void Compute(const ExecutionContext& ctx) const override {
+  void Compute(const framework::ExecutionContext& ctx) const override {
     auto param = ctx.Input<Tensor>("param");
     auto grad = ctx.Input<Tensor>("grad");
     auto param_out = ctx.Output<Tensor>(0);

paddle/operators/sigmoid_op.cc
@@ -13,21 +13,23 @@
 limitations under the License. */
 #include "paddle/operators/sigmoid_op.h"
 namespace paddle {
 namespace operators {
-class SigmoidOp : public OperatorWithKernel {
+class SigmoidOp : public framework::OperatorWithKernel {
  protected:
-  void InferShape(const InferShapeContext &ctx) const override {
+  void InferShape(const framework::InferShapeContext &ctx) const override {
     PADDLE_ENFORCE(ctx.InputSize() == 1, "Sigmoid Op only have one input");
     PADDLE_ENFORCE(ctx.OutputSize() == 1, "Sigmoid Op only have one output");
     ctx.Output<Tensor>(0)->Resize(ctx.Input<Tensor>(0)->dims());
   }
 };
-class SigmoidOpMaker : public OpProtoAndCheckerMaker {
+class SigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  SigmoidOpMaker(OpProto *proto, OpAttrChecker *op_checker)
+  SigmoidOpMaker(framework::OpProto *proto,
+                 framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X", "sigmoid input");
     AddOutput("Y", "sigmoid output");
@@ -35,9 +37,9 @@ class SigmoidOpMaker : public OpProtoAndCheckerMaker {
   }
 };
-class SigmoidOpGrad : public OperatorWithKernel {
+class SigmoidOpGrad : public framework::OperatorWithKernel {
  protected:
-  void InferShape(const InferShapeContext &ctx) const override {
+  void InferShape(const framework::InferShapeContext &ctx) const override {
     ctx.Output<Tensor>(0)->Resize(ctx.Input<Tensor>(0)->dims());
   }
 };
@@ -45,9 +47,11 @@ class SigmoidOpGrad : public OperatorWithKernel {
 }  // namespace operators
 }  // namespace paddle
+namespace ops = paddle::operators;
 REGISTER_OP(sigmoid, ops::SigmoidOp, ops::SigmoidOpMaker);
 REGISTER_GRADIENT_OP(sigmoid, sigmoid_grad, ops::SigmoidOpGrad);
-REGISTER_OP_CPU_KERNEL(sigmoid, ops::SigmoidKernel<ops::CPUPlace, float>);
-REGISTER_OP_CPU_KERNEL(sigmoid_grad,
-                       ops::SigmoidGradKernel<ops::CPUPlace, float>);
+REGISTER_OP_CPU_KERNEL(sigmoid,
+                       ops::SigmoidKernel<paddle::platform::CPUPlace, float>);
+REGISTER_OP_CPU_KERNEL(
+    sigmoid_grad, ops::SigmoidGradKernel<paddle::platform::CPUPlace, float>);

paddle/operators/sigmoid_op.cu
@@ -15,6 +15,9 @@
 #define EIGEN_USE_GPU
 #include "paddle/operators/sigmoid_op.h"
-REGISTER_OP_GPU_KERNEL(sigmoid, ops::SigmoidKernel<ops::GPUPlace, float>);
-REGISTER_OP_GPU_KERNEL(sigmoid_grad,
-                       ops::SigmoidGradKernel<ops::GPUPlace, float>);
+namespace ops = paddle::operators;
+REGISTER_OP_GPU_KERNEL(sigmoid,
+                       ops::SigmoidKernel<paddle::platform::GPUPlace, float>);
+REGISTER_OP_GPU_KERNEL(
+    sigmoid_grad, ops::SigmoidGradKernel<paddle::platform::GPUPlace, float>);

paddle/operators/sigmoid_op.h
@@ -13,16 +13,21 @@
 limitations under the License. */
 #pragma once
-#include "paddle/operators/type_alias.h"
+#include "paddle/framework/eigen.h"
+#include "paddle/framework/op_registry.h"
 namespace paddle {
 namespace operators {
+using Tensor = framework::Tensor;
+template <typename T, int MajorType = Eigen::RowMajor,
+          typename IndexType = Eigen::DenseIndex>
+using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
 template <typename Place, typename T>
-class SigmoidKernel : public OpKernel {
+class SigmoidKernel : public framework::OpKernel {
  public:
-  void Compute(const ExecutionContext& context) const override {
+  void Compute(const framework::ExecutionContext& context) const override {
     auto input = context.Input<Tensor>(0);
     auto output = context.Output<Tensor>(0);
     output->mutable_data<T>(context.GetPlace());
@@ -37,9 +42,9 @@ class SigmoidKernel : public OpKernel {
 };
 template <typename Place, typename T>
-class SigmoidGradKernel : public OpKernel {
+class SigmoidGradKernel : public framework::OpKernel {
  public:
-  void Compute(const ExecutionContext& context) const override {
+  void Compute(const framework::ExecutionContext& context) const override {
     auto Y_t = context.Input<Tensor>("Y");
     auto dY_t = context.Input<Tensor>(framework::GradVarName("Y"));
     auto dX_t = context.Output<Tensor>(framework::GradVarName("X"));

paddle/operators/softmax_op.cc
@@ -17,9 +17,9 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
-class SoftmaxOp : public OperatorWithKernel {
+class SoftmaxOp : public framework::OperatorWithKernel {
  protected:
-  void InferShape(const InferShapeContext &ctx) const override {
+  void InferShape(const framework::InferShapeContext &ctx) const override {
     PADDLE_ENFORCE_EQ(ctx.InputSize(), 1UL,
                       "Only one input is need for softmax");
     PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("X")->dims().size(), 2UL,
@@ -30,9 +30,10 @@ class SoftmaxOp : public OperatorWithKernel {
   }
 };
-class SoftmaxOpMaker : public OpProtoAndCheckerMaker {
+class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  SoftmaxOpMaker(OpProto *proto, OpAttrChecker *op_checker)
+  SoftmaxOpMaker(framework::OpProto *proto,
+                 framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X", "input of softmax");
     AddOutput("Y", "output of softmax");
@@ -40,9 +41,9 @@ class SoftmaxOpMaker : public OpProtoAndCheckerMaker {
   }
 };
-class SoftmaxOpGrad : public OperatorWithKernel {
+class SoftmaxOpGrad : public framework::OperatorWithKernel {
  protected:
-  void InferShape(const InferShapeContext &ctx) const override {
+  void InferShape(const framework::InferShapeContext &ctx) const override {
     PADDLE_ENFORCE_EQ(ctx.InputSize(), 3UL,
                       "Input of SoftmaxOpGrad should be 3, X, Y, YG");
     PADDLE_ENFORCE_EQ(ctx.OutputSize(), 1UL,
@@ -61,8 +62,11 @@ class SoftmaxOpGrad : public OperatorWithKernel {
 }  // namespace operators
 }  // namespace paddle
+namespace ops = paddle::operators;
 REGISTER_OP(softmax, ops::SoftmaxOp, ops::SoftmaxOpMaker);
-REGISTER_OP_CPU_KERNEL(softmax, ops::SoftmaxKernel<ops::CPUPlace, float>);
+REGISTER_OP_CPU_KERNEL(softmax,
+                       ops::SoftmaxKernel<paddle::platform::CPUPlace, float>);
 REGISTER_GRADIENT_OP(softmax, softmax_grad, ops::SoftmaxOpGrad);
-REGISTER_OP_CPU_KERNEL(softmax_grad,
-                       ops::SoftmaxGradKernel<ops::CPUPlace, float>);
+REGISTER_OP_CPU_KERNEL(
+    softmax_grad, ops::SoftmaxGradKernel<paddle::platform::CPUPlace, float>);

paddle/operators/softmax_op.cu
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+/* Copyright (c) 2016 PaddlePaddle Authors All Rights Reserve.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -13,9 +13,11 @@
 limitations under the License. */
 #define EIGEN_USE_GPU
 #include "paddle/framework/op_registry.h"
 #include "paddle/operators/softmax_op.h"
-REGISTER_OP_GPU_KERNEL(softmax, ops::SoftmaxKernel<ops::GPUPlace, float>);
-REGISTER_OP_GPU_KERNEL(softmax_grad,
-                       ops::SoftmaxGradKernel<ops::GPUPlace, float>);
+namespace ops = paddle::operators;
+REGISTER_OP_GPU_KERNEL(softmax,
+                       ops::SoftmaxKernel<paddle::platform::GPUPlace, float>);
+REGISTER_OP_GPU_KERNEL(
+    softmax_grad, ops::SoftmaxGradKernel<paddle::platform::GPUPlace, float>);

paddle/operators/softmax_op.h
@@ -13,19 +13,21 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #pragma once
-#include "paddle/framework/ddim.h"
-#include "paddle/framework/operator.h"
-#include "paddle/framework/tensor.h"
-#include "paddle/operators/type_alias.h"
+#include "paddle/framework/eigen.h"
+#include "paddle/framework/op_registry.h"
 namespace paddle {
 namespace operators {
+using Tensor = framework::Tensor;
+template <typename T, int MajorType = Eigen::RowMajor,
+          typename IndexType = Eigen::DenseIndex>
+using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
 template <typename Place, typename T>
-class SoftmaxKernel : public OpKernel {
+class SoftmaxKernel : public framework::OpKernel {
  public:
-  void Compute(const ExecutionContext& context) const override {
+  void Compute(const framework::ExecutionContext& context) const override {
     auto input = context.Input<Tensor>("X");
     auto output = context.Output<Tensor>("Y");
     output->mutable_data<T>(context.GetPlace());
@@ -62,9 +64,9 @@ class SoftmaxKernel : public OpKernel {
 };
 template <typename Place, typename T>
-class SoftmaxGradKernel : public OpKernel {
+class SoftmaxGradKernel : public framework::OpKernel {
  public:
-  void Compute(const ExecutionContext& context) const override {
+  void Compute(const framework::ExecutionContext& context) const override {
     std::shared_ptr<Tensor> scale_ = std::make_shared<Tensor>();
     auto Y = context.Input<Tensor>("Y");

paddle/operators/type_alias.h (deleted, 100644 → 0)

/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License. */

#pragma once
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
#include "paddle/operators/net_op.h"

namespace paddle {
namespace operators {

using OpKernel = framework::OpKernel;
using OperatorBase = framework::OperatorBase;
using InferShapeContext = framework::InferShapeContext;
using ExecutionContext = framework::ExecutionContext;
using Variable = framework::Variable;
template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenScalar = framework::EigenScalar<T, MajorType, IndexType>;
template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
template <typename T, size_t D, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>;
using Tensor = framework::Tensor;
using Scope = framework::Scope;
using OperatorWithKernel = framework::OperatorWithKernel;
using OperatorBase = framework::OperatorBase;
using OpProtoAndCheckerMaker = framework::OpProtoAndCheckerMaker;
using OpProto = framework::OpProto;
using OpAttrChecker = framework::OpAttrChecker;
using CPUPlace = platform::CPUPlace;
using GPUPlace = platform::GPUPlace;
using OpRegistry = framework::OpRegistry;

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

paddle/pybind/CMakeLists.txt (deleted, 100644 → 0)

cc_library(paddle_pybind SHARED
    SRCS pybind.cc
    DEPS pybind python backward
        fc_op
        sgd_op
        add_op
        mean_op
        cross_entropy_op
        recurrent_op
        fill_zeros_like_op)