Commit cb6436b5 in BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Committed on Jun 01, 2017. Author: dangqingqing
Parent: 94d83fcd

CPU implementation of row convolution

Showing 6 changed files with 390 additions and 0 deletions (+390 -0):
paddle/function/RowConvOp.cpp            +172  -0
paddle/function/RowConvOp.h               +42  -0
paddle/gserver/layers/RowConvLayer.cpp   +105  -0
paddle/gserver/layers/RowConvLayer.h      +46  -0
paddle/gserver/tests/test_LayerGrad.cpp   +20  -0
proto/ModelConfig.proto                    +5  -0
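The \brief doc comments in the new files are still TODO(qingqing), so a brief orientation may help: row convolution is a lookahead convolution over the time dimension of a sequence, of the kind used by DeepSpeech2-style unidirectional models. With a filter of height $k$ (the context length) and $d$ features per time step, the forward pass implemented below computes, for each sequence occupying rows $[\mathrm{begin}, \mathrm{end})$:

$$
\mathrm{out}(j, c) \mathrel{+}= \sum_{t=0}^{\min(k,\,\mathrm{end}-j)-1} \mathrm{in}(j+t,\, c)\,\mathrm{filter}(t,\, c),
\qquad \mathrm{begin} \le j < \mathrm{end},\; 0 \le c < d.
$$

Each output row mixes the current row with up to $k-1$ future rows, feature by feature; there is no mixing across the $d$ feature channels.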
paddle/function/RowConvOp.cpp (new file, mode 100644)

```cpp
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "RowConvOp.h"
#include "paddle/math/Vector.h"

namespace paddle {

template <>
void RowConv<DEVICE_TYPE_CPU>(CpuMatrix& out,
                              const CpuMatrix& in,
                              const CpuMatrix& filter,
                              const CpuIVector& seq) {
  const int* starts = seq.getData();
  const size_t numSeq = seq.getSize() - 1;
  const size_t contextLength = filter.getHeight();
  for (size_t i = 0; i < numSeq; ++i) {
    size_t begin = starts[i];
    size_t end = starts[i + 1];
    for (size_t j = begin; j < end; ++j) {
      MatrixPtr x;
      MatrixPtr w;
      if ((j + contextLength) < end) {
        x = (const_cast<CpuMatrix&>(in)).subMatrix(j, contextLength);
        w = (const_cast<CpuMatrix&>(filter)).subMatrix(0, contextLength);
      } else {
        x = (const_cast<CpuMatrix&>(in)).subMatrix(j, end - j);
        w = (const_cast<CpuMatrix&>(filter)).subMatrix(0, end - j);
      }
      MatrixPtr y = out.subMatrix(j, 1);
      y->addDotMulVMM(*x, *w);
    }
  }
}
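
// Editor's note (not part of the original commit): differentiating the
// forward rule  out(r, c) += sum_t in(r + t, c) * filter(t, c)  gives the
// two gradients computed by RowConvGrad below. Within one sequence of rows
// [begin, end), writing r = begin + j:
//
//   filterG(t, c) += sum_j outG(begin + j, c) * in(begin + j + t, c),
//     which the first loop nest batches with one subMatrix pair per t;
//
//   inG(begin + j, c) += sum_{t <= j} outG(begin + j - t, c) * filter(t, c),
//     which the second loop nest accumulates row by row via addDotMul.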

template <>
void RowConvGrad<DEVICE_TYPE_CPU>(const CpuMatrix& outG,
                                  const CpuMatrix& in,
                                  const CpuMatrix& filter,
                                  CpuMatrix& inG,
                                  CpuMatrix& filterG,
                                  const CpuIVector& seq) {
  // gradient w.r.t filter
  const int* starts = seq.getData();
  const size_t numSeq = seq.getSize() - 1;
  const size_t contextLength = filter.getHeight();
  if (filterG) {
    for (size_t i = 0; i < numSeq; ++i) {
      size_t begin = starts[i];
      size_t end = starts[i + 1];
      size_t steps = end - begin;
      for (size_t j = 0; j < contextLength; ++j) {
        MatrixPtr x =
            (const_cast<CpuMatrix&>(in)).subMatrix(begin + j, steps - j);
        MatrixPtr dy =
            (const_cast<CpuMatrix&>(outG)).subMatrix(begin, steps - j);
        MatrixPtr dw = filterG.subMatrix(j, 1);
        dw->addDotMulVMM(*dy, *x);
      }
    }
  }

  // gradient w.r.t input feature
  if (inG) {
    for (size_t i = 0; i < numSeq; ++i) {
      size_t begin = starts[i];
      size_t end = starts[i + 1];
      size_t steps = end - begin;
      for (size_t j = 0; j < steps; ++j) {
        MatrixPtr dx = inG.subMatrix(begin + j, 1);
        for (size_t t = 0; t < contextLength; ++t) {
          if ((int(j) - int(t)) >= 0) {
            MatrixPtr dy =
                (const_cast<CpuMatrix&>(outG)).subMatrix(begin + j - t, 1);
            MatrixPtr w = (const_cast<CpuMatrix&>(filter)).subMatrix(t, 1);
            dx->addDotMul(*dy, *w, 1.0, 1.0);
          }
        }
      }
    }
  }
}

/**
 * \brief TODO(qingqing)
 *
 */
template <DeviceType Device>
class RowConvFunc : public FunctionBase {
public:
  void init(const FuncConfig& config) override {}

  void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
    // check
    CHECK_EQ(2UL, inputs.size());
    CHECK_EQ(1UL, outputs.size());
    CHECK_EQ(outputs[0].getArgType(), ADD_TO);
    CHECK(inputs[0].isSequenceArg() && outputs[0].isSequenceArg())
        << "SequenceArg required here.";
    const auto in = dynamic_cast<const SequenceArg&>(inputs[0]);
    auto out = dynamic_cast<const SequenceArg&>(outputs[0]);
    auto w = inputs[1];
    CHECK(in.data() && out.data() && in.getSequenceId().data());
    CHECK_EQ(in.shape().ndims(), 2UL);
    CHECK_EQ(out.shape().ndims(), 2UL);
    CHECK_EQ(in.shape()[1], out.shape()[1]);
    CHECK_EQ(in.shape()[0], out.shape()[0]);
    CHECK_EQ(w.shape()[1], in.shape()[1]);

    auto outMat = out.matrix<Device>();
    const auto inMat = in.matrix<Device>();
    const auto wMat = w.matrix<Device>();
    const auto seqId = in.getSequenceId().vector<int, Device>();

    RowConv<Device>(outMat, inMat, wMat, seqId);
  }
};

/**
 * \brief The backward propagation of row convolution: given the gradient of
 * the forward output, computes the gradients w.r.t. the input feature and
 * the convolution filter.
 */
template <DeviceType Device>
class RowConvGradFunc : public FunctionBase {
public:
  void init(const FuncConfig& config) override {}

  void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
    const auto outGrad = dynamic_cast<const SequenceArg&>(inputs[0]);
    const auto in = dynamic_cast<const SequenceArg&>(inputs[1]);
    const auto w = inputs[2];
    auto inGrad = dynamic_cast<const SequenceArg&>(outputs[0]);
    auto wGrad = outputs[1];

    const auto outGMat = outGrad.matrix<Device>();
    const auto inMat = in.matrix<Device>();
    const auto wMat = w.matrix<Device>();
    auto inGMat = inGrad.data()
                      ? inGrad.matrix<Device>()
                      : typename Tensor<real, Device>::Matrix(nullptr, 0, 0);
    auto wGMat = wGrad.data()
                     ? wGrad.matrix<Device>()
                     : typename Tensor<real, Device>::Matrix(nullptr, 0, 0);
    const auto seqId = in.getSequenceId().vector<int, Device>();

    RowConvGrad<Device>(outGMat, inMat, wMat, inGMat, wGMat, seqId);
  }
};

REGISTER_TYPED_FUNC(RowConv, CPU, RowConvFunc);
REGISTER_TYPED_FUNC(RowConvGrad, CPU, RowConvGradFunc);
#ifndef PADDLE_ONLY_CPU
REGISTER_TYPED_FUNC(RowConv, GPU, RowConvFunc);
REGISTER_TYPED_FUNC(RowConvGrad, GPU, RowConvGradFunc);
#endif

}  // namespace paddle
```
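
To make the kernel's indexing concrete, here is a self-contained naive re-implementation of the forward pass on flat arrays. This is an editor's sketch for illustration only, not part of the commit; it should agree with RowConv<DEVICE_TYPE_CPU> above up to floating-point summation order.

```cpp
#include <cstdio>
#include <vector>

// Naive row convolution forward on row-major [steps x dim] data.
// starts holds sequence offsets, like the CpuIVector seq argument above.
void rowConvRef(std::vector<float>& out,
                const std::vector<float>& in,
                const std::vector<float>& filter,  // [k x dim]
                const std::vector<int>& starts,
                int k,
                int dim) {
  for (size_t i = 0; i + 1 < starts.size(); ++i) {
    int begin = starts[i], end = starts[i + 1];
    for (int j = begin; j < end; ++j)
      for (int t = 0; t < k && j + t < end; ++t)  // clip at the sequence end
        for (int c = 0; c < dim; ++c)
          out[j * dim + c] += in[(j + t) * dim + c] * filter[t * dim + c];
  }
}

int main() {
  const int k = 2, dim = 2;
  std::vector<int> starts = {0, 3};            // one sequence of 3 steps
  std::vector<float> in = {1, 2, 3, 4, 5, 6};  // rows (1,2) (3,4) (5,6)
  std::vector<float> filter = {1, 1, 0.5f, 0.5f};
  std::vector<float> out(in.size(), 0.0f);
  rowConvRef(out, in, filter, starts, k, dim);
  for (int j = 0; j < 3; ++j)
    std::printf("%g %g\n", out[j * dim], out[j * dim + 1]);
  // prints: 2.5 4 / 5.5 7 / 5 6
  return 0;
}
```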
paddle/function/RowConvOp.h (new file, mode 100644)

```cpp
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "Function.h"

namespace paddle {

/**
 * \brief TODO(qingqing)
 *
 */
template <DeviceType DType>
void RowConv(typename Tensor<real, DType>::Matrix& out,
             const typename Tensor<real, DType>::Matrix& in,
             const typename Tensor<real, DType>::Matrix& filter,
             const typename Tensor<int, DType>::Vector& seq);

/**
 * \brief TODO(qingqing)
 *
 */
template <DeviceType DType>
void RowConvGrad(const typename Tensor<real, DType>::Matrix& outG,
                 const typename Tensor<real, DType>::Matrix& in,
                 const typename Tensor<real, DType>::Matrix& filter,
                 typename Tensor<real, DType>::Matrix& inG,
                 typename Tensor<real, DType>::Matrix& filterG,
                 const typename Tensor<int, DType>::Vector& seq);

}  // namespace paddle
```
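
The declarations above lean on Paddle's Tensor<real, DType> traits so that one templated signature covers every device. A standalone sketch of that pattern, using stub types rather than Paddle's (editor's illustration; the stubs are not real Paddle classes):

```cpp
#include <cstdio>

enum DeviceType { DEVICE_TYPE_CPU, DEVICE_TYPE_GPU };

struct CpuMatrixStub { const char* where = "cpu"; };
struct GpuMatrixStub { const char* where = "gpu"; };

// Trait mapping (element type, device tag) -> concrete matrix type,
// mirroring how Tensor<real, DType>::Matrix is used in RowConvOp.h.
template <class T, DeviceType D> struct TensorStub;
template <class T> struct TensorStub<T, DEVICE_TYPE_CPU> {
  using Matrix = CpuMatrixStub;
};
template <class T> struct TensorStub<T, DEVICE_TYPE_GPU> {
  using Matrix = GpuMatrixStub;
};

// Declared once for all devices, like RowConv above ...
template <DeviceType D>
void rowConvSketch(typename TensorStub<float, D>::Matrix& out);

// ... and specialized per device in a .cpp (or .cu), like RowConvOp.cpp.
template <>
void rowConvSketch<DEVICE_TYPE_CPU>(CpuMatrixStub& out) {
  std::printf("kernel runs on %s\n", out.where);
}

int main() {
  CpuMatrixStub m;
  rowConvSketch<DEVICE_TYPE_CPU>(m);
  return 0;
}
```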
paddle/gserver/layers/RowConvLayer.cpp (new file, mode 100644)

```cpp
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "RowConvLayer.h"
#include "paddle/utils/Stat.h"

namespace paddle {

REGISTER_LAYER(row_conv, RowConvLayer);

bool RowConvLayer::init(const LayerMap& layerMap,
                        const ParameterMap& parameterMap) {
  /* Initialize the basic parent class */
  Layer::init(layerMap, parameterMap);
  contexLength_ = config_.inputs(0).row_conv_conf().context_length();

  CHECK_EQ(inputLayers_.size(), 1UL);
  weight_.reset(new Weight(contexLength_, getSize(), parameters_[0]));
  createFunction(forward_, "RowConv", FuncConfig());
  createFunction(backward_, "RowConvGrad", FuncConfig());

  return true;
}

void RowConvLayer::forward(PassType passType) {
  Layer::forward(passType);
  MatrixPtr input = getInputValue(0);
  size_t height = input->getHeight();
  size_t width = input->getWidth();
  CHECK_EQ(width, getSize());
  resetOutput(height, width);

  const auto startPos = getInput(0).sequenceStartPositions->getVector(useGpu_);
  wDims_ = TensorShape({contexLength_, width});

  MatrixPtr outV = getOutputValue();
  BufferArgs inputs;
  BufferArgs outputs;
  inputs.addArg(*getInputValue(0), *startPos);
  inputs.addArg(*weight_->getW(), wDims_);
  outputs.addArg(*getOutputValue(), *startPos, ADD_TO);

  {
    REGISTER_TIMER_INFO("RowConvForward", getName().c_str());
    forward_[0]->calc(inputs, outputs);
  }

  /* activation */ {
    REGISTER_TIMER_INFO("FwAtvTimer", getName().c_str());
    forwardActivation();
  }
}

void RowConvLayer::backward(const UpdateCallback& callback) {
  /* Do derivation */ {
    REGISTER_TIMER_INFO("BpAvtTimer", getName().c_str());
    backwardActivation();
  }

  const auto startPos = getInput(0).sequenceStartPositions->getVector(useGpu_);

  BufferArgs inputs;
  BufferArgs outputs;
  inputs.addArg(*getOutputGrad(), *startPos);
  inputs.addArg(*getInputValue(0), *startPos);
  inputs.addArg(*weight_->getW(), *startPos);

  MatrixPtr inGrad = getInputGrad(0);
  MatrixPtr wGrad = weight_->getWGrad();
  size_t h = getInputValue(0)->getHeight();
  size_t w = getInputValue(0)->getWidth();
  outputs.addArg(
      inGrad ? (*inGrad) : *(Matrix::create(nullptr, h, w, false, useGpu_)),
      *startPos,
      ADD_TO);
  outputs.addArg(
      wGrad ? (*wGrad)
            : *(Matrix::create(nullptr, contexLength_, w, false, useGpu_)),
      wDims_,
      ADD_TO);

  {
    REGISTER_TIMER_INFO("RowConvBackward", getName().c_str());
    backward_[0]->calc(inputs, outputs);
  }

  {
    REGISTER_TIMER_INFO("WeightUpdate", getName().c_str());
    weight_->getParameterPtr()->incUpdate(callback);
  }
}

}  // namespace paddle
```
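
One wiring detail in backward() above: the function always expects both gradient output arguments, so when getInputGrad(0) or getWGrad() is null the layer passes a shape-only matrix created with a null data pointer, and RowConvGradFunc skips it by checking data(). A minimal standalone sketch of that convention (editor's illustration with a stub type):

```cpp
#include <cstdio>

// Stub for the relevant part of Matrix::create(nullptr, h, w, ...):
// a header that carries a shape but owns no storage.
struct MatStub {
  float* data;
  int h, w;
};

// Mirrors RowConvGradFunc: do work only when a real buffer is present.
void gradSketch(MatStub& inG) {
  if (inG.data == nullptr) {
    std::printf("no input-grad buffer, skipping %dx%d placeholder\n",
                inG.h, inG.w);
    return;
  }
  // ... accumulate the input gradient into inG.data here ...
}

int main() {
  MatStub placeholder{nullptr, 100, 512};  // shape only, no storage
  gradSketch(placeholder);
  return 0;
}
```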
paddle/gserver/layers/RowConvLayer.h (new file, mode 100644)

```cpp
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "Layer.h"

namespace paddle {

/**
 * \brief Row Convolution Layer.
 */
class RowConvLayer : public Layer {
public:
  explicit RowConvLayer(const LayerConfig& config) : Layer(config) {}

  ~RowConvLayer() {}

  bool init(const LayerMap& layerMap,
            const ParameterMap& parameterMap) override;
  void forward(PassType passType) override;
  void backward(const UpdateCallback& callback = nullptr) override;

protected:
  // Row convolution weight, contexLength_ * fan_out.
  // fan_out is the size of the output feature.
  std::unique_ptr<Weight> weight_;

  // std::unique_ptr<Weight> biases_;

  // how many steps to look ahead
  size_t contexLength_;
  TensorShape wDims_;
};
}  // namespace paddle
```
paddle/gserver/tests/test_LayerGrad.cpp

```diff
@@ -1705,6 +1705,26 @@ TEST(Layer, TransLayer) {
   }
 }
 
+TEST(Layer, RowConvLayer) {
+  const int context = 3;
+  const int size = 512;
+
+  TestConfig config;
+  config.layerConfig.set_type("row_conv");
+  config.layerConfig.set_size(size);
+  config.layerConfig.set_active_type("sigmoid");
+
+  config.inputDefs.push_back(
+      {INPUT_SEQUENCE_DATA, "layer_0", size, context * size});
+  LayerInputConfig* input = config.layerConfig.add_inputs();
+  RowConvConfig* conv = input->mutable_row_conv_conf();
+  conv->set_context_length(context);
+
+  for (auto useGpu : {false, true}) {
+    testLayerGrad(config, "row_conv", 100, false, useGpu, false);
+  }
+}
+
 int main(int argc, char** argv) {
   testing::InitGoogleTest(&argc, argv);
   initMain(argc, argv);
```
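
testLayerGrad exercises the new layer with a numerical gradient check, effectively comparing the analytic gradients from backward() against finite differences of the forward pass. The core idea in isolation (editor's sketch, not Paddle code):

```cpp
#include <cmath>
#include <cstdio>

// Central-difference check of an analytic derivative, the idea behind
// gradient-checking tests. Example: f(x) = x^3, so f'(x) = 3x^2.
int main() {
  const double x = 0.7, eps = 1e-5;
  const double analytic = 3.0 * x * x;
  const double numeric =
      (std::pow(x + eps, 3) - std::pow(x - eps, 3)) / (2.0 * eps);
  std::printf("analytic %.8f vs numeric %.8f (|diff| = %.2e)\n",
              analytic, numeric, std::fabs(analytic - numeric));
  return 0;
}
```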
proto/ModelConfig.proto

```diff
@@ -194,6 +194,10 @@ message MaxOutConfig {
   required uint32 groups = 2;
 }
 
+message RowConvConfig {
+  required uint32 context_length = 1;
+}
+
 message ProjectionConfig {
   required string type = 1;
   required string name = 2;
@@ -279,6 +283,7 @@ message LayerInputConfig {
   optional SppConfig spp_conf = 12;
   optional PriorBoxConfig priorbox_conf = 13;
   optional PadConfig pad_conf = 14;
+  optional RowConvConfig row_conv_conf = 15;
 }
 
 message LayerConfig {
```
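
From C++, the new field is used exactly as the test above does, through the generated protobuf API. A minimal standalone use, assuming the generated header name ModelConfig.pb.h and a paddle proto package (neither is shown in this diff):

```cpp
#include "ModelConfig.pb.h"  // assumed name of the generated header

int main() {
  paddle::LayerInputConfig input;  // assumes `package paddle;` in the proto
  // row_conv_conf is the new field 15; context_length is required, so it
  // must be set before the message is serialized.
  input.mutable_row_conv_conf()->set_context_length(3);
  return input.row_conv_conf().context_length() == 3 ? 0 : 1;
}
```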