Crayon鑫 / Paddle
Forked from PaddlePaddle / Paddle

Commit df2b054b
Authored Jan 03, 2018 by tensor-tang

follow comments refine code

Parent: 43606158

Showing 4 changed files with 36 additions and 81 deletions (+36 -81)

paddle/gserver/layers/MKLPackedRecurrentLayer.cpp  +26 -38
paddle/gserver/layers/MKLPackedRecurrentLayer.h     +7 -22
paddle/gserver/layers/MKLPackedWeight.h             +3 -17
paddle/gserver/layers/RecurrentLayer.cpp            +0  -4

paddle/gserver/layers/MKLPackedRecurrentLayer.cpp
@@ -53,28 +53,19 @@ void MKLPackedRecurrentLayer::forwardBatch(int batchSize,
     REGISTER_TIMER_INFO("RecurrentFwBatch", getName().c_str());
     /* forward one batch */
     for (size_t n = 0; n < batchValue_->getNumBatch(); n++) {
-      MatrixPtr batch2 = batchValue_->getBatchValue(n);
+      MatrixPtr batchValue = batchValue_->getBatchValue(n);
       if (n != 0) {
-        MatrixPtr batch1 =
-            batchValue_->getBatchValue(n - 1, batch2->getHeight());
-        // batch2->mul(*batch1, *weight_->getW(), 1, 1);
-        packed_weight_->compute(batch2, batch1);
+        MatrixPtr preBatchValue =
+            batchValue_->getBatchValue(n - 1, batchValue->getHeight());
+        packed_weight_->compute(batchValue, preBatchValue);
       }
-#pragma omp parallel for collapse(2)
-      for (size_t i = 0; i < batch2->getHeight(); i++) {
-        for (size_t j = 0; j < batch2->getWidth(); j++) {
-          *(batch2->getData() + i * batch2->getWidth() + j) =
-              *(batch2->getData() + i * batch2->getWidth() + j) > 0
-                  ? *(batch2->getData() + i * batch2->getWidth() + j)
-                  : 0;
-        }
-      }
+      Argument arg;
+      arg.value = batchValue;
+      activation_->forward(arg).check();
     }
   }
   batchValue_->copyBackSeq(*output_.value);
 }
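
For orientation, here is a minimal self-contained sketch, using hypothetical stand-in types rather than Paddle's real Matrix/Argument API, of the control flow this hunk converges on: one packed-weight multiply against the previous time-step batch, then the layer's activation object, per batch (the old code instead hard-wired a ReLU as a raw OpenMP loop).

    // Hypothetical stand-ins; Paddle's MatrixPtr/Argument types are richer.
    #include <cstddef>
    #include <vector>

    using Matrix = std::vector<std::vector<float>>;  // row-major [rows][cols]

    // dst += src * W -- the role packed_weight_->compute(dst, src) plays.
    // Only dst's rows are touched; src may hold more rows (longer sequences),
    // mirroring getBatchValue(n - 1, batchValue->getHeight()) above.
    void addMul(Matrix& dst, const Matrix& src, const Matrix& W) {
      for (size_t i = 0; i < dst.size(); ++i)
        for (size_t k = 0; k < W.size(); ++k)
          for (size_t j = 0; j < W[k].size(); ++j)
            dst[i][j] += src[i][k] * W[k][j];
    }

    // In-place activation -- the role activation_->forward(arg) plays;
    // ReLU here, matching the loop the commit removes.
    void relu(Matrix& m) {
      for (auto& row : m)
        for (auto& v : row) v = v > 0 ? v : 0;
    }

    // batches[n] arrives holding the input projection in_n; on return it
    // holds out_n = act(in_n + out_{n-1} * W).
    void forwardBatch(std::vector<Matrix>& batches, const Matrix& W) {
      for (size_t n = 0; n < batches.size(); ++n) {
        if (n != 0) addMul(batches[n], batches[n - 1], W);
        relu(batches[n]);
      }
    }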
@@ -94,25 +85,27 @@ void MKLPackedRecurrentLayer::backwardBatch(int batchSize,
     REGISTER_TIMER_INFO("RecurrentBwData", getName().c_str());
     /* backward one batch */
     for (int n = (int)numBatch - 1; n >= 0; n--) {
-      MatrixPtr batch2 = batchGrad_->getBatchValue(n);
-      MatrixPtr batch1 = batchValue_->getBatchValue(n, batch2->getHeight());
+      MatrixPtr batchGrad = batchGrad_->getBatchValue(n);
+      MatrixPtr batchValue =
+          batchValue_->getBatchValue(n, batchGrad->getHeight());

       Argument arg;
-      arg.value = batch1;
-      arg.grad = batch2;
+      arg.value = batchValue;
+      arg.grad = batchGrad;
       activation_->backward(arg).check();

       if (n != 0) {
-        batch1 = batchGrad_->getBatchValue(n - 1, batch2->getHeight());
-        // batch1->mul(*batch2, *weightT, 1, 1);
-        packed_weightT_->compute(batch1, batch2);
+        batchValue = batchGrad_->getBatchValue(n - 1, batchGrad->getHeight());
+        packed_weightT_->compute(batchValue, batchGrad);
       }

       if (backwardByBatch && weight_->getWGrad()) {
         if (n != 0) {
           /* backward weight */
-          batch1 = batchValue_->getBatchValue(n - 1, batch2->getHeight());
-          weight_->getWGrad()->mul(*batch1->getTranspose(), *batch2, 1, 1);
+          batchValue =
+              batchValue_->getBatchValue(n - 1, batchGrad->getHeight());
+          weight_->getWGrad()->mul(
+              *batchValue->getTranspose(), *batchGrad, 1, 1);
         }
       }
     }
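
As a reference point for the renaming above, these calls implement the standard backprop identities for a step out_n = act(out_{n-1} * W + in_n); a sketch in plain LaTeX, with h_n the step output and g_n the gradient that batchGrad holds after the activation backward:

    % g_n = \partial L / \partial (\text{pre-activation of step } n), held by batchGrad
    \begin{aligned}
      g_{n-1} &\mathrel{+}= g_n\, W^{\top}        && \text{(packed\_weightT\_->compute)} \\
      \nabla W &\mathrel{+}= h_{n-1}^{\top}\, g_n && \text{(weight\_->getWGrad()->mul)}
    \end{aligned}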
@@ -124,19 +117,14 @@ void MKLPackedRecurrentLayer::backwardBatch(int batchSize,
     REGISTER_TIMER_INFO("RecurrentBwWeight", getName().c_str());
     for (size_t seq = 0; seq < numSequences; ++seq) {
       int len = starts[seq + 1] - starts[seq];
-      if (!reversed_) {
-        weight_->getWGrad()->mul(
-            *output_.value->subMatrix(starts[seq], len - 1)->getTranspose(),
-            *output_.grad->subMatrix(starts[seq] + 1, len - 1),
-            1,
-            1);
-      } else {
-        weight_->getWGrad()->mul(
-            *output_.value->subMatrix(starts[seq] + 1, len - 1)->getTranspose(),
-            *output_.grad->subMatrix(starts[seq], len - 1),
-            1,
-            1);
-      }
+      weight_->getWGrad()->mul(
+          *output_.value
+               ->subMatrix(reversed_ ? starts[seq] + 1 : starts[seq], len - 1)
+               ->getTranspose(),
+          *output_.grad->subMatrix(
+              reversed_ ? starts[seq] : starts[seq] + 1, len - 1),
+          1,
+          1);
     }
   }
 }

paddle/gserver/layers/MKLPackedRecurrentLayer.h
@@ -14,36 +14,18 @@ limitations under the License. */

 #pragma once
-#include <gflags/gflags.h>
-#include "Layer.h"
 #include "MKLPackedWeight.h"
 #include "RecurrentLayer.h"
-#include "SequenceToBatch.h"
-#include "paddle/utils/Stat.h"

 DECLARE_bool(rnn_use_batch);

 namespace paddle {

 /**
- * @brief MKLPackedRecurrentLayer takes one input layer. The output size is
- * the same as that of the input layer.
- * For each sequence [start, end] it performs the following computation:
- * \f[
- * out_{i} = act(in_{i})                 \ \ \text{for} \ i = start      \\
- * out_{i} = act(in_{i} + out_{i-1} * W) \ \ \text{for} \ start < i <= end
- * \f]
- * If reversed is true, the order is reversed:
- * \f[
- * out_{i} = act(in_{i})                 \ \ \text{for} \ i = end        \\
- * out_{i} = act(in_{i} + out_{i+1} * W) \ \ \text{for} \ start <= i < end
- * \f]
- * There are two methods to compute the RNN. One is to compute it one
- * sequence at a time; the other is to reorganize the input into batches
- * and compute it one batch at a time. Users select between them with the
- * rnn_use_batch flag.
+ * @brief MKLPackedRecurrentLayer is the same as RecurrentLayer but is
+ * optimized with MKL cblas packed gemm.
+ * More details:
+ * https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/mkl/mkl_packed.md
  */

 class MKLPackedRecurrentLayer : public RecurrentLayer {
@@ -66,7 +48,10 @@ protected:
                      const int* starts) override;

 protected:
+  /// packed_weight_ contains the same data as RecurrentLayer::weight_,
+  /// but packed for cblas packed gemm
   std::unique_ptr<MKLPackedWeight> packed_weight_;
+  /// packed_weightT_ is the transpose of packed_weight_
   std::unique_ptr<MKLPackedWeight> packed_weightT_;
 };

paddle/gserver/layers/MKLPackedWeight.h
@@ -22,7 +22,9 @@ namespace paddle {

 class MKLPackedWeight {
 protected:
+  /// The pointer to the weight data
   real* weight_;
+  /// The pointer to the weight packed for cblas packed gemm
   real* packedWeight_;

   size_t height_;
   size_t width_;
@@ -41,7 +43,7 @@ public:
   void pack() { pack_(weight_); }

-  void compute(MatrixPtr dst, MatrixPtr src) {
+  void compute(MatrixPtr dst, const MatrixPtr src) {
     cblas_sgemm_compute(CblasRowMajor,
                         CblasNoTrans,
                         CblasPacked,
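
One C++ subtlety in the signature change above, assuming MatrixPtr is a std::shared_ptr<Matrix> typedef as elsewhere in Paddle: const on a by-value smart pointer qualifies the pointer itself, not the pointee. A minimal illustration:

    #include <memory>

    struct Matrix {
      void write() {}  // non-const member
    };
    // Assumption: matches Paddle's typedef in paddle/math/Matrix.h.
    using MatrixPtr = std::shared_ptr<Matrix>;

    void compute(const MatrixPtr src) {
      src->write();    // still compiles: the pointed-to Matrix is non-const
      // src.reset();  // would not compile: the shared_ptr itself is const
    }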
@@ -57,22 +59,6 @@ public:
                         dst->getWidth());
   }

-  void compute(size_t M, real* A, size_t lda, real* C, size_t ldc) {
-    cblas_sgemm_compute(CblasRowMajor,
-                        CblasNoTrans,
-                        CblasPacked,
-                        M,
-                        width_,
-                        height_,
-                        A,
-                        lda,
-                        packedWeight_,
-                        width_,
-                        1.0,
-                        C,
-                        ldc);
-  }
-
 protected:
   void pack_(real* src) {
     if (!packedWeight_) {
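
The CblasPacked argument above is Intel MKL's packed-GEMM extension. A hedged, standalone sketch of that workflow (not from this commit; matrix sizes are arbitrary): pack the weight matrix B once, then reuse the packed buffer across many cblas_sgemm_compute calls, which is exactly what MKLPackedWeight amortizes.

    #include <mkl.h>
    #include <vector>

    int main() {
      const MKL_INT M = 4, N = 3, K = 5;  // C(MxN) += A(MxK) * B(KxN)
      std::vector<float> A(M * K, 1.0f), B(K * N, 2.0f), C(M * N, 0.0f);

      // One-time pack of B (the role of MKLPackedWeight::pack_).
      float* packedB = cblas_sgemm_alloc(CblasBMatrix, M, N, K);
      cblas_sgemm_pack(CblasRowMajor, CblasBMatrix, CblasNoTrans, M, N, K,
                       /*alpha=*/1.0f, B.data(), /*ld=*/N, packedB);

      // Repeated multiplies against the packed B (the role of compute());
      // beta = 1.0 accumulates into C, matching the diff above.
      cblas_sgemm_compute(CblasRowMajor, CblasNoTrans, CblasPacked, M, N, K,
                          A.data(), /*lda=*/K, packedB, /*ldb=*/N,
                          /*beta=*/1.0f, C.data(), /*ldc=*/N);

      cblas_sgemm_free(packedB);
      return 0;
    }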

paddle/gserver/layers/RecurrentLayer.cpp
@@ -13,10 +13,6 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "RecurrentLayer.h"
-#include <gflags/gflags.h>
-#include "Layer.h"
-#include "SequenceToBatch.h"
-#include "paddle/utils/Stat.h"

 DEFINE_bool(rnn_use_batch,
             false,
             "Using the batch method for calculation.");
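
The DEFINE_bool left behind is the gflags definition point for the flag; headers that need it carry only DECLARE_bool, which is why the now-redundant includes can be dropped. A small sketch of the pattern (the flag name here is hypothetical, to avoid clashing with Paddle's real flag):

    #include <gflags/gflags.h>

    // Defined in exactly one .cpp; other files use DECLARE_bool(name).
    DEFINE_bool(rnn_use_batch_demo,
                false,
                "Using the batch method for calculation.");

    int main(int argc, char** argv) {
      // Older gflags exposes this under the google:: namespace instead.
      gflags::ParseCommandLineFlags(&argc, &argv, /*remove_flags=*/true);
      return FLAGS_rnn_use_batch_demo ? 0 : 1;  // read the parsed value
    }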