PaddlePaddle / Paddle

Commit 09d712d6
Authored June 21, 2017 by hedaoyuan

Remove useless code (Matrix::convExpand and Matrix::convShrink).
Parent: 5bfcb7f8

Showing 5 changed files with 0 additions and 496 deletions (+0, -496):
paddle/cuda/include/hl_cnn.h            +0  -67
paddle/cuda/include/stub/hl_cnn_stub.h  +0  -30
paddle/cuda/src/hl_cuda_cnn.cu          +0  -128
paddle/math/Matrix.cpp                  +0  -172
paddle/math/Matrix.h                    +0  -99

paddle/cuda/include/hl_cnn.h
@@ -17,73 +17,6 @@ limitations under the License. */
#include "hl_base.h"
/**
 * @brief Shrink column to feature.
 *
 * @param[in]  dataCol   expanded column data.
 * @param[in]  channels  number of channels.
 * @param[in]  height    image height.
 * @param[in]  width     image width.
 * @param[in]  blockH    filter height.
 * @param[in]  blockW    filter width.
 * @param[in]  strideH   stride height.
 * @param[in]  strideW   stride width.
 * @param[in]  paddingH  padding height.
 * @param[in]  paddingW  padding width.
 * @param[in]  outputH   output height.
 * @param[in]  outputW   output width.
 * @param[out] dataIm    output image data.
 * @param[in]  alpha     scale applied to the accumulated column sum.
 * @param[in]  beta      scale applied to the existing image data.
 */
extern void hl_shrink_col2feature(const real* dataCol,
                                  size_t channels,
                                  size_t height,
                                  size_t width,
                                  size_t blockH,
                                  size_t blockW,
                                  size_t strideH,
                                  size_t strideW,
                                  size_t paddingH,
                                  size_t paddingW,
                                  size_t outputH,
                                  size_t outputW,
                                  real* dataIm,
                                  real alpha = 1.0f,
                                  real beta = 0.0f);

/**
 * @brief Expand feature to column.
 *
 * @param[in]  dataIm    input image data.
 * @param[in]  channels  number of channels.
 * @param[in]  height    image height.
 * @param[in]  width     image width.
 * @param[in]  blockH    filter height.
 * @param[in]  blockW    filter width.
 * @param[in]  strideH   stride height.
 * @param[in]  strideW   stride width.
 * @param[in]  paddingH  padding height.
 * @param[in]  paddingW  padding width.
 * @param[in]  outputH   output height.
 * @param[in]  outputW   output width.
 * @param[out] dataCol   expanded column data.
 */
extern void hl_expand_feature2col(const real* dataIm,
                                  size_t channels,
                                  size_t height,
                                  size_t width,
                                  size_t blockH,
                                  size_t blockW,
                                  size_t strideH,
                                  size_t strideW,
                                  size_t paddingH,
                                  size_t paddingW,
                                  size_t outputH,
                                  size_t outputW,
                                  real* dataCol);

/**
 * @brief Maximum pool forward.
 *
...
...
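
For context, these two entry points are the GPU half of the classic im2col/col2im pair behind Paddle's expand-convolution path. A minimal CPU sketch of the feature-to-column mapping documented above (standalone and illustrative only; float stands in for Paddle's real typedef) would look roughly like this, and it mirrors the CpuMatrix::convExpand implementation shown later in this diff:

// Minimal im2col reference sketch (illustrative, not part of this commit).
#include <cstddef>
using std::size_t;

void expand_feature2col_ref(const float* dataIm, size_t channels,
                            size_t height, size_t width,
                            size_t blockH, size_t blockW,
                            size_t strideH, size_t strideW,
                            size_t paddingH, size_t paddingW,
                            size_t outputH, size_t outputW,
                            float* dataCol) {
  // One row of the column matrix per (channel, blockRow, blockCol) triple,
  // one column per (outputRow, outputCol) position.
  size_t channelsCol = channels * blockH * blockW;
  for (size_t c = 0; c < channelsCol; ++c) {
    size_t wOffset = c % blockW;
    size_t hOffset = (c / blockW) % blockH;
    size_t c_im = c / blockW / blockH;
    for (size_t h = 0; h < outputH; ++h) {
      for (size_t w = 0; w < outputW; ++w) {
        long row = long(h * strideH + hOffset) - long(paddingH);
        long col = long(w * strideW + wOffset) - long(paddingW);
        dataCol[(c * outputH + h) * outputW + w] =
            (row < 0 || row >= long(height) || col < 0 || col >= long(width))
                ? 0.0f  // the filter tap falls in the zero padding
                : dataIm[(c_im * height + row) * width + col];
      }
    }
  }
}
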

paddle/cuda/include/stub/hl_cnn_stub.h
@@ -17,36 +17,6 @@ limitations under the License. */
#include "hl_cnn.h"
inline void hl_shrink_col2feature(const real* dataCol,
                                  size_t channels,
                                  size_t height,
                                  size_t width,
                                  size_t blockH,
                                  size_t blockW,
                                  size_t strideH,
                                  size_t strideW,
                                  size_t paddingH,
                                  size_t paddingW,
                                  size_t outputH,
                                  size_t outputW,
                                  real* dataIm,
                                  real alpha,
                                  real beta) {}

inline void hl_expand_feature2col(const real* dataIm,
                                  size_t channels,
                                  size_t height,
                                  size_t width,
                                  size_t blockH,
                                  size_t blockW,
                                  size_t strideH,
                                  size_t strideW,
                                  size_t paddingH,
                                  size_t paddingW,
                                  size_t outputH,
                                  size_t outputW,
                                  real* dataCol) {}

inline void hl_maxpool_forward(const int frameCnt,
                               const real* inputData,
                               const int channels,
...
...
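
The stub header exists so that CPU-only builds can compile and link against the hl_cnn.h API without a CUDA toolchain: every GPU entry point gets an empty inline body. A minimal sketch of the general pattern (hl_example and the guard macro are made-up names for illustration, not taken from this diff):

// Illustrative only: a no-op stub stands in for a GPU function when the
// build has no CUDA support, so callers compile and link either way.
#ifdef ONLY_CPU_BUILD
inline void hl_example(const float* src, float* dst, unsigned len) {}
#else
extern void hl_example(const float* src, float* dst, unsigned len);
#endif
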

paddle/cuda/src/hl_cuda_cnn.cu
@@ -18,134 +18,6 @@ limitations under the License. */
#include "hl_cnn.h"
#include "hl_device_functions.cuh"

__global__ void KeFeature2col(size_t n,
                              size_t height,
                              const real* data_im,
                              size_t blockH,
                              size_t blockW,
                              size_t width,
                              size_t strideH,
                              size_t strideW,
                              size_t paddingH,
                              size_t paddingW,
                              size_t height_col,
                              size_t width_col,
                              real* data_col) {
  size_t index =
      (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x;
  if (index < n) {
    size_t w_out = index % width_col;
    index /= width_col;
    size_t h_out = index % height_col;
    size_t channel_in = index / height_col;
    size_t channel_out = channel_in * blockH * blockW;
    size_t h_in = h_out * strideH;
    size_t w_in = w_out * strideW;

    data_col += (channel_out * height_col + h_out) * width_col + w_out;
    for (size_t i = 0; i < blockH; ++i) {
      for (size_t j = 0; j < blockW; ++j) {
        int rIdx = int(h_in + i);
        int cIdx = int(w_in + j);
        if ((rIdx - (int)paddingH) >= (int)height ||
            (rIdx - (int)paddingH) < 0 ||
            (cIdx - (int)paddingW) >= (int)width ||
            (cIdx - (int)paddingW) < 0) {
          *data_col = 0;
        } else {
          rIdx = rIdx + channel_in * height - paddingH;
          cIdx = cIdx - paddingW;
          *data_col = data_im[rIdx * width + cIdx];
        }
        data_col += height_col * width_col;
      }
    }
  }
}

void hl_expand_feature2col(const real* dataIm,
                           size_t channels,
                           size_t height,
                           size_t width,
                           size_t blockH,
                           size_t blockW,
                           size_t strideH,
                           size_t strideW,
                           size_t paddingH,
                           size_t paddingW,
                           size_t outputH,
                           size_t outputW,
                           real* dataCol) {
  size_t numKernels = channels * outputH * outputW;
  size_t blocks = (numKernels + 1024 - 1) / 1024;
  size_t blockX = 512;
  size_t blockY = (blocks + 512 - 1) / 512;
  dim3 threads(1024, 1);
  dim3 grid(blockX, blockY);
  KeFeature2col<<<grid, threads, 0, STREAM_DEFAULT>>>(numKernels,
                                                      height,
                                                      dataIm,
                                                      blockH,
                                                      blockW,
                                                      width,
                                                      strideH,
                                                      strideW,
                                                      paddingH,
                                                      paddingW,
                                                      outputH,
                                                      outputW,
                                                      dataCol);
  CHECK_SYNC("hl_expand_feature2col failed");
}

__global__ void KeCol2Feature(size_t n,
                              const real* data_col,
                              size_t height,
                              size_t width,
                              size_t channels,
                              size_t blockH,
                              size_t blockW,
                              size_t strideH,
                              size_t strideW,
                              size_t paddingH,
                              size_t paddingW,
                              size_t height_col,
                              size_t width_col,
                              real* data_im,
                              real alpha,
                              real beta) {
  size_t index =
      (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x;
  if (index < n) {
    real val = 0;
    int w = int(index % width);
    int h = int((index / width) % height);
    int c = int(index / (width * height));
    if ((w - (int)paddingW) >= 0 &&
        (w - (int)paddingW) < (width - 2 * paddingW) &&
        (h - (int)paddingH) >= 0 &&
        (h - paddingH) < (height - 2 * paddingH)) {
      // compute the start and end of the output
      int w_col_start =
          (w < (int)blockW) ? 0 : (w - int(blockW)) / (int)strideW + 1;
      int w_col_end = min((int)(w / (int)strideW + 1), (int)(width_col));
      int h_col_start =
          (h < (int)blockH) ? 0 : (h - (int)blockH) / (int)strideH + 1;
      int h_col_end = min(int(h / strideH + 1), int(height_col));
      for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
        for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
          // the col location: [c * width * height + h_out, w_out]
          int c_col = int(c * blockH * blockW) +
                      (h - h_col * (int)strideH) * (int)blockW +
                      (w - w_col * (int)strideW);
          val += data_col[(c_col * height_col + h_col) * width_col + w_col];
        }
      }
      h -= paddingH;
      w -= paddingW;
      real tD = data_im[c * ((width - 2 * paddingW) * (height - 2 * paddingH)) +
                        h * (width - 2 * paddingW) + w];
      data_im[c * ((width - 2 * paddingW) * (height - 2 * paddingH)) +
              h * (width - 2 * paddingW) + w] = alpha * val + beta * tD;
    }
  }
}

void hl_shrink_col2feature(const real* dataCol,
                           size_t channels,
                           size_t height,
                           size_t width,
                           size_t blockH,
                           size_t blockW,
                           size_t strideH,
                           size_t strideW,
                           size_t paddingH,
                           size_t paddingW,
                           size_t outputH,
                           size_t outputW,
                           real* dataIm,
                           real alpha,
                           real beta) {
  size_t numKernels =
      channels * (height + 2 * paddingH) * (width + 2 * paddingW);
  size_t blocks = (numKernels + 1024 - 1) / 1024;
  size_t blockX = 512;
  size_t blockY = (blocks + 512 - 1) / 512;
  dim3 threads(1024, 1);
  dim3 grid(blockX, blockY);

  // To avoid involving atomic operations, we will launch one kernel per
  // bottom dimension, and then in the kernel add up the top dimensions.
  KeCol2Feature<<<grid, threads, 0, STREAM_DEFAULT>>>(numKernels,
                                                      dataCol,
                                                      height + 2 * paddingH,
                                                      width + 2 * paddingW,
                                                      channels,
                                                      blockH,
                                                      blockW,
                                                      strideH,
                                                      strideW,
                                                      paddingH,
                                                      paddingW,
                                                      outputH,
                                                      outputW,
                                                      dataIm,
                                                      alpha,
                                                      beta);
  CHECK_SYNC("hl_shrink_col2feature failed");
}

__global__ void KeMaxPoolForward(const int nthreads,
                                 const real* inputData,
                                 const int channels,
                                 const int height,
                                 const int width,
...
...
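
Both host wrappers above size their launch identically: one thread per output element, 1024 threads per block, and the required block count spread over a grid that is a fixed 512 blocks wide. A standalone worked example with assumed sizes (channels=3, 32x32 output) shows the arithmetic and why the kernels guard with "if (index < n)":

// Illustrative launch-geometry arithmetic (assumed example sizes).
#include <cstddef>
#include <cstdio>
using std::size_t;

int main() {
  size_t channels = 3, outputH = 32, outputW = 32;
  size_t numKernels = channels * outputH * outputW;  // 3072 work items
  size_t blocks = (numKernels + 1024 - 1) / 1024;    // 3 blocks needed
  size_t blockX = 512;                               // grid width is fixed
  size_t blockY = (blocks + 512 - 1) / 512;          // 1 grid row here
  // grid(512, 1) x threads(1024) launches 524288 threads for 3072 items,
  // so each kernel recovers a flat index as
  //   (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x
  // and must bail out when index >= numKernels.
  std::printf("numKernels=%zu blocks=%zu grid=(%zu, %zu)\n",
              numKernels, blocks, blockX, blockY);
  return 0;
}
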

paddle/math/Matrix.cpp
@@ -1016,81 +1016,6 @@ void GpuMatrix::check(std::ostream& os, Matrix& refMat, bool printDiff) {
  LOG(INFO) << "the diffCnt is " << diffCnt;
}

void GpuMatrix::convExpand(Matrix& feature,
                           int feaImgHeight,
                           int feaImgWidth,
                           int channels,
                           int blockH,
                           int blockW,
                           int strideH,
                           int strideW,
                           int paddingH,
                           int paddingW,
                           int outputH,
                           int outputW) {
  CHECK(feature.useGpu_ == true) << "Matrix type are not equal";
  CHECK_EQ(size_t(feaImgHeight * feaImgWidth * channels),
           feature.getHeight() * feature.getWidth())
      << "Matrix dimensions are not equal";

  size_t elemCnt = outputH * outputW * blockH * blockW * channels;
  CHECK_EQ(elemCnt, height_ * width_) << "Matrix dimensions are not equal";
  hl_expand_feature2col(feature.getData(),
                        channels,
                        feaImgHeight,
                        feaImgWidth,
                        blockH,
                        blockW,
                        strideH,
                        strideW,
                        paddingH,
                        paddingW,
                        outputH,
                        outputW,
                        getData());
}

void GpuMatrix::convShrink(Matrix& expandFeat,
                           int thisImgHeight,
                           int thisImgWidth,
                           int channels,
                           int blockH,
                           int blockW,
                           int strideH,
                           int strideW,
                           int paddingH,
                           int paddingW,
                           int outputH,
                           int outputW,
                           real alpha,
                           real beta) {
  CHECK(expandFeat.useGpu_ == true) << "Matrix type are not equal";
  CHECK_EQ(size_t(thisImgHeight * thisImgWidth * channels),
           getHeight() * getWidth())
      << "Matrix dimensions are not equal";

  size_t elemCnt = outputH * outputW * blockW * blockH * channels;
  CHECK(elemCnt == expandFeat.getHeight() * expandFeat.getWidth())
      << "Matrix dimensions are not equal";
  hl_shrink_col2feature(expandFeat.getData(),
                        channels,
                        thisImgHeight,
                        thisImgWidth,
                        blockH,
                        blockW,
                        strideH,
                        strideW,
                        paddingH,
                        paddingW,
                        outputH,
                        outputW,
                        getData(),
                        alpha,
                        beta);
}

void GpuMatrix::maxPoolForward(Matrix& inputMat,
                               size_t imgSizeH,
                               size_t imgSizeW,
...
...
@@ -1775,103 +1700,6 @@ void CpuMatrix::inverse(MatrixPtr& matInv, bool memAlloc) {
  CHECK_EQ(info, 0);
}

void CpuMatrix::convExpand(Matrix& feature,
                           int feaImgHeight,
                           int feaImgWidth,
                           int channels,
                           int blockH,
                           int blockW,
                           int strideH,
                           int strideW,
                           int paddingH,
                           int paddingW,
                           int outputH,
                           int outputW) {
  CHECK(feature.useGpu_ == false) << "Matrix type are not equal";
  CHECK_EQ(size_t(feaImgHeight * feaImgWidth * channels),
           feature.getHeight() * feature.getWidth())
      << "Matrix dimensions are not equal";

  size_t elemCnt = outputH * outputW * blockH * blockW * channels;
  CHECK_EQ(elemCnt, height_ * width_) << "Matrix dimensions are not equal";

  int channelsCol = channels * blockH * blockW;
  real* srcData = feature.getData();
  for (int c = 0; c < channelsCol; ++c) {
    int wOffset = c % blockW;
    int hOffset = (c / blockW) % blockH;
    int c_im = c / blockH / blockW;
    for (int h = 0; h < outputH; ++h) {
      for (int w = 0; w < outputW; ++w) {
        // no c_im*height here, to exclude the channel number
        int imgRowIdx = h * strideH + hOffset;
        int imgColIdx = w * strideW + wOffset;
        if ((imgRowIdx - paddingH) < 0 ||
            (imgRowIdx - paddingH) >= feaImgHeight ||
            (imgColIdx - paddingW) < 0 ||
            (imgColIdx - paddingW) >= feaImgWidth) {
          data_[(c * outputH + h) * outputW + w] = 0;
        } else {
          imgRowIdx += c_im * feaImgHeight - paddingH;
          imgColIdx -= paddingW;
          data_[(c * outputH + h) * outputW + w] =
              srcData[imgRowIdx * feaImgWidth + imgColIdx];
        }
      }
    }
  }
}

void CpuMatrix::convShrink(Matrix& expandFeat,
                           int thisImgHeight,
                           int thisImgWidth,
                           int channels,
                           int blockH,
                           int blockW,
                           int strideH,
                           int strideW,
                           int paddingH,
                           int paddingW,
                           int outputH,
                           int outputW,
                           real alpha,
                           real beta) {
  CHECK(expandFeat.useGpu_ == false) << "Matrix type are not equal";
  CHECK_EQ(size_t(thisImgHeight * thisImgWidth * channels),
           getHeight() * getWidth())
      << "Matrix dimensions are not equal";

  size_t elemCnt = outputH * outputW * blockH * blockW * channels;
  CHECK(elemCnt == expandFeat.getHeight() * expandFeat.getWidth())
      << "Matrix dimensions are not equal";

  real* expandData = expandFeat.getData();
  int channelsCol = channels * blockH * blockW;
  for (int c = 0; c < channelsCol; ++c) {
    int wOffset = c % blockW;
    int hOffset = (c / blockW) % blockH;
    int c_im = c / blockW / blockH;
    for (int h = 0; h < outputH; ++h) {
      for (int w = 0; w < outputW; ++w) {
        int imRowIdx = h * strideH + hOffset;
        int imColIdx = w * strideW + wOffset;
        if ((imRowIdx - paddingH) >= 0 &&
            (imRowIdx - paddingH) < thisImgHeight &&
            (imColIdx - paddingW) >= 0 &&
            (imColIdx - paddingW) < thisImgWidth) {
          imRowIdx += c_im * thisImgHeight - paddingH;
          imColIdx -= paddingW;
          data_[imRowIdx * thisImgWidth + imColIdx] =
              alpha * expandData[(c * outputH + h) * outputW + w] +
              beta * data_[imRowIdx * thisImgWidth + imColIdx];
        }
      }
    }
  }
}

void CpuMatrix::maxPoolForward(Matrix& inputMat,
                               size_t imgSizeH,
                               size_t imgSizeW,
...
...
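
The CPU pair above is the standard im2col/col2im duality: convShrink scatters the column matrix back into image space, blending each hit with the existing pixel as alpha * col + beta * im. Note that, as written, the CPU loop applies this blend once per overlapping column entry rather than summing first, while the GPU kernel accumulates val and blends once. A minimal standalone sketch of the sequential blend (illustrative only; float stands in for real):

// Minimal col2im reference sketch (illustrative, not part of this commit);
// mirrors CpuMatrix::convShrink's sequential alpha/beta blend.
#include <cstddef>
using std::size_t;

void shrink_col2feature_ref(const float* dataCol, size_t channels,
                            size_t height, size_t width,
                            size_t blockH, size_t blockW,
                            size_t strideH, size_t strideW,
                            size_t paddingH, size_t paddingW,
                            size_t outputH, size_t outputW,
                            float* dataIm, float alpha, float beta) {
  size_t channelsCol = channels * blockH * blockW;
  for (size_t c = 0; c < channelsCol; ++c) {
    size_t wOffset = c % blockW;
    size_t hOffset = (c / blockW) % blockH;
    size_t c_im = c / blockW / blockH;
    for (size_t h = 0; h < outputH; ++h) {
      for (size_t w = 0; w < outputW; ++w) {
        long row = long(h * strideH + hOffset) - long(paddingH);
        long col = long(w * strideW + wOffset) - long(paddingW);
        if (row >= 0 && row < long(height) && col >= 0 && col < long(width)) {
          // Blend this column entry into the image pixel it came from.
          float* im = &dataIm[(c_im * height + row) * width + col];
          *im = alpha * dataCol[(c * outputH + h) * outputW + w] + beta * *im;
        }
      }
    }
  }
}
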

paddle/math/Matrix.h
@@ -858,49 +858,6 @@ public:
    LOG(FATAL) << "Not implemented";
  }

  /**
   * This function is used to calculate the convolution:
   *
   * It will expand a feature matrix according to the
   * convolution filters.
   */
  virtual void convExpand(Matrix& feature,
                          int feaImgHeight,
                          int feaImgWidth,
                          int channels,
                          int blockH,
                          int blockW,
                          int strideH,
                          int strideW,
                          int paddingH,
                          int paddingW,
                          int outputH,
                          int outputW) {
    LOG(FATAL) << "Not implemented";
  }

  /**
   * This function is the reverse implementation of convExpand:
   *
   * It restores an expanded matrix back into a feature matrix.
   */
  virtual void convShrink(Matrix& expandColMat,
                          int thisImgHeight,
                          int thisImgWidth,
                          int channels,
                          int blockH,
                          int blockW,
                          int strideH,
                          int strideW,
                          int paddingH,
                          int paddingW,
                          int outputH,
                          int outputW,
                          real alpha = 1.0f,
                          real beta = 0.0f) {
    LOG(FATAL) << "Not implemented";
  }

  /**
   * Pooling forward operation, pick out the largest element
   * in the sizeX of value
...
...
@@ -1334,34 +1291,6 @@ public:
  void classificationError(Matrix& output, IVector& label, size_t topkSize = 1);

  void convExpand(Matrix& feature,
                  int feaImgHeight,
                  int feaImgWidth,
                  int channels,
                  int blockH,
                  int blockW,
                  int strideH,
                  int strideW,
                  int paddingH,
                  int paddingW,
                  int outputH,
                  int outputW);

  void convShrink(Matrix& expandColMat,
                  int thisImgHeight,
                  int thisImgWidth,
                  int channels,
                  int blockH,
                  int blockW,
                  int strideH,
                  int strideW,
                  int paddingH,
                  int paddingW,
                  int outputH,
                  int outputW,
                  real alpha = 1.0f,
                  real beta = 0.0f);

  void maxPoolForward(Matrix& inputMat,
                      size_t imgSizeH,
                      size_t imgSizeW,
...
...
@@ -1521,34 +1450,6 @@ public:
  MatrixPtr clone(size_t height, size_t width, bool useGpu = false);

  void convExpand(Matrix& feature,
                  int feaImgHeight,
                  int feaImgWidth,
                  int channels,
                  int blockH,
                  int blockW,
                  int strideH,
                  int strideW,
                  int paddingH,
                  int paddingW,
                  int outputH,
                  int outputW);

  void convShrink(Matrix& expandFeat,
                  int thisImgHeight,
                  int thisImgWidth,
                  int channels,
                  int blockH,
                  int blockW,
                  int strideH,
                  int strideW,
                  int paddingH,
                  int paddingW,
                  int outputH,
                  int outputW,
                  real alpha = 1.0f,
                  real beta = 0.0f);

  void maxPoolForward(Matrix& inputMat,
                      size_t imgSizeH,
                      size_t imgSizeW,
...
...
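
Finally, the Matrix.h changes follow the codebase's usual dispatch pattern: the abstract Matrix base class supplies a virtual default that aborts via LOG(FATAL), and GpuMatrix/CpuMatrix override it with a backend-specific implementation. A stripped-down illustration of that pattern (Mat and opExample are made-up names, not Paddle API):

// Illustrative only: the "fatal default + per-backend override" pattern
// that convExpand/convShrink used before this commit removed them.
#include <cstdio>
#include <cstdlib>

struct Mat {
  virtual ~Mat() = default;
  virtual void opExample() {
    std::fprintf(stderr, "Not implemented\n");  // stands in for LOG(FATAL)
    std::abort();
  }
};

struct CpuMat : Mat {
  void opExample() override { /* CPU implementation would go here */ }
};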