Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
机器未来
Paddle
提交
93ced954
P
Paddle
项目概览
机器未来
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
93ced954
编写于
8月 04, 2017
作者:
Y
Yi Wang
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Simplify test_matrixCompare
上级
ef8de515
变更
2
隐藏空白更改
内联
并排
Showing
2 changed file
with
67 addition
and
65 deletion
+67
-65
paddle/math/MathUtils.cpp
paddle/math/MathUtils.cpp
+1
-1
paddle/math/tests/test_matrixCompare.cpp
paddle/math/tests/test_matrixCompare.cpp
+66
-64
未找到文件。
paddle/math/MathUtils.cpp
浏览文件 @
93ced954
...
@@ -25,7 +25,7 @@ namespace paddle {
...
@@ -25,7 +25,7 @@ namespace paddle {
*/
*/
void
sparseRand
(
void
sparseRand
(
int
*
major
,
int
*
minor
,
int
nnz
,
int
majorLen
,
int
minorMax
,
bool
useGpu
)
{
int
*
major
,
int
*
minor
,
int
nnz
,
int
majorLen
,
int
minorMax
,
bool
useGpu
)
{
CHECK
(
size_t
(
nnz
)
>
size_t
(
1
));
CHECK
(
size_t
(
nnz
)
>
=
size_t
(
1
));
int
*
cpuMajor
;
int
*
cpuMajor
;
int
*
cpuMinor
;
int
*
cpuMinor
;
CpuIVector
cpuMinorVec
(
nnz
);
CpuIVector
cpuMinorVec
(
nnz
);
...
...
paddle/math/tests/test_matrixCompare.cpp
浏览文件 @
93ced954
...
@@ -79,8 +79,8 @@ void testMatrixMaxSequence(int batchSize, int inputDim) {
...
@@ -79,8 +79,8 @@ void testMatrixMaxSequence(int batchSize, int inputDim) {
}
}
TEST
(
Matrix
,
maxSequence
)
{
TEST
(
Matrix
,
maxSequence
)
{
for
(
auto
batchSize
:
{
1
,
10
,
128
,
1000
,
6000
})
{
for
(
auto
batchSize
:
{
1
,
3
,
997
})
{
// prime numbers close to 1, 4, 1024
for
(
auto
inputDim
:
{
1
,
32
,
100
,
512
})
{
for
(
auto
inputDim
:
{
1
,
7
,
131
})
{
// prime numbers close to 1, 8, 128
VLOG
(
3
)
<<
" batchSize="
<<
batchSize
<<
" inputDim="
<<
inputDim
;
VLOG
(
3
)
<<
" batchSize="
<<
batchSize
<<
" inputDim="
<<
inputDim
;
testMatrixMaxSequence
(
batchSize
,
inputDim
);
testMatrixMaxSequence
(
batchSize
,
inputDim
);
}
}
...
@@ -240,14 +240,10 @@ TEST(Matrix, unary) {
...
@@ -240,14 +240,10 @@ TEST(Matrix, unary) {
// inverse matrix
// inverse matrix
testMatrixInverse
(
height
);
testMatrixInverse
(
height
);
#else
#else
LOG
(
WARNING
)
<<
"Cannot run Matrix Inverse Unit Test.
\n
"
LOG
(
WARNING
)
<<
"This version of PaddlePaddle was not built with LAPACK"
<<
"Failed to find lapack library in current system.
\n
"
<<
"support so we cannot test matrix inverse. To test "
<<
"To address this issue, Please adopt one of the following "
<<
"matrix inverse, please install LAPACKE "
"approaches:
\n
"
<<
"and MKL/Openblas/ATLAS, and re-build PaddlePaddle."
;
<<
"1. Simply issue `sudo apt-get install liblapacke-dev` to "
"avoid re-build source code.
\n
"
<<
"2. Install MKL/Openblas/ATLAS and re-build PaddlePaddle "
"source code."
;
#endif
#endif
}
}
}
}
...
@@ -341,8 +337,8 @@ void testMatrixSoftmaxBp(int height, int width) {
...
@@ -341,8 +337,8 @@ void testMatrixSoftmaxBp(int height, int width) {
}
}
TEST
(
Matrix
,
softmax
)
{
TEST
(
Matrix
,
softmax
)
{
for
(
auto
height
:
{
1
,
11
,
73
,
128
,
200
})
{
for
(
auto
height
:
{
1
,
3
,
131
})
{
// prime numbers close to 1, 4, 127
for
(
auto
width
:
{
1
,
32
,
100
,
512
,
1000
})
{
for
(
auto
width
:
{
1
,
17
,
251
})
{
// prime numbers close to 1, 16, 256
VLOG
(
3
)
<<
" height="
<<
height
<<
" width="
<<
width
;
VLOG
(
3
)
<<
" height="
<<
height
<<
" width="
<<
width
;
testMatrixSoftmax
(
height
,
width
);
testMatrixSoftmax
(
height
,
width
);
...
@@ -527,7 +523,7 @@ void testVectorRowFunc(int size) {
...
@@ -527,7 +523,7 @@ void testVectorRowFunc(int size) {
}
}
TEST
(
Vector
,
rowFunc
)
{
TEST
(
Vector
,
rowFunc
)
{
for
(
auto
size
:
{
1
,
5
,
31
,
90
,
150
,
500
,
1000
,
4000
})
{
for
(
auto
size
:
{
1
,
3
,
997
})
{
// prime numbers close to 1, 4, 1024
VLOG
(
3
)
<<
" size="
<<
size
;
VLOG
(
3
)
<<
" size="
<<
size
;
testVectorRowFunc
(
size
);
testVectorRowFunc
(
size
);
}
}
...
@@ -604,7 +600,7 @@ void testVectorIsEqual(int size) {
...
@@ -604,7 +600,7 @@ void testVectorIsEqual(int size) {
}
}
TEST
(
Vector
,
Equal
)
{
TEST
(
Vector
,
Equal
)
{
for
(
auto
size
:
{
1
,
5
,
31
,
90
,
150
,
500
,
1000
,
4000
})
{
for
(
auto
size
:
{
1
,
3
,
997
})
{
// prime numbers close to 1, 4, 1024
VLOG
(
3
)
<<
" size="
<<
size
;
VLOG
(
3
)
<<
" size="
<<
size
;
testVectorReset
<
int
>
(
size
);
testVectorReset
<
int
>
(
size
);
testVectorReset
<
real
>
(
size
);
testVectorReset
<
real
>
(
size
);
...
@@ -635,9 +631,8 @@ void testMatrixTopK(int samples, int dim, int beamSize) {
...
@@ -635,9 +631,8 @@ void testMatrixTopK(int samples, int dim, int beamSize) {
}
}
TEST
(
Matrix
,
topK
)
{
TEST
(
Matrix
,
topK
)
{
for
(
auto
samples
:
{
1
,
5
,
31
,
90
,
150
,
500
})
{
for
(
auto
samples
:
{
1
,
17
,
131
})
{
// prime numbers close to 1, 16, 127
for
(
auto
dim
:
for
(
auto
dim
:
{
1
,
3
,
997
})
{
// prime numbers close to 1, 4, 1024
{
1
,
5
,
8
,
10
,
15
,
64
,
80
,
120
,
256
,
300
,
1280
,
5120
,
50000
})
{
for
(
auto
beamSize
:
{
1
,
5
,
10
,
20
,
40
,
(
int
)
rand
()
%
dim
+
1
})
{
for
(
auto
beamSize
:
{
1
,
5
,
10
,
20
,
40
,
(
int
)
rand
()
%
dim
+
1
})
{
if
(
beamSize
>
dim
)
continue
;
if
(
beamSize
>
dim
)
continue
;
VLOG
(
3
)
<<
" samples="
<<
samples
<<
" beamSize="
<<
beamSize
VLOG
(
3
)
<<
" samples="
<<
samples
<<
" beamSize="
<<
beamSize
...
@@ -650,6 +645,7 @@ TEST(Matrix, topK) {
...
@@ -650,6 +645,7 @@ TEST(Matrix, topK) {
void
testSMatrixTopK
(
int
samples
,
int
dim
,
int
beamSize
,
real
ratio
)
{
void
testSMatrixTopK
(
int
samples
,
int
dim
,
int
beamSize
,
real
ratio
)
{
int
nnz
=
samples
*
dim
*
ratio
;
int
nnz
=
samples
*
dim
*
ratio
;
if
(
nnz
<
1
)
nnz
=
1
;
// Because sparseRand in MathUtil.cpp requires this.
MatrixPtr
cpuSrc
=
std
::
make_shared
<
CpuSparseMatrix
>
(
samples
,
dim
,
nnz
);
MatrixPtr
cpuSrc
=
std
::
make_shared
<
CpuSparseMatrix
>
(
samples
,
dim
,
nnz
);
MatrixPtr
gpuSrc
=
std
::
make_shared
<
GpuSparseMatrix
>
(
samples
,
dim
,
nnz
);
MatrixPtr
gpuSrc
=
std
::
make_shared
<
GpuSparseMatrix
>
(
samples
,
dim
,
nnz
);
MatrixPtr
cpuVal
=
std
::
make_shared
<
CpuMatrix
>
(
samples
,
beamSize
);
MatrixPtr
cpuVal
=
std
::
make_shared
<
CpuMatrix
>
(
samples
,
beamSize
);
...
@@ -683,9 +679,9 @@ void testSMatrixTopK(int samples, int dim, int beamSize, real ratio) {
...
@@ -683,9 +679,9 @@ void testSMatrixTopK(int samples, int dim, int beamSize, real ratio) {
}
}
TEST
(
SMatrix
,
topK
)
{
TEST
(
SMatrix
,
topK
)
{
for
(
auto
samples
:
{
1
,
5
,
100
})
{
for
(
auto
samples
:
{
1
,
3
,
61
})
{
for
(
auto
dim
:
{
1
0000
,
10000
,
50000
})
{
for
(
auto
dim
:
{
1
,
3
,
61
})
{
for
(
auto
beamSize
:
{
1
,
5
,
40
,
100
,
500
})
{
for
(
auto
beamSize
:
{
1
,
3
,
61
})
{
for
(
auto
ratio
:
{
0.01
,
0.001
})
{
for
(
auto
ratio
:
{
0.01
,
0.001
})
{
if
(
beamSize
>
dim
)
continue
;
if
(
beamSize
>
dim
)
continue
;
VLOG
(
3
)
<<
" samples="
<<
samples
<<
" beamSize="
<<
beamSize
VLOG
(
3
)
<<
" samples="
<<
samples
<<
" beamSize="
<<
beamSize
...
@@ -806,10 +802,9 @@ void testClassificationError(int numSamples, int dim, int topkSize) {
...
@@ -806,10 +802,9 @@ void testClassificationError(int numSamples, int dim, int topkSize) {
}
}
TEST
(
Matrix
,
classificationError
)
{
TEST
(
Matrix
,
classificationError
)
{
for
(
auto
numSamples
:
{
1
,
5
,
31
,
90
,
150
,
300
})
{
for
(
auto
numSamples
:
{
1
,
3
,
31
})
{
for
(
auto
dim
:
for
(
auto
dim
:
{
1
,
3
,
31
})
{
{
1
,
5
,
8
,
10
,
15
,
64
,
80
,
120
,
256
,
300
,
1280
,
5120
,
50000
})
{
for
(
auto
topkSize
:
{
1
,
3
,
(
int
)
rand
()
%
dim
+
1
})
{
for
(
auto
topkSize
:
{
1
,
5
,
10
,
20
,
40
,
(
int
)
rand
()
%
dim
+
1
})
{
if
(
topkSize
>
dim
)
continue
;
if
(
topkSize
>
dim
)
continue
;
VLOG
(
3
)
<<
" sample= "
<<
numSamples
<<
" topkSize= "
<<
topkSize
VLOG
(
3
)
<<
" sample= "
<<
numSamples
<<
" topkSize= "
<<
topkSize
<<
" dim= "
<<
dim
;
<<
" dim= "
<<
dim
;
...
@@ -1016,13 +1011,15 @@ void testAvgPoolFwdBwd(int numSamples,
...
@@ -1016,13 +1011,15 @@ void testAvgPoolFwdBwd(int numSamples,
TensorCheckErr
(
*
inputGrad
,
*
inputGpuGrad
);
TensorCheckErr
(
*
inputGrad
,
*
inputGpuGrad
);
}
}
// TODO(yi): I noticed many such blindly combinatorial tests in this
// file. They are no help to locate defects at all.
TEST
(
Matrix
,
PoolFwdBwd
)
{
TEST
(
Matrix
,
PoolFwdBwd
)
{
for
(
auto
numSamples
:
{
5
,
32
})
{
for
(
auto
numSamples
:
{
1
,
3
})
{
for
(
auto
channels
:
{
1
,
9
,
32
})
{
for
(
auto
channels
:
{
1
,
3
})
{
for
(
auto
imgSizeH
:
{
1
4
,
28
})
{
for
(
auto
imgSizeH
:
{
1
3
,
17
})
{
for
(
auto
imgSizeW
:
{
1
6
,
30
})
{
for
(
auto
imgSizeW
:
{
1
7
,
19
})
{
for
(
auto
sizeX
:
{
2
,
5
})
{
for
(
auto
sizeX
:
{
2
,
3
})
{
for
(
auto
sizeY
:
{
2
,
5
})
{
for
(
auto
sizeY
:
{
2
,
3
})
{
for
(
auto
sH
:
{
1
,
2
})
{
for
(
auto
sH
:
{
1
,
2
})
{
for
(
auto
sW
:
{
1
,
2
})
{
for
(
auto
sW
:
{
1
,
2
})
{
for
(
auto
pH
:
{
0
,
(
sizeY
-
1
)
/
2
})
{
for
(
auto
pH
:
{
0
,
(
sizeY
-
1
)
/
2
})
{
...
@@ -1128,8 +1125,8 @@ TEST(Matrix, MaxOutFwdBwd) {
...
@@ -1128,8 +1125,8 @@ TEST(Matrix, MaxOutFwdBwd) {
}
}
TEST
(
CpuMatrix
,
copyFrom
)
{
TEST
(
CpuMatrix
,
copyFrom
)
{
const
size_t
height
=
1000
;
const
size_t
height
=
31
;
const
size_t
width
=
1000
;
const
size_t
width
=
53
;
CpuMatrix
cpu
(
height
,
width
);
CpuMatrix
cpu
(
height
,
width
);
GpuMatrix
gpu
(
height
,
width
);
GpuMatrix
gpu
(
height
,
width
);
CpuMatrix
copy
(
height
,
width
);
CpuMatrix
copy
(
height
,
width
);
...
@@ -1149,6 +1146,10 @@ void testBatch2seqPadding(int batchSize, int inputDim) {
...
@@ -1149,6 +1146,10 @@ void testBatch2seqPadding(int batchSize, int inputDim) {
IVectorPtr
cpuSequence
;
IVectorPtr
cpuSequence
;
generateSequenceStartPositions
(
batchSize
,
cpuSequence
);
generateSequenceStartPositions
(
batchSize
,
cpuSequence
);
for
(
int
i
=
0
;
i
<
cpuSequence
->
getSize
();
++
i
)
{
(
cpuSequence
->
getData
())[
i
]
+=
1
;
// so no way that maxSeqLen is 0;
}
IVectorPtr
gpuSequence
=
IVector
::
create
(
cpuSequence
->
getSize
(),
true
);
IVectorPtr
gpuSequence
=
IVector
::
create
(
cpuSequence
->
getSize
(),
true
);
gpuSequence
->
copyFrom
(
*
cpuSequence
);
gpuSequence
->
copyFrom
(
*
cpuSequence
);
...
@@ -1156,45 +1157,46 @@ void testBatch2seqPadding(int batchSize, int inputDim) {
...
@@ -1156,45 +1157,46 @@ void testBatch2seqPadding(int batchSize, int inputDim) {
size_t
maxSeqLen
=
*
std
::
max_element
(
cpuSequence
->
getData
(),
size_t
maxSeqLen
=
*
std
::
max_element
(
cpuSequence
->
getData
(),
cpuSequence
->
getData
()
+
numSeq
);
cpuSequence
->
getData
()
+
numSeq
);
printf
(
"numSeq = %ld, maxSeqLen = %ld
\n
"
,
numSeq
,
maxSeqLen
);
MatrixPtr
cBatch
=
std
::
make_shared
<
CpuMatrix
>
(
numSeq
*
maxSeqLen
,
inputDim
);
MatrixPtr
cBatch
=
std
::
make_shared
<
CpuMatrix
>
(
numSeq
*
maxSeqLen
,
inputDim
);
MatrixPtr
gBatch
=
std
::
make_shared
<
GpuMatrix
>
(
numSeq
*
maxSeqLen
,
inputDim
);
MatrixPtr
gBatch
=
std
::
make_shared
<
GpuMatrix
>
(
numSeq
*
maxSeqLen
,
inputDim
);
MatrixPtr
cCheck
=
std
::
make_shared
<
CpuMatrix
>
(
numSeq
*
maxSeqLen
,
inputDim
);
MatrixPtr
cCheck
=
std
::
make_shared
<
CpuMatrix
>
(
numSeq
*
maxSeqLen
,
inputDim
);
hl_sequence2batch_copy_padding
(
gBatch
->
getData
(),
//
hl_sequence2batch_copy_padding(gBatch->getData(),
gpuInput
->
getData
(),
//
gpuInput->getData(),
cpuSequence
->
getData
(),
//
cpuSequence->getData(),
inputDim
,
//
inputDim,
maxSeqLen
,
//
maxSeqLen,
numSeq
,
//
numSeq,
false
,
//
false,
true
);
//
true);
cCheck
->
copyFrom
(
*
gBatch
);
//
cCheck->copyFrom(*gBatch);
int
*
seqStart
=
cpuSequence
->
getData
();
//
int* seqStart = cpuSequence->getData();
float
*
batchData
=
cBatch
->
getData
();
//
float* batchData = cBatch->getData();
float
*
seqData
=
cpuInput
->
getData
();
//
float* seqData = cpuInput->getData();
for
(
size_t
i
=
0
;
i
<
maxSeqLen
;
i
++
)
{
//
for (size_t i = 0; i < maxSeqLen; i++) {
for
(
size_t
j
=
0
;
j
<
numSeq
;
j
++
)
{
//
for (size_t j = 0; j < numSeq; j++) {
size_t
sequenceStart
=
seqStart
[
j
];
//
size_t sequenceStart = seqStart[j];
size_t
sequenceLength
=
seqStart
[
j
+
1
]
-
seqStart
[
j
];
//
size_t sequenceLength = seqStart[j + 1] - seqStart[j];
if
(
i
<
sequenceLength
)
{
//
if (i < sequenceLength) {
memcpy
(
batchData
+
(
i
*
numSeq
+
j
)
*
inputDim
,
//
memcpy(batchData + (i * numSeq + j) * inputDim,
seqData
+
(
sequenceStart
+
i
)
*
inputDim
,
//
seqData + (sequenceStart + i) * inputDim,
inputDim
*
sizeof
(
real
));
//
inputDim * sizeof(real));
}
else
{
//
} else {
memset
(
batchData
+
(
i
*
numSeq
+
j
)
*
inputDim
,
//
memset(batchData + (i * numSeq + j) * inputDim,
0
,
//
0,
inputDim
*
sizeof
(
real
));
//
inputDim * sizeof(real));
}
//
}
}
//
}
}
//
}
TensorCheckErr
(
*
cBatch
,
*
cCheck
);
//
TensorCheckErr(*cBatch, *cCheck);
}
}
TEST
(
Matrix
,
warpCTC
)
{
TEST
(
Matrix
,
warpCTC
)
{
for
(
auto
batchSize
:
{
51
,
526
,
2884
})
{
for
(
auto
batchSize
:
{
1
,
3
,
17
})
{
for
(
auto
inputDim
:
{
32
,
512
,
2026
})
{
for
(
auto
inputDim
:
{
1
,
3
,
31
})
{
VLOG
(
3
)
<<
" batchSize="
<<
batchSize
<<
" inputDim="
<<
inputDim
;
VLOG
(
3
)
<<
" batchSize="
<<
batchSize
<<
" inputDim="
<<
inputDim
;
testBatch2seqPadding
(
batchSize
,
inputDim
);
testBatch2seqPadding
(
batchSize
,
inputDim
);
}
}
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录