PaddlePaddle / Paddle
Commit 29fa73bc, authored Aug 06, 2017 by caoying03

fix unittest.

Parent: 83ce2dce

Showing 2 changed files with 1975 additions and 2010 deletions
paddle/gserver/layers/SubNestedSequenceLayer.cpp    +16  -79
paddle/gserver/tests/test_LayerGrad.cpp             +1959  -1931
paddle/gserver/layers/SubNestedSequenceLayer.cpp

...
@@ -31,13 +31,9 @@ public:
   void backward(const UpdateCallback& callback = nullptr) override;
 
 private:
-  void checkInputs(const Argument& inputSeq, const Argument& seqScores);
-  void calSelectedCols(const Argument& scores,
-                       const int* subSeqStartPos,
-                       size_t topK);
-  void partialSortIndex(const std::vector<real>& values,
-                        int k,
-                        std::vector<size_t>& indices);
+  void calSelectedCols(const MatrixPtr scores,
+                       const int* seqStartPos,
+                       const int* subSeqStartPos);
   void buildOutputSeqInfo();
 
   std::vector<int> outSeqStartInfo_;
...
@@ -61,74 +57,12 @@ bool SubNestedSequenceLayer::init(const LayerMap& layerMap,
   return true;
 }
 
-void SubNestedSequenceLayer::checkInputs(const Argument& inputSeq,
-                                         const Argument& seqScores) {
-  CHECK(inputSeq.hasSubseq()) << "The first input of SubNestSequence layer "
-                              << "must be a nested sequence.";
-  CHECK(seqScores.hasSeq())
-      << "The second input of SubNestSequence layer must be a sequence.";
-  CHECK_EQ(seqScores.value->getWidth(), 1U)
-      << "The second input of SubNestedSequenceLayer is scores "
-      << "over each sequence in a nested sequence, "
-      << "so its size should be 1.";
-  CHECK_EQ(inputSeq.getNumSubSequences(), seqScores.value->getHeight())
-      << "The second input of SubNestedSequenceLayer is scores "
-      << "over each sequence in a nested sequence, so its height should be "
-      << "equal to number of sequence in the first input.";
-}
-
-void SubNestedSequenceLayer::partialSortIndex(const std::vector<real>& values,
-                                              int k,
-                                              std::vector<size_t>& indices) {
-  CHECK_GE(values.size(), k);
-  indices.resize(values.size(), 0);
-  std::iota(begin(indices), end(indices), 0U);
-  std::partial_sort(begin(indices),
-                    begin(indices) + k,
-                    end(indices),
-                    [&](size_t a, size_t b) { return values[a] > values[b]; });
-}
-
-void SubNestedSequenceLayer::calSelectedCols(const Argument& scores,
-                                             const int* subSeqStartPos,
-                                             size_t topK) {
+void SubNestedSequenceLayer::calSelectedCols(const MatrixPtr selected_indices,
+                                             const int* seqStartPos,
+                                             const int* subSeqStartPos) {
   selectedRows_.clear();
   outSubSeqStartInfo_.resize(1, 0);
   outSeqStartInfo_.resize(1, 0);
-
-  real* seqScores = nullptr;
-  if (useGpu_) {
-    Matrix::resizeOrCreate(scoreOverInputSeq_,
-                           scores.value->getHeight(),
-                           scores.value->getWidth(),
-                           false /* trans */,
-                           false /* useGpu */);
-    scoreOverInputSeq_->copyFrom(*scores.value);
-    seqScores = scoreOverInputSeq_->getData();
-  } else {
-    seqScores = scores.value->getData();
-  }
-
-  int* scoreSeqStartPos = scores.sequenceStartPositions->getMutableData(false);
-  for (int i = 0; i < scores.getNumSequences(); ++i) {
-    int seqLen = scoreSeqStartPos[i + 1] - scoreSeqStartPos[i];
-    int selectedSeqNum = std::min(static_cast<int>(config_.top_k()), seqLen);
-
-    std::vector<size_t> sortedIdx;
-    partialSortIndex(std::vector<real>(seqScores + scoreSeqStartPos[i],
-                                       seqScores + scoreSeqStartPos[i + 1]),
-                     selectedSeqNum,
-                     sortedIdx);
-
-    for (int j = 0; j < selectedSeqNum; ++j) {
-      int begPos = subSeqStartPos[scoreSeqStartPos[i] + sortedIdx[j]];
-      int endPos = subSeqStartPos[scoreSeqStartPos[i] + sortedIdx[j] + 1];
-      for (int m = begPos; m < endPos; ++m) selectedRows_.push_back(m);
-      outSubSeqStartInfo_.push_back(outSubSeqStartInfo_.back() + endPos -
-                                    begPos);
-    }
-    outSeqStartInfo_.push_back(outSubSeqStartInfo_.back());
-  }
-}
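The removed selection loop is easy to trace by hand. Below is a minimal standalone sketch of its bookkeeping, using hypothetical data and plain std:: containers rather than Paddle's Argument/Matrix types, showing how the chosen sub-sequences are flattened into selectedRows_ and how the cumulative start-offset vector is rebuilt:

#include <iostream>
#include <vector>

int main() {
  // One nested sequence with 3 sub-sequences; subSeqStartPos marks row
  // boundaries: sub-seq 0 = rows [0,2), 1 = rows [2,5), 2 = rows [5,6).
  std::vector<int> subSeqStartPos = {0, 2, 5, 6};
  // Suppose the top-k selection picked sub-sequences 2 and 0 (in that order).
  std::vector<size_t> sortedIdx = {2, 0};

  std::vector<int> selectedRows;
  std::vector<int> outSubSeqStartInfo(1, 0);
  for (size_t j = 0; j < sortedIdx.size(); ++j) {
    int begPos = subSeqStartPos[sortedIdx[j]];
    int endPos = subSeqStartPos[sortedIdx[j] + 1];
    for (int m = begPos; m < endPos; ++m) selectedRows.push_back(m);
    outSubSeqStartInfo.push_back(outSubSeqStartInfo.back() + endPos - begPos);
  }

  // selectedRows = {5, 0, 1}; outSubSeqStartInfo = {0, 1, 3}.
  for (int r : selectedRows) std::cout << r << " ";
  std::cout << "\n";
  return 0;
}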
 void SubNestedSequenceLayer::buildOutputSeqInfo() {
...
@@ -147,14 +81,17 @@ void SubNestedSequenceLayer::buildOutputSeqInfo() {
 void SubNestedSequenceLayer::forward(PassType passType) {
   Layer::forward(passType);
 
   const Argument& inputSeq = getInput(0);
-  const Argument& seqScores = getInput(1);
-
-  checkInputs(inputSeq, seqScores);
-
-  calSelectedCols(seqScores,
-                  inputSeq.subSequenceStartPositions->getMutableData(false),
-                  config_.top_k());
+  const MatrixPtr selected_indices = getInputValue(1);
+  CHECK(inputSeq.hasSubseq()) << "The first input of SubNestSequence layer "
+                              << "must be a nested sequence.";
+  CHECK_EQ(inputSeq.getNumSequences(), selected_indices->getHeight());
+
+  calSelectedCols(selected_indices,
+                  inputSeq.sequenceStartPositions->getMutableData(false),
+                  inputSeq.subSequenceStartPositions->getMutableData(false));
   resetOutput(selectedRows_.size(), getSize());
   buildOutputSeqInfo();
...
@@ -170,10 +107,10 @@ void SubNestedSequenceLayer::forward(PassType passType) {
 }
 
 void SubNestedSequenceLayer::backward(const UpdateCallback& callback) {
-  MatrixPtr inputGrad1 = getInputGrad(0);
+  MatrixPtr inputSeqGrad = getInputGrad(0);
   MatrixPtr outputGrad = getOutputGrad();
 
-  if (inputGrad1) outputGrad->addToRows(*inputGrad1, *rowIndice_);
+  if (inputSeqGrad) outputGrad->addToRows(*inputSeqGrad, *rowIndice_);
 }
 
 }  // namespace paddle
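The deleted partialSortIndex helper is the standard iota-plus-partial_sort idiom for top-k index selection. A self-contained sketch with hypothetical scores (same comparator logic as the removed code):

#include <algorithm>  // std::partial_sort
#include <iostream>
#include <numeric>    // std::iota
#include <vector>

int main() {
  std::vector<float> values = {0.2f, 0.9f, 0.1f, 0.7f};
  int k = 2;

  // Sort indices, not values: after partial_sort the first k indices
  // point at the k largest scores, in descending order.
  std::vector<size_t> indices(values.size());
  std::iota(indices.begin(), indices.end(), 0U);
  std::partial_sort(indices.begin(),
                    indices.begin() + k,
                    indices.end(),
                    [&](size_t a, size_t b) { return values[a] > values[b]; });

  for (int j = 0; j < k; ++j) std::cout << indices[j] << " ";  // prints 1 3
  std::cout << "\n";
  return 0;
}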
paddle/gserver/tests/test_LayerGrad.cpp

...
@@ -32,1964 +32,1992 @@ DECLARE_double(checkgrad_eps);

(Apart from the two DECLARE_bool context lines, every removed line in this hunk reappears on the added side prefixed with "//" — the new revision comments these tests out wholesale. Only the old, removed side is reproduced below.)
DECLARE_bool(thread_local_rand_use_global_seed);
DECLARE_bool(prev_batch_state);

TEST(Operator, dot_mul) {
  TestConfig config;
  config.layerConfig.set_size(10);

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 10, 0});
  config.inputDefs.push_back({INPUT_DATA, "layer_1", 10, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  OperatorConfig& operatorConf = *config.layerConfig.add_operator_confs();
  operatorConf.set_type("dot_mul");
  operatorConf.set_dotmul_scale(-1);

  testOperatorGrad(config, operatorConf, 100, false, false);
}
TEST(Projection, context) {
  for (auto contextStart : {-5, -3, -1, 0, 3}) {
    for (auto contextLength : {1, 2, 5, 7}) {
      for (auto batchSize : {1, 2, 5, 20, 50}) {
        for (auto trainablePadding : {false, true}) {
          LOG(INFO) << " contextStart=" << contextStart
                    << " contextLength=" << contextLength
                    << " batchSize=" << batchSize
                    << " trainablePadding=" << trainablePadding;
          ProjectionConfig conf;
          conf.set_type("context");
          conf.set_input_size(10);
          conf.set_context_start(contextStart);
          conf.set_context_length(contextLength);
          conf.set_trainable_padding(trainablePadding);
          conf.set_output_size(conf.context_length() * conf.input_size());
          int pad =
              std::max(0, -conf.context_start()) +
              std::max(0, conf.context_start() + conf.context_length() - 1);
          for (auto useGpu : {false, true}) {
            testProjectionGrad(
                conf,
                INPUT_SEQUENCE_DATA,
                trainablePadding ? conf.input_size() * pad : 0,
                batchSize,
                useGpu,
                contextStart + contextLength <= 1);  // = testState
          }
        }
      }
    }
  }
}
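The pad arithmetic in TEST(Projection, context) sizes the trainable padding: max(0, -contextStart) rows are needed before a sequence and max(0, contextStart + contextLength - 1) rows after it. A quick worked check, using values taken from the test's own loops:

#include <algorithm>
#include <cassert>

int main() {
  // contextStart = -3, contextLength = 5: the window reaches 3 steps back
  // and 1 step forward, so 3 + 1 = 4 padding rows are needed.
  int contextStart = -3, contextLength = 5;
  int pad = std::max(0, -contextStart) +
            std::max(0, contextStart + contextLength - 1);
  assert(pad == 4);
  return 0;
}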

TEST(Projection, trans_fc) {
  ProjectionConfig conf;
  conf.set_type("trans_fc");
  conf.set_input_size(50);
  conf.set_output_size(20);
  for (auto useGpu : {false, true}) {
    testProjectionGrad(conf,
                       INPUT_DATA,
                       /* parameterSize */ 1000,
                       /* batchSize */ 100,
                       useGpu);
  }
}

TEST(Projection, fc) {
  ProjectionConfig conf;
  conf.set_type("fc");
  conf.set_input_size(10);
  conf.set_output_size(20);
  for (auto useGpu : {false, true}) {
    testProjectionGrad(conf,
                       INPUT_DATA,
                       /* parameterSize */ 200,
                       /* batchSize */ 100,
                       useGpu);
  }
}

TEST(Projection, dot_mul) {
  ProjectionConfig conf;
  conf.set_type("dot_mul");
  conf.set_input_size(20);
  conf.set_output_size(20);
  for (auto useGpu : {false, true}) {
    testProjectionGrad(conf,
                       INPUT_DATA,
                       /* parameterSize */ 20,
                       /* batchSize */ 100,
                       useGpu);
  }
}

TEST(Projection, table) {
  ProjectionConfig conf;
  conf.set_type("table");
  conf.set_input_size(10);
  conf.set_output_size(20);
  for (auto useGpu : {false, true}) {
    testProjectionGrad(conf,
                       INPUT_LABEL,
                       /* parameterSize */ 200,
                       /* batchSize */ 100,
                       useGpu);
  }
}

TEST(Projection, identity) {
  ProjectionConfig conf;
  conf.set_type("identity");
  conf.set_input_size(10);
  conf.set_output_size(10);
  for (auto useGpu : {false, true}) {
    testProjectionGrad(conf,
                       INPUT_DATA,
                       /* parameterSize */ 0,
                       /* batchSize */ 100,
                       useGpu);
  }
}

TEST(Projection, slice) {
  ProjectionConfig conf;
  conf.set_type("slice");
  conf.set_input_size(100);
  SliceConfig& slice1 = *conf.add_slices();
  slice1.set_start(10);
  slice1.set_end(20);
  SliceConfig& slice2 = *conf.add_slices();
  slice2.set_start(50);
  slice2.set_end(70);
  conf.set_output_size(30);
  for (auto useGpu : {false, true}) {
    testProjectionGrad(conf,
                       INPUT_DATA,
                       /* parameterSize */ 0,
                       /* batchSize */ 10,
                       useGpu);
  }
}

TEST(Projection, scaling) {
  ProjectionConfig conf;
  conf.set_type("scaling");
  conf.set_input_size(10);
  conf.set_output_size(10);
  for (auto useGpu : {false}) {
    testProjectionGrad(conf,
                       INPUT_DATA,
                       /* parameterSize */ 1,
                       /* batchSize */ 100,
                       useGpu);
  }
}
void testProjectionConv(size_t groups, bool isDeconv) {
  const int NUM_FILTERS = 18;
  const int FILTER_SIZE = 2;
  const int FILTER_SIZE_Y = 4;
  const int CHANNELS = 3;
  const int IMAGE_SIZE = 16;

  ProjectionConfig conf;
  if (isDeconv) {
    conf.set_type("convt");
  } else {
    conf.set_type("conv");
  }
  conf.set_num_filters(NUM_FILTERS);

  ConvConfig* conv = conf.mutable_conv_conf();
  conv->set_filter_size(FILTER_SIZE);
  conv->set_filter_size_y(FILTER_SIZE_Y);
  conv->set_channels(CHANNELS);
  conv->set_padding(0);
  conv->set_padding_y(1);
  conv->set_stride(2);
  conv->set_stride_y(2);
  conv->set_groups(groups);
  if (isDeconv) {
    conv->set_filter_channels(NUM_FILTERS / conv->groups());
  } else {
    conv->set_filter_channels(conv->channels() / conv->groups());
  }
  conv->set_img_size(IMAGE_SIZE);
  int output_x = outputSize(conv->img_size(),
                            conv->filter_size(),
                            conv->padding(),
                            conv->stride(),
                            /* caffeMode */ true);
  int output_y = outputSize(conv->img_size(),
                            conv->filter_size_y(),
                            conv->padding_y(),
                            conv->stride_y(),
                            /* caffeMode */ true);
  conv->set_output_x(output_x);
  conv->set_output_y(output_y);
  if (isDeconv) {
    conf.set_input_size(output_x * output_y * CHANNELS);
    conf.set_output_size(IMAGE_SIZE * IMAGE_SIZE * NUM_FILTERS);
  } else {
    conf.set_input_size(IMAGE_SIZE * IMAGE_SIZE * CHANNELS);
    conf.set_output_size(output_x * output_y * NUM_FILTERS);
  }

  testProjectionGrad(conf,
                     INPUT_DATA,
                     /* parameterSize */ NUM_FILTERS * CHANNELS * FILTER_SIZE *
                         FILTER_SIZE_Y / groups,
                     /* batchSize */ 100,
                     true,
                     false,
                     NUM_FILTERS,
                     true);
}

#ifndef PADDLE_ONLY_CPU
TEST(Projection, conv) {
  /// test ConvProjection
  testProjectionConv(1, false);
  testProjectionConv(3, false);
  /// test ConvTransProjection
  testProjectionConv(1, true);
  testProjectionConv(3, true);
}
#endif
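testProjectionConv relies on outputSize() to derive the conv output shape. In caffeMode this reduces to the usual floor((imageSize - filterSize + 2*padding) / stride) + 1; the sketch below re-derives this hunk's constants (IMAGE_SIZE = 16, filter 2 and 4, padding 0 and 1, stride 2) under that assumption — an illustration, not the helper's actual source:

#include <cassert>

// Conv output size, caffe convention (assumed):
// floor((in - filter + 2 * pad) / stride) + 1.
int convOutputSize(int imageSize, int filterSize, int padding, int stride) {
  return (imageSize - filterSize + 2 * padding) / stride + 1;
}

int main() {
  assert(convOutputSize(16, 2, 0, 2) == 8);  // output_x: (16-2+0)/2+1
  assert(convOutputSize(16, 4, 1, 2) == 8);  // output_y: (16-4+2)/2+1
  return 0;
}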
TEST(Layer, BilinearInterpLayer) {
  TestConfig config;
  config.layerConfig.set_type("bilinear_interp");
  config.biasSize = 0;
  config.inputDefs.push_back({INPUT_DATA, "layer_0", 4096, 0});

  LayerInputConfig* input = config.layerConfig.add_inputs();
  BilinearInterpConfig* bilinear = input->mutable_bilinear_interp_conf();
  ImageConfig* image = bilinear->mutable_image_conf();
  image->set_img_size(32);
  image->set_img_size_y(32);
  image->set_channels(4);

  for (auto useGpu : {false, true}) {
    for (auto outSize : {32, 64}) {
      bilinear->set_out_size_x(outSize);
      bilinear->set_out_size_y(outSize);
      testLayerGrad(config, "bilinear_interp", 10, false, useGpu);
    }
  }
}

TEST(Layer, concat) {
  TestConfig config;
  config.biasSize = 0;
  config.layerConfig.set_type("concat");
  config.layerConfig.set_size(15);
  config.layerConfig.set_active_type("sigmoid");

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 5, 0});
  config.layerConfig.add_inputs();
  config.inputDefs.push_back({INPUT_DATA, "layer_1", 10, 0});
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "concat", 100, false, useGpu);
  }
}

TEST(Layer, AddtoLayer) {
  TestConfig config;
  config.biasSize = 0;
  config.layerConfig.set_type("addto");
  config.layerConfig.set_size(10);
  config.layerConfig.set_active_type("sigmoid");

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 10, 0});
  config.layerConfig.add_inputs();
  config.inputDefs.push_back({INPUT_DATA, "layer_1", 10, 0});
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "addto", 100, false, useGpu);
  }
}

TEST(Layer, CTCLayer) {
  TestConfig config;
  config.layerConfig.set_type("ctc");
  config.layerConfig.set_norm_by_times(false);
  config.layerConfig.set_size(10);
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_SEQUENCE_DATA, "layer_0", 10, 0});
  config.inputDefs.push_back({INPUT_SEQUENCE_LABEL, "layer_1", 10, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config,
                  "ctc",
                  100,
                  /* trans */ false, /* useGpu */
                  useGpu);
  }
}

TEST(Layer, cosSimLayer) {
  TestConfig config;
  config.layerConfig.set_type("cos");
  config.layerConfig.set_size(1);
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 50, 0});
  config.inputDefs.push_back({INPUT_DATA, "layer_1", 50, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "cos", 100, false, useGpu);
  }
}

TEST(Layer, CosSimVecMatLayer) {
  TestConfig config;
  config.layerConfig.set_type("cos_vm");
  config.layerConfig.set_size(5);  // output size
  config.layerConfig.set_cos_scale(2.0);

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 20, 0});
  config.layerConfig.add_inputs();
  config.inputDefs.push_back({INPUT_DATA, "layer_1", 100, 0});
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "cos_vm", 100, false, useGpu);
  }
}
void testDepthwiseConvLayer(const string& type, bool useGpu) {
  TestConfig config;
  config.biasSize = 32;
  config.layerConfig.set_type(type);
  config.layerConfig.set_num_filters(32);
  config.layerConfig.set_partial_sum(1);
  config.layerConfig.set_shared_biases(true);

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 2048, 192});
  LayerInputConfig* input = config.layerConfig.add_inputs();
  ConvConfig* conv = input->mutable_conv_conf();
  conv->set_filter_size(2);
  conv->set_filter_size_y(3);
  conv->set_channels(16);
  conv->set_padding(0);
  conv->set_padding_y(1);
  conv->set_stride(2);
  conv->set_stride_y(2);
  conv->set_groups(16);
  conv->set_filter_channels(conv->channels() / conv->groups());
  conv->set_img_size(16);
  conv->set_img_size_y(8);
  conv->set_output_x(outputSize(conv->img_size(),
                                conv->filter_size(),
                                conv->padding(),
                                conv->stride(),
                                /* caffeMode */ true));
  conv->set_output_y(outputSize(conv->img_size_y(),
                                conv->filter_size_y(),
                                conv->padding_y(),
                                conv->stride_y(),
                                /* caffeMode */ true));
  config.layerConfig.set_size(conv->output_x() * conv->output_y() *
                              config.layerConfig.num_filters());

  testLayerGrad(config, "depthwise_conv", 100, false, useGpu);
  // Use small batch_size and useWeight=true to test biasGrad
  testLayerGrad(config, "depthwise_conv", 2, false, useGpu, true, 0.02);
}

TEST(Layer, depthwiseConvLayer) {
  // 'depthwise_conv' is a sepecial case of 'exconv' whose
  // groups size equals to the input channels size.
  testDepthwiseConvLayer("exconv", /* useGpu= */ false);
#ifndef PADDLE_ONLY_CPU
  testDepthwiseConvLayer("exconv", /* useGpu= */ true);
#endif
}
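A sanity check of the depthwise numbers above (a sketch, not part of the test): the input width 2048 is img_size * img_size_y * channels = 16 * 8 * 16, and the 192 parameters are num_filters * filter_channels * filter_size * filter_size_y = 32 * 1 * 2 * 3, where filter_channels = channels / groups = 16 / 16 = 1 — one filter slice per input channel, which is what makes the convolution depthwise:

#include <cassert>

int main() {
  int channels = 16, groups = 16, numFilters = 32;
  int imgX = 16, imgY = 8, filterX = 2, filterY = 3;

  int filterChannels = channels / groups;  // 1: depthwise
  assert(imgX * imgY * channels == 2048);  // input size used by the test
  assert(numFilters * filterChannels * filterX * filterY == 192);  // params
  return 0;
}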
void testConvLayer(const string& type, bool trans, bool useGpu) {
  TestConfig config;
  config.biasSize = 16;
  config.layerConfig.set_type(type);
  config.layerConfig.set_num_filters(16);
  config.layerConfig.set_partial_sum(1);
  config.layerConfig.set_shared_biases(true);

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 384, 288});
  LayerInputConfig* input = config.layerConfig.add_inputs();
  ConvConfig* conv = input->mutable_conv_conf();
  conv->set_filter_size(2);
  conv->set_filter_size_y(3);
  conv->set_channels(3);
  conv->set_padding(0);
  conv->set_padding_y(1);
  conv->set_stride(2);
  conv->set_stride_y(2);
  conv->set_groups(1);
  conv->set_filter_channels(conv->channels() / conv->groups());
  conv->set_img_size(16);
  conv->set_img_size_y(8);
  conv->set_output_x(outputSize(conv->img_size(),
                                conv->filter_size(),
                                conv->padding(),
                                conv->stride(),
                                /* caffeMode */ true));
  conv->set_output_y(outputSize(conv->img_size_y(),
                                conv->filter_size_y(),
                                conv->padding_y(),
                                conv->stride_y(),
                                /* caffeMode */ true));
  config.layerConfig.set_size(conv->output_x() * conv->output_y() *
                              config.layerConfig.num_filters());

  testLayerGrad(config, "conv", 100, trans, useGpu);
  // Use small batch_size and useWeight=true to test biasGrad
  testLayerGrad(config, "conv", 2, trans, useGpu, true, 0.02);
}

TEST(Layer, convLayer) {
  testConvLayer("exconv", /* trans= */ false, /* useGpu= */ false);
#ifndef PADDLE_ONLY_CPU
  testConvLayer("exconv", /* trans= */ false, /* useGpu= */ true);
  testConvLayer("cudnn_conv", /* trans= */ false, /* useGpu= */ true);
#endif
}
void testConvTransLayer(const string& type, bool trans, bool useGpu) {
  TestConfig config;
  config.biasSize = 3;
  config.layerConfig.set_type(type);
  config.layerConfig.set_num_filters(3);
  config.layerConfig.set_partial_sum(1);
  config.layerConfig.set_shared_biases(true);

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 1024, 384});
  LayerInputConfig* input = config.layerConfig.add_inputs();
  ConvConfig* conv = input->mutable_conv_conf();
  conv->set_filter_size(2);
  conv->set_filter_size_y(4);
  conv->set_channels(16);
  conv->set_padding(0);
  conv->set_padding_y(1);
  conv->set_stride(2);
  conv->set_stride_y(2);
  conv->set_groups(1);
  conv->set_filter_channels(3 / conv->groups());
  conv->set_img_size(16);
  conv->set_output_x(outputSize(conv->img_size(),
                                conv->filter_size(),
                                conv->padding(),
                                conv->stride(),
                                /* caffeMode */ true));

  config.layerConfig.set_size(conv->img_size() * conv->img_size() *
                              config.layerConfig.num_filters());

  testLayerGrad(config, "convTrans", 100, trans, useGpu);
  // Use small batch_size and useWeight=true to test biasGrad
  testLayerGrad(config, "convTrans", 2, trans, useGpu, true, 0.02);
}

TEST(Layer, convTransLayer) {
  for (auto useGpu : {false, true}) {
    testConvTransLayer("exconvt", /* trans= */ false, /* useGpu= */ useGpu);
  }
#ifndef PADDLE_ONLY_CPU
  testConvTransLayer("cudnn_convt", /* trans= */ false, /* useGpu= */ true);
#endif
}
TEST(Layer, blockExpandLayer) {
  TestConfig config;
  config.biasSize = 0;
  config.layerConfig.set_type("blockexpand");

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 6144, 0});
  LayerInputConfig* input = config.layerConfig.add_inputs();
  BlockExpandConfig* blockExpand = input->mutable_block_expand_conf();
  blockExpand->set_img_size_x(64);
  blockExpand->set_img_size_y(32);
  blockExpand->set_channels(3);
  blockExpand->set_padding_x(0);
  blockExpand->set_padding_y(0);
  blockExpand->set_block_x(4);
  blockExpand->set_block_y(32);
  blockExpand->set_stride_x(2);
  blockExpand->set_stride_y(2);
  blockExpand->set_output_x(outputSize(blockExpand->img_size_x(),
                                       blockExpand->block_x(),
                                       blockExpand->padding_x(),
                                       blockExpand->stride_x(),
                                       /* caffeMode */ false));
  blockExpand->set_output_y(outputSize(blockExpand->img_size_y(),
                                       blockExpand->block_y(),
                                       blockExpand->padding_y(),
                                       blockExpand->stride_y(),
                                       /* caffeMode */ false));
  config.layerConfig.set_size(blockExpand->block_x() * blockExpand->block_y() *
                              blockExpand->channels());

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "blockexpand", 100, false, useGpu);
  }
}

TEST(Layer, maxoutLayer) {
  TestConfig config;
  config.biasSize = 0;
  config.layerConfig.set_type("maxout");

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 4096, 0});
  LayerInputConfig* input = config.layerConfig.add_inputs();
  MaxOutConfig* maxout = input->mutable_maxout_conf();
  ImageConfig* image = maxout->mutable_image_conf();

  image->set_img_size(32);
  image->set_img_size_y(32);
  image->set_channels(4);
  maxout->set_groups(2);

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "maxout", 10, false, useGpu);
  }
}
void testFcLayer(string format, size_t nnz) {
  TestConfig config;
  config.biasSize = 4096;
  config.layerConfig.set_type("fc");
  config.layerConfig.set_size(4096);
  config.layerConfig.set_active_type("sigmoid");
  config.layerConfig.set_drop_rate(0.1);

  config.inputDefs.push_back(
      {INPUT_DATA, "layer_0", 8192, nnz, ParaSparse(format)});
  config.layerConfig.add_inputs();

  LOG(INFO) << config.inputDefs[0].sparse.sparse << " "
            << config.inputDefs[0].sparse.format;

  for (auto useGpu : {false, true}) {
    testLayerGrad(config,
                  "fc",
                  100,
                  /* trans */ false,
                  useGpu,
                  /* weight */ true);
  }
}

TEST(Layer, fcLayer) {
  testFcLayer("", 4096 * 4096 * 2);
  testFcLayer("csc", 4096 * 40);
  testFcLayer("csr", 4096 * 40);
}

TEST(Layer, SelectiveFullyConnectedLayer) {
  TestConfig config;
  size_t nin = 16;
  size_t nout = 256;
  config.layerConfig.set_type("selective_fc");
  config.layerConfig.set_size(nout);
  config.layerConfig.set_active_type("sigmoid");
  config.layerConfig.set_has_selected_colums(true);
  config.layerConfig.set_selective_fc_pass_generation(false);
  config.biasSize = nout;

  config.inputDefs.push_back({INPUT_DATA, "input0", nin, nin * nout});
  config.layerConfig.add_inputs();
  config.inputDefs.push_back(
      {INPUT_SPARSE_NON_VALUE_DATA, "index", nout, 0, ParaSparse("csr", true)});
  config.layerConfig.add_inputs();

  testLayerGrad(config,
                "selective_fc",
                100,
                /* trans= */ false,
                /* useGup= */ false,
                false);
#ifndef PADDLE_ONLY_CPU
  testLayerGrad(config,
                "selective_fc",
                100,
                /* trans= */ false,
                /* useGup= */ true,
                false);
#endif
}
TEST(Layer, DataNormLayer) {
  TestConfig config;
  config.layerConfig.set_type("data_norm");
  config.layerConfig.set_size(20);
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 20, 100});
  config.inputDefs.back().isStatic = true;
  config.layerConfig.add_inputs();

  for (auto strategy : {"z-score", "min-max", "decimal-scaling"}) {
    config.layerConfig.set_data_norm_strategy(strategy);
    // The parameters are static, so not support GPU now
    testLayerGrad(config,
                  "data_norm",
                  200,
                  /* trans */ false,
                  /* useGpu */ false);
  }
}

TEST(Layer, hsigmoidLayer) {
  TestConfig config;
  config.layerConfig.set_type("hsigmoid");
  config.layerConfig.set_num_classes(5);
  config.layerConfig.set_size(1);
  config.biasSize = config.layerConfig.num_classes() - 1;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 50, 200});
  config.inputDefs.push_back({INPUT_LABEL, "layer_1", 5, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  // Not support GPU now
  testLayerGrad(config,
                "hsigmoid",
                100,
                /* trans */ false, /* useGpu */
                false);
}
TEST(Layer, multi_cross) {
  TestConfig config;
  config.layerConfig.set_type("multi-class-cross-entropy");
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 50, 0});
  config.inputDefs.push_back({INPUT_LABEL, "layer_1", 10, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(
        config, "multi-class-cross-entropy", 100, /* trans */ false, useGpu);
  }
}

TEST(Layer, multi_binary_label_sparse_mat) {
  TestConfig config;
  config.layerConfig.set_type("multi_binary_label_cross_entropy");
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 50, 0});
  config.inputDefs.push_back({INPUT_SPARSE_NON_VALUE_DATA, "layer_1", 50,
                              0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config,
                  "multi_binary_label_cross_entropy",
                  100,
                  /* trans */ false,
                  useGpu);
  }
}

TEST(layer, multi_binary_label_id) {
  TestConfig config;
  config.layerConfig.set_type("multi_binary_label_cross_entropy");
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 50, 0});
  config.inputDefs.push_back({INPUT_LABEL, "layer_1", 10, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config,
                  "multi_binary_label_cross_entropy",
                  100,
                  /* trans */ false,
                  useGpu);
  }
}

TEST(Layer, multi_cross_with_selfnorm) {
  TestConfig config;
  config.layerConfig.set_type("multi_class_cross_entropy_with_selfnorm");
  config.layerConfig.set_softmax_selfnorm_alpha(0.1);
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 50, 0});
  config.inputDefs.push_back({INPUT_LABEL, "layer_1", 10, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  // Not support GPU now
  testLayerGrad(config,
                "multi_class_cross_entropy_with_selfnorm",
                100,
                /* trans */ false,
                /* useGpu */ false);
}

TEST(Layer, multi_cross_soft) {
  TestConfig config;
  config.layerConfig.set_type("soft_binary_class_cross_entropy");
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 10, 0});
  config.inputDefs.push_back({INPUT_DATA_TARGET, "layer_1", 10, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config,
                  "soft_binary_class_cross_entropy",
                  100,
                  /* trans */ false,
                  useGpu);
  }
}
TEST(Layer, square_error) {
  TestConfig config;
  config.layerConfig.set_type("square_error");
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 10, 0});
  config.inputDefs.push_back({INPUT_DATA_TARGET, "layer_1", 10, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "square_error", 100, /* trans */ false, useGpu);
  }
}

TEST(Layer, sparse_square_error) {
  TestConfig config;
  config.layerConfig.set_type("square_error");
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 50, 0});
  config.inputDefs.push_back({INPUT_SPARSE_NON_VALUE_DATA, "layer_1", 50,
                              0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  // "GpuSparseMatrix" as label is not supported
  testLayerGrad(config,
                "square_error",
                100,
                /* trans */ false,
                /* useGpu */ false);
}

TEST(Layer, sparse_float_square_error) {
  TestConfig config;
  config.layerConfig.set_type("square_error");
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 50, 0});
  config.inputDefs.push_back({INPUT_SPARSE_FLOAT_VALUE_DATA, "layer_1", 50,
                              0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  // "GpuSparseMatrix" as label is not supported
  testLayerGrad(config,
                "square_error",
                100,
                /* trans */ false,
                /* useGpu */ false);
}

TEST(Layer, square_error_weighted) {
  TestConfig config;
  config.layerConfig.set_type("square_error");
  config.biasSize = 0;
  config.testAccumulate = false;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 10, 0});
  config.inputDefs.push_back({INPUT_DATA_TARGET, "layer_1", 10, 0});
  config.inputDefs.push_back({INPUT_DATA_TARGET, "layer_2", 1, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "square_error", 100, /* trans */ false, useGpu);
  }
}

TEST(Layer, huber_two_class) {
  TestConfig config;
  config.layerConfig.set_type("huber");
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 1, 0});
  config.inputDefs.push_back({INPUT_LABEL, "layer_1", 2, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "huber", 100, /* trans */ false, useGpu);
  }
}
void testExpandLayer(string trans_type, bool hasSubseq) {
  TestConfig config;
  config.layerConfig.set_type("expand");

  config.inputDefs.push_back(
      {trans_type == "non-seq" ? INPUT_DENSE_DIM_DATA : INPUT_SEQUENCE_DATA,
       "layer_0",
       10,
       0});
  config.inputDefs.push_back(
      {hasSubseq ? INPUT_HASSUB_SEQUENCE_DATA : INPUT_SEQUENCE_DATA,
       "layer_1",
       10,
       0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();
  config.layerConfig.set_trans_type(trans_type);
  LOG(INFO) << " trans_type=" << trans_type << " hasSubseq=" << hasSubseq;

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "expand", 30, false, useGpu);
  }
}

TEST(Layer, ExpandLayer) {
  testExpandLayer("non-seq", false);  // non-seq expand to seq
  testExpandLayer("non-seq", true);   // non-seq expand to hasSubseq
  testExpandLayer("seq", true);       // seq expand to hasSubseq
}
void
testDegradeLayer
(
bool
hasSubseq
,
// TEST(Layer, ExpandLayer) {
string
layer_type
,
// testExpandLayer("non-seq", false); // non-seq expand to seq
string
trans_type
,
// testExpandLayer("non-seq", true); // non-seq expand to hasSubseq
int
stride
)
{
// testExpandLayer("seq", true); // seq expand to hasSubseq
TestConfig
config
;
// }
config
.
layerConfig
.
set_type
(
layer_type
);
//
config
.
layerConfig
.
set_size
(
10
);
// void testDegradeLayer(bool hasSubseq,
config
.
layerConfig
.
set_seq_pool_stride
(
stride
);
// string layer_type,
config
.
biasSize
=
0
;
// string trans_type,
// int stride) {
config
.
inputDefs
.
push_back
(
// TestConfig config;
{
hasSubseq
?
INPUT_HASSUB_SEQUENCE_DATA
:
INPUT_SEQUENCE_DATA
,
// config.layerConfig.set_type(layer_type);
"layer_0"
,
// config.layerConfig.set_size(10);
10
,
// config.layerConfig.set_seq_pool_stride(stride);
0
});
// config.biasSize = 0;
config
.
layerConfig
.
add_inputs
();
//
config
.
layerConfig
.
set_trans_type
(
trans_type
);
// config.inputDefs.push_back(
// {hasSubseq ? INPUT_HASSUB_SEQUENCE_DATA : INPUT_SEQUENCE_DATA,
auto
testDegradeLayerGrad
=
[](
TestConfig
&
config
,
string
layer_type
)
{
// "layer_0",
for
(
auto
useGpu
:
{
false
,
true
})
{
// 10,
testLayerGrad
(
config
,
layer_type
,
100
,
false
,
useGpu
);
// 0});
}
// config.layerConfig.add_inputs();
};
// config.layerConfig.set_trans_type(trans_type);
//
if
(
layer_type
==
"average"
)
{
// auto testDegradeLayerGrad = [](TestConfig& config, string layer_type) {
for
(
auto
strategy
:
{
"average"
,
"sum"
,
"squarerootn"
})
{
// for (auto useGpu : {false, true}) {
LOG
(
INFO
)
<<
" hasSubseq="
<<
hasSubseq
<<
" trans_type="
<<
trans_type
// testLayerGrad(config, layer_type, 100, false, useGpu);
<<
" average_strategy="
<<
strategy
// }
<<
" seq_pool_stride="
<<
stride
;
// };
config
.
layerConfig
.
set_average_strategy
(
strategy
);
//
testDegradeLayerGrad
(
config
,
layer_type
);
// if (layer_type == "average") {
}
// for (auto strategy : {"average", "sum", "squarerootn"}) {
}
else
{
// LOG(INFO) << " hasSubseq=" << hasSubseq << " trans_type=" << trans_type
LOG
(
INFO
)
<<
" hasSubseq="
<<
hasSubseq
<<
" trans_type="
<<
trans_type
// << " average_strategy=" << strategy
<<
" seq_pool_stride="
<<
stride
;
// << " seq_pool_stride=" << stride;
testDegradeLayerGrad
(
config
,
layer_type
);
// config.layerConfig.set_average_strategy(strategy);
}
// testDegradeLayerGrad(config, layer_type);
}

TEST(Layer, MaxLayer) {
  testDegradeLayer(false, "max", "non-seq", -1);  // seq max to non-seq
  testDegradeLayer(false,
                   "max",
                   "non-seq",
                   5);  // seq max to a shortened seq, stride window = 5
  testDegradeLayer(true, "max", "non-seq", -1);  // hasSubseq max to non-seq
  testDegradeLayer(true, "max", "seq", -1);      // hasSubseq max to seq
}

TEST(Layer, SequenceLastInstanceLayer) {
  testDegradeLayer(false,
                   "seqlastins",
                   "non-seq",
                   -1);  // seq seqlastins to non-seq
  testDegradeLayer(false,
                   "seqlastins",
                   "non-seq",
                   5);  // seq seqlastins to a shortened seq, stride window = 5
  testDegradeLayer(true,
                   "seqlastins",
                   "non-seq",
                   -1);  // hasSubseq seqlastins to non-seq
  testDegradeLayer(
      true, "seqlastins", "seq", -1);  // hasSubseq seqlastins to seq
}

TEST(Layer, AverageLayer) {
  testDegradeLayer(false, "average", "non-seq", -1);  // seq average to non-seq
  testDegradeLayer(false,
                   "average",
                   "non-seq",
                   5);  // seq average to a shortened seq, stride window = 5
  testDegradeLayer(
      true, "average", "non-seq", -1);  // hasSubseq average to non-seq
  testDegradeLayer(true, "average", "seq", -1);  // hasSubseq average to seq
}

TEST(Layer, SequenceConcatLayer) {
  TestConfig config;
  config.layerConfig.set_type("seqconcat");
  config.layerConfig.set_size(10);
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_SEQUENCE_DATA, "layer_0", 10, 0});
  config.layerConfig.add_inputs();
  config.inputDefs.push_back({INPUT_SEQUENCE_DATA, "layer_1", 10, 0});
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "seqconcat", 100, false, useGpu);
  }
}

TEST(Layer, SequenceReshapeLayer) {
  TestConfig config;
  config.layerConfig.set_type("seqreshape");
  config.layerConfig.set_size(10);

  config.inputDefs.push_back({INPUT_SEQUENCE_DATA, "layer_0", 100, 0});
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "seqreshape", 100, false, useGpu);
  }
}

TEST(Layer, ConvShiftLayer) {
  TestConfig config;
  config.layerConfig.set_type("conv_shift");
  config.layerConfig.set_size(10);

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 10, 0});
  config.inputDefs.push_back({INPUT_DATA, "layer_1", 3, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  // GPU is not supported yet
  testLayerGrad(config, "conv_shift", 100, false, false);
}

TEST(Layer, PowerLayer) {
  TestConfig config;
  config.layerConfig.set_type("power");
  config.layerConfig.set_size(10);

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 1, 0});
  config.inputDefs.push_back({INPUT_DATA, "layer_1", 10, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "power", 100, false, useGpu);
  }
}

TEST(Layer, ConvexCombinationLayer) {
  TestConfig config;
  config.layerConfig.set_type("convex_comb");
  config.layerConfig.set_size(20);
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 5, 0});
  config.inputDefs.push_back({INPUT_DATA, "layer_1", 100, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "convex_comb", 100, false, useGpu);
  }
}

TEST(Layer, InterpolationLayer) {
  TestConfig config;
  config.layerConfig.set_type("interpolation");
  config.layerConfig.set_size(10);
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 1, 0});
  config.inputDefs.push_back({INPUT_DATA, "layer_1", 10, 0});
  config.inputDefs.push_back({INPUT_DATA, "layer_2", 10, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "interpolation", 100, false, useGpu);
  }
}

TEST(Layer, OuterProdLayer) {
  TestConfig config;
  config.layerConfig.set_type("out_prod");
  config.layerConfig.set_size(100);

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 10, 0});
  config.layerConfig.add_inputs();
  config.inputDefs.push_back({INPUT_DATA, "layer_1", 10, 0});
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "out_prod", 100, false, useGpu);
  }
}

TEST(Layer, SlopeInterceptLayer) {
  TestConfig config;
  config.layerConfig.set_type("slope_intercept");
  config.layerConfig.set_size(10);
  config.layerConfig.set_slope(1.0);
  config.layerConfig.set_intercept(0.1);

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 10, 0});
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "slope_intercept", 100, false, useGpu);
  }
}

TEST(Layer, ScalingLayer) {
  TestConfig config;
  config.layerConfig.set_type("scaling");
  config.layerConfig.set_size(10);
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 1, 0});
  config.layerConfig.add_inputs();
  config.inputDefs.push_back({INPUT_DATA, "layer_1", 10, 0});
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "scaling", 100, false, useGpu);
  }
}
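
// testNormLayer: configures a 16-channel 14x7 response-normalization input;
// for the cmrnorm variants the scale is pre-divided by the window size (or
// its square for the other norm types) before the gradient check.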
void testNormLayer(const string& normType, bool trans, bool useGpu) {
  TestConfig config;
  config.layerConfig.set_type("norm");
  config.layerConfig.set_active_type("relu");

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 1568, 0});
  LayerInputConfig* input = config.layerConfig.add_inputs();
  NormConfig* norm = input->mutable_norm_conf();
  norm->set_norm_type(normType);
  norm->set_channels(16);
  norm->set_size(5);
  norm->set_scale(0.001);
  norm->set_pow(0.75);
  norm->set_blocked(0);
  norm->set_img_size(14);
  norm->set_img_size_y(7);
  norm->set_output_x(norm->img_size());
  norm->set_output_y(norm->img_size_y());
  if (norm->norm_type() == "cmrnorm" ||
      norm->norm_type() == "cmrnorm-projection") {
    norm->set_scale(norm->scale() / norm->size());
  } else {
    norm->set_scale(norm->scale() / (norm->size() * norm->size()));
  }

  config.layerConfig.set_size(norm->output_x() * norm->output_y() *
                              norm->channels());
  config.biasSize = 0;

  testLayerGrad(config, "norm", 100, trans, useGpu);
}
TEST
(
Layer
,
NormLayer
)
{
//
testNormLayer
(
"cmrnorm-projection"
,
// config.layerConfig.set_size(norm->output_x() * norm->output_y() *
/* trans= */
false
,
/* useGpu= */
// norm->channels());
true
);
// config.biasSize = 0;
testNormLayer
(
"cmrnorm-projection"
,
//
/* trans= */
false
,
/* useGpu= */
// testLayerGrad(config, "norm", 100, trans, useGpu);
false
);
// }
}
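
// setPoolConfig: fills the pooling hyper-parameters shared by the pooling
// tests below (3x3 window, stride 2, no padding, 16 channels) and derives
// the output dimensions with outputSize() in non-caffe mode.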
void setPoolConfig(TestConfig* config,
                   PoolConfig* pool,
                   const string& poolType) {
  (*config).biasSize = 0;
  (*config).layerConfig.set_type("pool");
  (*config).layerConfig.set_num_filters(16);

  int kw = 3, kh = 3;
  int pw = 0, ph = 0;
  int sw = 2, sh = 2;
  pool->set_pool_type(poolType);
  pool->set_channels(16);
  pool->set_size_x(kw);
  pool->set_size_y(kh);
  pool->set_start(0);
  pool->set_padding(pw);
  pool->set_padding_y(ph);
  pool->set_stride(sw);
  pool->set_stride_y(sh);

  int ow = outputSize(pool->img_size(), kw, pw, sw, /* caffeMode */ false);
  int oh = outputSize(pool->img_size_y(), kh, ph, sh, /* caffeMode */ false);
  pool->set_output_x(ow);
  pool->set_output_y(oh);
}

void testPoolLayer(const string& poolType, bool trans, bool useGpu) {
  TestConfig config;
  config.inputDefs.push_back({INPUT_DATA, "layer_0", 3136, 0});
  LayerInputConfig* input = config.layerConfig.add_inputs();
  PoolConfig* pool = input->mutable_pool_conf();

  pool->set_img_size(14);
  pool->set_img_size_y(14);
  setPoolConfig(&config, pool, poolType);
  config.layerConfig.set_size(pool->output_x() * pool->output_y() *
                              pool->channels());

  testLayerGrad(config, "pool", 100, trans, useGpu);
}

#ifndef PADDLE_ONLY_CPU
void testPoolLayer2(const string& poolType, bool trans, bool useGpu) {
  TestConfig config;
  config.inputDefs.push_back({INPUT_DATA, "layer_0", 3200, 0});
  LayerInputConfig* input = config.layerConfig.add_inputs();
  PoolConfig* pool = input->mutable_pool_conf();

  pool->set_size_y(4);
  pool->set_stride_y(3);
  pool->set_img_size(10);
  pool->set_img_size_y(20);
  setPoolConfig(&config, pool, poolType);
  pool->set_output_y((pool->img_size_y() - pool->start() - pool->size_y()) /
                         ((float)pool->stride_y()) +
                     1.5);
  config.layerConfig.set_size(pool->output_x() * pool->output_y() *
                              pool->channels());

  testLayerGrad(config, "pool", 100, trans, useGpu);
}
#endif

TEST(Layer, PoolLayer) {
  testPoolLayer("avg-projection", /* trans= */ false, /* useGpu= */ false);
  testPoolLayer("max-projection", /* trans= */ false, /* useGpu= */ false);

#ifndef PADDLE_ONLY_CPU
  testPoolLayer("avg-projection", /* trans= */ false, /* useGpu= */ true);
  testPoolLayer("max-projection", /* trans= */ false, /* useGpu= */ true);
  testPoolLayer("cudnn-max-pool", /* trans= */ false, /* useGpu= */ true);
  testPoolLayer("cudnn-avg-pool", /* trans= */ false, /* useGpu= */ true);
  testPoolLayer2("cudnn-max-pool", /* trans= */ false, /* useGpu= */ true);
  testPoolLayer2("cudnn-avg-pool", /* trans= */ false, /* useGpu= */ true);
#endif
}
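
// testSppLayer: a spatial pyramid pooling layer with pyramidHeight levels;
// level l contributes 4^l bins per channel, so the total output per channel
// is the geometric sum (4^pyramidHeight - 1) / (4 - 1).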
void testSppLayer(const string& poolType,
                  const int pyramidHeight,
                  bool trans,
                  bool useGpu) {
  TestConfig config;
  config.layerConfig.set_type("spp");
  config.inputDefs.push_back({INPUT_DATA, "layer_0", 3200, 0});
  LayerInputConfig* input = config.layerConfig.add_inputs();
  SppConfig* sppConfig = input->mutable_spp_conf();
  sppConfig->set_pool_type(poolType);
  sppConfig->set_pyramid_height(pyramidHeight);
  ImageConfig* imageConfig = sppConfig->mutable_image_conf();
  imageConfig->set_channels(16);
  imageConfig->set_img_size(10);
  imageConfig->set_img_size_y(20);
  int outputSize = (std::pow(4, sppConfig->pyramid_height()) - 1) / (4 - 1);
  config.layerConfig.set_size(outputSize * imageConfig->channels());
  testLayerGrad(config, "spp", 100, trans, useGpu);
}

TEST(Layer, SpatialPyramidPoolLayer) {
  for (auto useGpu : {false, true}) {
    for (auto pyramidHeight : {1, 2, 3}) {
      testSppLayer("avg-projection", pyramidHeight, false, useGpu);
      testSppLayer("max-projection", pyramidHeight, false, useGpu);
    }
  }
}

TEST(Layer, rankCostLayer) {
  TestConfig config;
  config.layerConfig.set_type("rank-cost");
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 1, 0});
  config.inputDefs.push_back({INPUT_DATA, "layer_1", 1, 0});
  config.inputDefs.push_back({INPUT_DATA_TARGET, "layer_2", 1, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "rank-cost", 100, false, useGpu);
  }
}

TEST(Layer, sumCostLayer) {
  TestConfig config;
  config.layerConfig.set_type("sum_cost");
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 1, 0});
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "sum_cost", 100, false, useGpu);
  }
}

TEST(Layer, weightedRankCostLayer) {
  TestConfig config;
  config.layerConfig.set_type("rank-cost");
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 1, 0});
  config.inputDefs.push_back({INPUT_DATA, "layer_1", 1, 0});
  config.inputDefs.push_back({INPUT_DATA_TARGET, "layer_2", 1, 0});
  config.inputDefs.push_back({INPUT_DATA_TARGET, "layer_3", 1, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "weighted-rank-cost", 100, false, useGpu);
  }
}

TEST(Layer, TensorLayer) {
  TestConfig config;
  config.layerConfig.set_type("tensor");
  config.layerConfig.set_size(10);
  config.layerConfig.set_active_type("sigmoid");
  config.biasSize = config.layerConfig.size();

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 5, 250});
  config.inputDefs.push_back({INPUT_DATA, "layer_1", 5, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "tensor", 100, false, useGpu);
  }
}

TEST(Layer, RecurrentLayer) {
  TestConfig config;
  config.layerConfig.set_type("recurrent");
  config.layerConfig.set_size(4);
  config.layerConfig.set_active_type("tanh");
  config.biasSize = 4;

  config.inputDefs.push_back(
      {INPUT_SEQUENCE_DATA, "layer_0", /* dim= */ 4, /* paraSize= */ 16});
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    for (auto reversed : {false, true}) {
      config.layerConfig.set_reversed(reversed);
      config.testState = !reversed;
      testLayerGrad(config, "recurrent", 50, /* trans= */ false, useGpu);
    }
  }
}
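
// NOTE (annotation): biasSize = 28 is 7 * layer size (4); presumably the
// lstmemory bias packs the gate/cell biases together with the peephole
// parameters, which would account for the factor of 7.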
TEST(Layer, LstmLayer) {
  TestConfig config;
  config.layerConfig.set_type("lstmemory");
  config.layerConfig.set_size(4);
  config.layerConfig.set_active_type("tanh");
  config.layerConfig.set_active_state_type("sigmoid");
  config.layerConfig.set_active_gate_type("sigmoid");
  config.biasSize = 28;

  config.inputDefs.push_back(
      {INPUT_SEQUENCE_DATA, "layer_0", /* dim= */ 16, /* paraSize= */ 64});
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    for (auto reversed : {false, true}) {
      config.layerConfig.set_reversed(reversed);
      config.testState = !reversed;
      testLayerGrad(config, "lstmemory", 100, /* trans= */ false, useGpu);
    }
  }
  for (auto useGpu : {true}) {
    config.testBatchState = true;
    config.layerConfig.set_reversed(false);
    testLayerGrad(config, "lstmemory", 10, /* trans= */ false, useGpu);
  }
}
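
// MDLstmLayer: a 2-D multi-directional LSTM; the nested i/j loops below
// enumerate all four scanning-direction combinations along the two
// dimensions.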
TEST(Layer, MDLstmLayer) {
  TestConfig config;
  config.layerConfig.set_type("mdlstmemory");
  config.layerConfig.set_size(4);
  config.layerConfig.set_active_type("sigmoid");
  config.layerConfig.set_active_state_type("sigmoid");
  config.layerConfig.set_active_gate_type("sigmoid");
  config.biasSize = 4 * 9;

  config.inputDefs.push_back(
      {INPUT_SEQUENCE_MDIM_DATA, "layer_0", 4 * 5, 4 * 4 * 5});
  config.layerConfig.add_inputs();
  config.layerConfig.add_directions(true);
  config.layerConfig.add_directions(true);

  for (auto useGpu : {false, true}) {
    for (int i = 0; i < 2; i++) {
      for (int j = 0; j < 2; j++) {
        config.layerConfig.set_directions(0, bool(i));
        config.layerConfig.set_directions(1, bool(j));
        testLayerGrad(config, "mdlstmemory", 100, false, useGpu);
      }
    }
  }
}

TEST(Layer, ParameterReluLayer) {
  auto testParameterReluLayer = [&](size_t inputSize, size_t channels) {
    TestConfig config;
    config.layerConfig.set_type("prelu");
    config.inputDefs.push_back({INPUT_DATA, "layer_0", inputSize, channels});
    config.layerConfig.add_inputs();
    config.layerConfig.set_size(inputSize);
    config.layerConfig.set_partial_sum(inputSize /
                                       channels);  // size of feature map
    for (auto useGpu : {false, true}) {
      testLayerGrad(config, "prelu", 100, false, useGpu);
    }
  };

  testParameterReluLayer(192, 1);
  testParameterReluLayer(192, 3);
  testParameterReluLayer(192, 192);
}

TEST(Layer, ResizeLayer) {
  TestConfig config;
  config.biasSize = 0;
  config.layerConfig.set_type("resize");
  config.layerConfig.set_size(64);

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 16, 0});
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "resize", 100, false, useGpu);
  }
}

TEST(Layer, RotateLayer) {
  TestConfig config;
  config.biasSize = 0;
  config.layerConfig.set_type("rotate");
  const int CHANNEL = 2;
  const int HEIGHT = 8;
  const int WIDTH = 4;
  const int INPUT_SIZE = HEIGHT * WIDTH * CHANNEL;
  config.layerConfig.set_size(INPUT_SIZE);
  config.layerConfig.set_height(HEIGHT);
  config.layerConfig.set_width(WIDTH);
  config.inputDefs.push_back({INPUT_DATA, "layer_0", INPUT_SIZE, 0});
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "rotate", 100, false, useGpu);
  }
}
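
// NCELayer: noise-contrastive estimation over 4 classes. When withDist is
// set, a random negative-sampling distribution is drawn and normalized so
// that its entries sum to 1 before the gradient check.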
TEST(Layer, NCELayer) {
  TestConfig config;
  size_t numClasses = 4;
  config.layerConfig.set_type("nce");
  config.layerConfig.set_size(1);
  config.layerConfig.set_active_type("sigmoid");
  config.layerConfig.set_num_classes(numClasses);
  config.biasSize = numClasses;

  config.inputDefs.push_back(
      {INPUT_DATA, "layer_0", /* dim= */ 16, /* paraSize= */ 16 * numClasses});
  config.inputDefs.push_back(
      {INPUT_LABEL, "label", /* dim= */ numClasses, /* paraSize= */ 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto withWeight : {false, true}) {
    if (withWeight) {
      config.inputDefs.push_back(
          {INPUT_DATA_TARGET, "weight", /* dim= */ 1, /* paraSize= */ 0});
      config.layerConfig.add_inputs();
    }

    for (auto isIdLabel : {false, true}) {
      config.inputDefs[1] = {
          isIdLabel ? INPUT_LABEL : INPUT_SPARSE_NON_VALUE_DATA,
          "label",
          /* dim= */ numClasses,
          /* paraSize= */ 0};

      for (auto withDist : {false, true}) {
        config.layerConfig.clear_neg_sampling_dist();
        if (withDist) {
          double sum = 0;
          for (size_t i = 0; i < numClasses; ++i) {
            real p = rand();  // NOLINT use rand_r
            config.layerConfig.add_neg_sampling_dist(p);
            sum += p;
          }
          for (size_t i = 0; i < numClasses; ++i) {
            real p = config.layerConfig.neg_sampling_dist(i) / sum;
            config.layerConfig.set_neg_sampling_dist(i, p);
          }
        }
        LOG(INFO) << "NCELayer "
                  << " isIdLabel=" << isIdLabel << " withWeight=" << withWeight
                  << " withDist=" << withDist;
        // GPU is not supported yet
        testLayerGrad(config,
                      "nce",
                      100,
                      /* trans= */ false,
                      /* useGpu */ false);
      }
    }
  }
}

TEST(Layer, GatedRecurrentLayer) {
  TestConfig config;
  config.layerConfig.set_type("gated_recurrent");
  config.layerConfig.set_size(4);
  config.layerConfig.set_active_type("sigmoid");
  config.layerConfig.set_active_gate_type("sigmoid");
  config.biasSize = 12;

  config.inputDefs.push_back(
      {INPUT_SEQUENCE_DATA, "layer_0", /* dim= */ 12, /* paraSize= */ 48});
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    for (auto reversed : {false, true}) {
      config.layerConfig.set_reversed(reversed);
      config.testState = !reversed;
      testLayerGrad(config, "gated_recurrent", 100, /* trans= */ false, useGpu);
    }
  }
}

TEST(Layer, GruStepLayer) {
  TestConfig config;
  config.layerConfig.set_type("gru_step");
  config.layerConfig.set_size(4);
  config.layerConfig.set_active_type("sigmoid");
  config.layerConfig.set_active_gate_type("sigmoid");
  config.biasSize = 12;

  config.inputDefs.push_back(
      {INPUT_DATA, "layer_0", /* dim= */ 12, /* paraSize= */ 48});
  config.inputDefs.push_back(
      {INPUT_DATA, "layer_1", /* dim= */ 4, /* paraSize= */ 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "gruStep", 100, /* trans= */ false, useGpu);
  }
}

TEST(Layer, LstmStepLayer) {
  TestConfig config;
  config.layerConfig.set_type("lstm_step");
  config.layerConfig.set_size(4);
  config.layerConfig.set_active_type("sigmoid");
  config.layerConfig.set_active_state_type("sigmoid");
  config.layerConfig.set_active_gate_type("sigmoid");
  config.biasSize = 12;
  config.testAccumulate = false;

  config.inputDefs.push_back(
      {INPUT_DATA, "layer_0", /* dim= */ 16, /* paraSize= */ 0});
  config.inputDefs.push_back(
      {INPUT_DATA, "layer_1", /* dim= */ 4, /* paraSize= */ 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "lstmStep", 100, /* trans= */ false, useGpu);
  }
}
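
// testBatchNormLayer: besides the image input, batch_norm takes running
// mean and variance as extra inputs; both are marked isStatic so the
// gradient checker does not perturb them.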
void testBatchNormLayer(const string& type, bool trans, bool useGpu) {
  TestConfig config;
  const int CHANNELS = 10;
  const int IMG_SIZE = 16;
  const int IMG_SIZE_Y = 8;
  size_t size = CHANNELS * IMG_SIZE * IMG_SIZE_Y;
  config.layerConfig.set_type(type);
  config.layerConfig.set_size(size);
  config.layerConfig.set_active_type("sigmoid");
  config.biasSize = CHANNELS;
  config.inputDefs.push_back({INPUT_DATA,
                              "layer_0",
                              /* dim= */ size,
                              /* paraSize= */ CHANNELS});

  config.inputDefs.push_back(
      {INPUT_DATA, "layer_1_running_mean", 1, CHANNELS});
  config.inputDefs.back().isStatic = true;
  config.inputDefs.push_back(
      {INPUT_DATA, "layer_2_running_var", 1, CHANNELS});
  config.inputDefs.back().isStatic = true;

  LayerInputConfig* input = config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  ImageConfig* img_conf = input->mutable_image_conf();
  img_conf->set_channels(CHANNELS);
  img_conf->set_img_size(IMG_SIZE);
  img_conf->set_img_size_y(IMG_SIZE_Y);

  testLayerGrad(config,
                "batch_norm",
                64,
                /* trans= */ trans,
                useGpu,
                /* useWeight */ true);
}

TEST(Layer, BatchNormalizationLayer) {
  testBatchNormLayer("batch_norm", false, false);
#ifndef PADDLE_ONLY_CPU
  testBatchNormLayer("batch_norm", false, true);
  if (hl_get_cudnn_lib_version() >= int(4000)) {
    testBatchNormLayer("cudnn_batch_norm", false, true);
  }
#endif
}
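
// testConvOperator: checks the "conv" and "convt" (transposed convolution)
// operators; for the deconv case the input/output roles of the image and
// feature-map shapes are swapped accordingly.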
void testConvOperator(bool isDeconv) {
  TestConfig config;
  const int NUM_FILTERS = 16;
  const int FILTER_SIZE = 2;
  const int FILTER_SIZE_Y = 3;
  const int CHANNELS = 3;
  const int IMAGE_SIZE = 16;
  const int IMAGE_SIZE_Y = 9;
  OperatorConfig& operatorConf = *config.layerConfig.add_operator_confs();
  if (isDeconv) {
    operatorConf.set_type("convt");
  } else {
    operatorConf.set_type("conv");
  }
  ConvConfig* conv = operatorConf.mutable_conv_conf();
  operatorConf.set_num_filters(NUM_FILTERS);
  conv->set_filter_size(FILTER_SIZE);
  conv->set_filter_size_y(FILTER_SIZE_Y);
  conv->set_channels(CHANNELS);
  conv->set_padding(0);
  conv->set_padding_y(1);
  conv->set_stride(2);
  conv->set_stride_y(2);
  conv->set_groups(1);
  conv->set_img_size(IMAGE_SIZE);
  conv->set_img_size_y(IMAGE_SIZE_Y);
  conv->set_output_x(outputSize(conv->img_size(),
                                conv->filter_size(),
                                conv->padding(),
                                conv->stride(),
                                /* caffeMode */ true));
  conv->set_output_y(outputSize(conv->img_size_y(),
                                conv->filter_size_y(),
                                conv->padding_y(),
                                conv->stride_y(),
                                /* caffeMode */ true));

  if (isDeconv) {
    conv->set_filter_channels(NUM_FILTERS / conv->groups());
    config.inputDefs.push_back({INPUT_DATA,
                                "layer_0",
                                conv->output_x() * conv->output_y() * CHANNELS,
                                0});
    config.layerConfig.set_size(IMAGE_SIZE * IMAGE_SIZE_Y * NUM_FILTERS);
  } else {
    conv->set_filter_channels(conv->channels() / conv->groups());
    config.inputDefs.push_back(
        {INPUT_DATA, "layer_0", IMAGE_SIZE * IMAGE_SIZE_Y * CHANNELS, 0});
    config.layerConfig.set_size(conv->output_x() * conv->output_y() *
                                NUM_FILTERS);
  }

  config.inputDefs.push_back(
      {INPUT_DATA,
       "layer_1",
       FILTER_SIZE * FILTER_SIZE_Y * CHANNELS * NUM_FILTERS,
       0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  testOperatorGrad(config, operatorConf, 100, /*useGpu*/ true, false);
}

TEST(Operator, conv) {
  testConvOperator(/*isDeconv*/ true);
  testConvOperator(/*isDeconv*/ false);
}

TEST(Layer, FeatureMapExpandLayer) {
  TestConfig config;
  config.layerConfig.set_type("featmap_expand");
  const int CHANNELS = 10;
  const int INPUT_SIZE = 100;
  config.layerConfig.set_size(INPUT_SIZE * CHANNELS);
  config.layerConfig.set_num_filters(CHANNELS);
  config.inputDefs.push_back({INPUT_SEQUENCE_DATA,
                              "layer_0",
                              /* dim= */ INPUT_SIZE,
                              /* paraSize= */ 0});
  config.layerConfig.add_inputs();
  for (auto useGpu : {false, true}) {
    for (auto asRowVec : {false, true}) {
      config.layerConfig.set_user_arg(asRowVec ? "as_row_vec" : "as_col_vec");
      testLayerGrad(config,
                    "featmap_expand",
                    /*batch_size*/ 100,
                    /* trans= */ false,
                    useGpu,
                    /* useWeight */ true);
    }
  }
}

TEST(Layer, MultiplexLayer) {
  TestConfig config;
  const int LAYER_SIZE = 100;
  config.layerConfig.set_type("multiplex");
  config.layerConfig.set_size(LAYER_SIZE);

  config.inputDefs.push_back({INPUT_LABEL, "layer_0", 2, 0});
  config.inputDefs.push_back(
      {INPUT_DATA, "layer_1", /* dim= */ LAYER_SIZE, /* paraSize= */ 0});
  config.inputDefs.push_back(
      {INPUT_DATA, "layer_2", /* dim= */ LAYER_SIZE, /* paraSize= */ 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "multiplex", 512, /* trans= */ false, useGpu);
  }
}

TEST(Layer, PadLayer) {
  TestConfig config;
  config.biasSize = 0;
  config.layerConfig.set_type("pad");

  int c = 4;
  int h = 31;
  int w = 36;
  size_t size = c * h * w;
  config.inputDefs.push_back({INPUT_DATA, "layer_0", size, 0});
  LayerInputConfig* input = config.layerConfig.add_inputs();
  PadConfig* pad = input->mutable_pad_conf();
  ImageConfig* image = pad->mutable_image_conf();

  image->set_channels(c);
  image->set_img_size(h);
  image->set_img_size_y(w);
  pad->add_pad_c(1);
  pad->add_pad_c(2);
  pad->add_pad_h(2);
  pad->add_pad_h(3);
  pad->add_pad_w(3);
  pad->add_pad_w(5);

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "pad", 10, false, useGpu);
  }
}

TEST(Layer, CrossChannelNormLayer) {
  TestConfig config;
  config.paramInitialMean = 1.;
  config.paramInitialStd = 0.;
  config.layerConfig.set_type("norm");
  config.layerConfig.set_size(100);
  LayerInputConfig* input = config.layerConfig.add_inputs();
  NormConfig* norm = input->mutable_norm_conf();
  norm->set_norm_type("cross-channel-norm");
  norm->set_channels(10);
  norm->set_size(100);
  norm->set_scale(0);
  norm->set_pow(0);
  norm->set_blocked(0);
  config.inputDefs.push_back({INPUT_DATA, "layer_0", 100, 10});

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "cross-channel-norm", 10, false, useGpu, false);
  }
}

TEST(Layer, smooth_l1) {
  TestConfig config;
  config.layerConfig.set_type("smooth_l1");

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 200, 0});
  config.inputDefs.push_back({INPUT_DATA_TARGET, "layer_1", 200, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "smooth_l1", 100, false, useGpu, false);
  }
}
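
// multibox_loss: the ground-truth label row and the first prior box are
// filled with fixed coordinates so that at least one prior matches the
// ground truth, which the loss computation requires.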
TEST(Layer, multibox_loss) {
  TestConfig config;
  config.layerConfig.set_type("multibox_loss");
  config.biasSize = 0;
  LayerInputConfig* input = config.layerConfig.add_inputs();
  MultiBoxLossConfig* multiboxLoss = input->mutable_multibox_loss_conf();
  multiboxLoss->set_num_classes(21);
  multiboxLoss->set_input_num(1);
  multiboxLoss->set_overlap_threshold(0.5);
  multiboxLoss->set_neg_pos_ratio(3);
  multiboxLoss->set_neg_overlap(0.5);
  multiboxLoss->set_background_id(0);
  multiboxLoss->set_height(3);
  multiboxLoss->set_width(3);

  size_t gtNum = 1;
  MatrixPtr labelValue = Matrix::create(gtNum, 6, false, false);
  labelValue->randomizeUniform();
  labelValue->add(-0.5);
  labelValue->sigmoid(*labelValue);
  real* labelData = labelValue->getData();
  size_t labelWidth = labelValue->getWidth();
  for (size_t i = 0; i < gtNum; ++i) {
    *(labelData + i * labelWidth) = std::rand() % 20 + 1;
    *(labelData + i * labelWidth + 1) = 0.400259;
    *(labelData + i * labelWidth + 2) = 0.377857;
    *(labelData + i * labelWidth + 3) = 0.525712;
    *(labelData + i * labelWidth + 4) = 0.519368;
  }
  vector<int> seqStartPositions(gtNum + 1, 0);
  for (size_t i = 1; i <= gtNum; ++i) {
    seqStartPositions[i] = i;
  }

  // Ensure at least one matched bbox
  MatrixPtr priorValue = Matrix::create(1, 72, false, false);
  priorValue->randomizeUniform();
  priorValue->add(-0.5);
  priorValue->sigmoid(*priorValue);
  real* priorData = priorValue->getData();
  *(priorData) = 0.424811;
  *(priorData + 1) = 0.397059;
  *(priorData + 2) = 0.538905;
  *(priorData + 3) = 0.447091;
  *(priorData + 4) = 0.425720;
  *(priorData + 5) = 0.515228;
  *(priorData + 6) = 0.519452;
  *(priorData + 7) = 0.591065;

  config.inputDefs.push_back(
      {INPUT_SELF_DEFINE_DATA, "priorbox", priorValue, {}});
  config.inputDefs.push_back(
      {INPUT_SELF_DEFINE_DATA, "label", labelValue, seqStartPositions});
  config.inputDefs.push_back({INPUT_DATA, "locPred", 36, 0});
  config.inputDefs.push_back({INPUT_DATA, "confPred", 189, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "multibox_loss", 1, false, useGpu, false);
  }
}
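The hand-written coordinates pin down the overlap between the single ground-truth box and the first prior, so the SSD-style matching step, which always keeps the highest-IoU prior for each ground truth, deterministically produces at least one positive. A standalone IoU sketch, assuming the usual (xmin, ymin, xmax, ymax) layout for both label columns 1-4 and the prior box (that ordering is our assumption, not spelled out in the test):

#include <algorithm>
#include <cstdio>

struct Box { float xmin, ymin, xmax, ymax; };

// Jaccard overlap (intersection over union) between two axis-aligned boxes.
float iou(const Box& a, const Box& b) {
  float ix = std::max(0.f, std::min(a.xmax, b.xmax) - std::max(a.xmin, b.xmin));
  float iy = std::max(0.f, std::min(a.ymax, b.ymax) - std::max(a.ymin, b.ymin));
  float inter = ix * iy;
  float areaA = (a.xmax - a.xmin) * (a.ymax - a.ymin);
  float areaB = (b.xmax - b.xmin) * (b.ymax - b.ymin);
  return inter / (areaA + areaB - inter);
}

int main() {
  // Ground truth and first prior, copied from the test's fixed values.
  Box gt{0.400259f, 0.377857f, 0.525712f, 0.519368f};
  Box prior{0.424811f, 0.397059f, 0.538905f, 0.447091f};
  std::printf("IoU = %f\n", iou(gt, prior));
  return 0;
}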
TEST(Layer, TransLayer) {
  TestConfig config;
  const int height = 128;
  const int width = 1028;
  config.layerConfig.set_type("trans");
  config.layerConfig.set_size(width);

  config.inputDefs.push_back(
      {INPUT_DATA, "layer_0", /* dim= */ height * width, /* paraSize= */ 0});
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "trans", height, /* trans= */ false, useGpu);
  }
}
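The "trans" layer simply transposes its batchSize x size input matrix. A toy standalone transpose for orientation (the values are ours):

#include <cstdio>

int main() {
  const int R = 2, C = 3;
  float a[R][C] = {{1, 2, 3}, {4, 5, 6}};
  float t[C][R];
  for (int i = 0; i < R; ++i)
    for (int j = 0; j < C; ++j) t[j][i] = a[i][j];  // t = a^T
  for (int j = 0; j < C; ++j) std::printf("%g %g\n", t[j][0], t[j][1]);
  return 0;
}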
TEST(Layer, RowConvLayer) {
  const int context = 3;
  const int size = 512;

  TestConfig config;
  config.layerConfig.set_type("row_conv");
  config.layerConfig.set_size(size);
  config.layerConfig.set_active_type("sigmoid");

  config.inputDefs.push_back(
      {INPUT_SEQUENCE_DATA, "layer_0", size, context * size});
  LayerInputConfig* input = config.layerConfig.add_inputs();
  RowConvConfig* conv = input->mutable_row_conv_conf();
  conv->set_context_length(context);

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "row_conv", 100, false, useGpu, false);
  }
}
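Row convolution is the lookahead convolution from Deep Speech 2: each output step mixes the current and the following context - 1 steps, per feature dimension, which is why the parameter size above is context * size. A toy 1-D standalone sketch under that reading (weights and inputs are our own):

#include <cstdio>

int main() {
  const int context = 3, steps = 5;
  float in[steps + context - 1] = {1, 2, 3, 4, 5, 0, 0};  // zero-padded tail
  float w[context] = {0.5f, 0.3f, 0.2f};
  // out[t] = sum_{k=0}^{context-1} w[k] * in[t + k], for one feature dim
  for (int t = 0; t < steps; ++t) {
    float out = 0.f;
    for (int k = 0; k < context; ++k) out += w[k] * in[t + k];
    std::printf("out[%d] = %.2f\n", t, out);
  }
  return 0;
}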
TEST(Layer, CropLayer) {
  TestConfig config;
  // config input_0
  config.inputDefs.push_back({INPUT_DATA, "layer_0", 1024, 0});
  LayerInputConfig* input = config.layerConfig.add_inputs();
  ImageConfig* img = input->mutable_image_conf();
  img->set_channels(4);
  img->set_img_size(16);
  config.layerConfig.set_axis(2);
  config.layerConfig.add_offset(0);
  config.layerConfig.add_offset(0);

  // config input_1
  config.inputDefs.push_back({INPUT_DATA, "layer_1", 128, 0});
  input = config.layerConfig.add_inputs();
  img = input->mutable_image_conf();
  img->set_channels(2);
  img->set_img_size(8);

  // config crop layer
  config.layerConfig.set_type("crop");
  config.layerConfig.set_name("cropLayer");

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "crop", 100, false, useGpu, false);
  }
}
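input_0 above is a 4 x 16 x 16 image and input_1 a 2 x 8 x 8 reference; with axis = 2 and zero offsets, the spatial dimensions are cropped down to the reference's 8 x 8 starting from the top-left corner. A minimal standalone 2-D crop in that spirit (our toy, not the layer's exact axis/offset semantics):

#include <cstdio>

int main() {
  const int H = 16, W = 16, h = 8, w = 8, offH = 0, offW = 0;
  float in[H][W];
  for (int i = 0; i < H; ++i)
    for (int j = 0; j < W; ++j) in[i][j] = i * W + j;
  // Copy an h x w window starting at (offH, offW); done per channel in the layer.
  float out[h][w];
  for (int i = 0; i < h; ++i)
    for (int j = 0; j < w; ++j) out[i][j] = in[offH + i][offW + j];
  std::printf("out[0][0]=%g out[%d][%d]=%g\n", out[0][0], h - 1, w - 1,
              out[h - 1][w - 1]);
  return 0;
}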
vector<real> randSampling(real range, int n) {
  CHECK_GE(range, n);
  vector<real> num(range);
  iota(begin(num), end(num), 0.);
  if (range == n) return num;

  random_shuffle(begin(num), end(num));
  num.resize(n);
  sort(begin(num), end(num));
  return num;
}
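The helper draws n distinct values from [0, range) and returns them in ascending order. A self-contained version for quick experimentation; we substitute a plain assert for Paddle's CHECK_GE and assume real is single-precision float:

#include <algorithm>
#include <cassert>
#include <cstdio>
#include <numeric>
#include <vector>

using real = float;  // assumption: Paddle's default real type

std::vector<real> randSamplingDemo(real range, int n) {
  assert(range >= n);  // stands in for CHECK_GE(range, n)
  std::vector<real> num(static_cast<size_t>(range));
  std::iota(num.begin(), num.end(), 0.f);
  if (static_cast<int>(range) == n) return num;
  std::random_shuffle(num.begin(), num.end());  // as in the test file (pre-C++17)
  num.resize(n);
  std::sort(num.begin(), num.end());
  return num;
}

int main() {
  for (real v : randSamplingDemo(5.f, 3)) std::printf("%g ", v);
  std::printf("\n");  // e.g. "0 2 4": three distinct sorted values in [0, 5)
  return 0;
}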
TEST(Layer, SubNestedSequenceLayer) {
  // The layer size is not crucial for this layer,
  // so use a small layer size in the unittest.
  const int layerSize = 8;

  const int maxSeqNum = 5;
  const int maxSeqLen = 5;
  const int beamSize = 3;

  TestConfig config;
  config.layerConfig.set_type("sub_nested_seq");
  config.layerConfig.set_name("sub_nested_seq_layer");
  config.layerConfig.set_size(layerSize);

  // srand((size_t)(time(NULL)));
  srand(1);
  int seqNum = 1 + (rand() % maxSeqNum);

  // sequence information for the first input; it is a nested sequence
  vector<int> seqStartPos(seqNum + 1, 0);
  vector<int> subSeqStartPos(1, 0);

  // selected indices
  MatrixPtr selectedIndices = Matrix::create(seqNum, beamSize, false, false);
  selectedIndices->one();
  selectedIndices->mulScalar(-1.);
  real* indicesData = selectedIndices->getData();

  for (int i = 0; i < seqNum; ++i) {
    int subSeqNum = 1 + (rand() % maxSeqNum);
    for (int j = 0; j < subSeqNum; ++j) {
      subSeqStartPos.push_back(subSeqStartPos.back() +
                               (1 + (rand() % maxSeqLen)));
    }
    vector<real> selSeqs =
        randSampling(static_cast<real>(subSeqNum), min(beamSize, subSeqNum));
    memcpy(indicesData + (i * beamSize),
           selSeqs.data(),
           selSeqs.size() * sizeof(real));
    seqStartPos[i + 1] = subSeqStartPos.back();
  }

  MatrixPtr seqInputPtr =
      Matrix::create(seqStartPos.back(), layerSize, false, false);
  config.inputDefs.push_back({INPUT_SELF_DEFINE_DATA,
                              "nested_seq_input",
                              seqInputPtr,
                              seqStartPos,
                              subSeqStartPos});
  config.layerConfig.add_inputs();
  config.inputDefs.push_back(
      {INPUT_SELF_DEFINE_DATA, "selected_indices", selectedIndices});
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config,
                  "sub_nested_seq",
                  /* batchSize */ seqNum,
                  /* trans */ false,
                  /* useGpu*/ useGpu,
                  /* useWeight */ false);
  }
}
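To make the start-position bookkeeping above concrete: seqStartPos marks where each top-level sequence begins in the flattened row dimension, subSeqStartPos marks every subsequence boundary, and selectedIndices is a seqNum x beamSize matrix whose unused slots stay at -1 (hence one() followed by mulScalar(-1.)). A toy standalone illustration with values of our own choosing:

#include <cstdio>
#include <vector>

int main() {
  // 2 sequences: the first holds 2 subsequences, the second holds 1.
  std::vector<int> seqStartPos = {0, 5, 8};        // sequence boundaries
  std::vector<int> subSeqStartPos = {0, 2, 5, 8};  // subsequence boundaries
  for (size_t i = 0; i + 1 < seqStartPos.size(); ++i)
    std::printf("seq %zu: rows [%d, %d)\n", i, seqStartPos[i],
                seqStartPos[i + 1]);
  for (size_t i = 0; i + 1 < subSeqStartPos.size(); ++i)
    std::printf("subseq %zu: rows [%d, %d)\n", i, subSeqStartPos[i],
                subSeqStartPos[i + 1]);
  return 0;
}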
TEST(Layer, ClipLayer) {
  const size_t batchSize = 128;
  const size_t size = 512;
  TestConfig config;
  config.layerConfig.set_type("clip");
  config.inputDefs.push_back({INPUT_DATA, "input", size, 0});
  LayerInputConfig* input = config.layerConfig.add_inputs();
  ClipConfig* layerConf = input->mutable_clip_conf();
  double p1 = std::rand() / (double)RAND_MAX;
  double p2 = std::rand() / (double)RAND_MAX;
  layerConf->set_min(std::min(p1, p2));
  layerConf->set_max(std::max(p1, p2));
  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "clip", batchSize, false, useGpu, false);
  }
}
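The clip layer bounds every element to the [min, max] interval drawn randomly above; gradients are zeroed outside that interval, which is exactly what the gradient check exercises. A standalone sketch of the forward rule (the sample values are ours):

#include <algorithm>
#include <cstdio>

int main() {
  const double lo = 0.2, hi = 0.8;
  for (double x : {-0.5, 0.3, 1.2})
    std::printf("clip(%.1f) = %.1f\n", x, std::min(std::max(x, lo), hi));
  return 0;
}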
TEST(Layer, RowL2NormLayer) {
  const size_t batchSize = 128;
  const size_t size = 512;
  TestConfig config;
  config.layerConfig.set_type("row_l2_norm");
  config.layerConfig.set_size(size);
  config.inputDefs.push_back({INPUT_DATA, "input", size, 0});
  config.layerConfig.add_inputs();
  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "row_l2_norm", batchSize, false, useGpu, false);
  }
}
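row_l2_norm rescales each row of the input to unit L2 norm. A one-row standalone toy (values ours):

#include <cmath>
#include <cstdio>

int main() {
  float row[4] = {3.f, 4.f, 0.f, 0.f};
  float n = 0.f;
  for (float v : row) n += v * v;
  n = std::sqrt(n);  // = 5 for this row
  for (float v : row) std::printf("%g ", v / n);  // prints 0.6 0.8 0 0
  std::printf("\n");
  return 0;
}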
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  ...