机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit
6b61a096
Authored on Jan 07, 2017 by dangqingqing
Optional padding mode, namely ceil or floor, ceil by default.
Parent: c8817a19
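
The "ceil or floor" in the commit message refers to how a convolution/pooling output size is rounded when the window does not divide the input evenly by the stride. A minimal sketch of the two conventions, assuming the standard output-size formula behind the outputSize(..., /* caffeMode */ ...) calls visible in the diff below; the helper name here is hypothetical, not the exact function this commit changes:

#include <cmath>

// Hypothetical illustration of ceil- vs floor-mode output sizing; not the
// exact helper changed by this commit.
int imageOutputSize(
    int imageSize, int filterSize, int padding, int stride, bool ceilMode) {
  double span =
      static_cast<double>(imageSize - filterSize + 2 * padding) / stride;
  // Ceil mode lets a partial window at the border emit one more output
  // element; floor mode drops it.
  int output = ceilMode ? static_cast<int>(std::ceil(span))
                        : static_cast<int>(std::floor(span));
  return output + 1;
}

// Example: imageSize=10, filterSize=3, padding=0, stride=4:
// floor gives (10 - 3) / 4 + 1 = 2 outputs, ceil gives 3.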
Showing 5 changed files with 1614 additions and 1600 deletions
paddle/function/PadOp.cpp                        +8     −8
paddle/function/PadOpTest.cpp                    +25    −23
paddle/gserver/tests/test_LayerGrad.cpp          +1545  −1548
python/paddle/trainer/config_parser.py           +16    −20
python/paddle/trainer_config_helpers/layers.py   +20    −1
paddle/function/PadOp.cpp

@@ -73,10 +73,6 @@ void PadGrad<DEVICE_TYPE_CPU>(real* inGrad,
   }
 }
 
-/**
- * \param inputs[0] input value.
- * \param outputs[0] output value.
- */
 template <DeviceType Device>
 class PadFunc : public FunctionBase {
 public:
@@ -89,6 +85,10 @@ public:
     padw1_ = config.get<int>("padw1");
   }
 
+  /**
+   * \param inputs[0] input value.
+   * \param outputs[0] output value.
+   */
   void calc(const Arguments& inputs,
             const Arguments& outputs,
             const Arguments& inouts) override {
@@ -124,10 +124,6 @@ private:
   int padw1_;
 };
 
-/**
- * \param inputs[0] input grad.
- * \param outputs[0] output grad.
- */
 template <DeviceType Device>
 class PadGradFunc : public FunctionBase {
 public:
@@ -140,6 +136,10 @@ public:
     padw1_ = config.get<int>("padw1");
   }
 
+  /**
+   * \param inputs[0] output grad.
+   * \param inouts[0] input grad.
+   */
   void calc(const Arguments& inputs,
             const Arguments& outputs,
             const Arguments& inouts) override {
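
The six FuncConfig keys read in the init paths above describe leading and trailing padding on the channel, height, and width axes. A minimal sketch of how a caller supplies them, assuming Paddle's FuncConfig fluent setter and mirroring the values used in PadOpTest.cpp below:

// Sketch only: pad 2/3 channels before/after, 1/2 rows top/bottom,
// 3/2 columns left/right, matching the config.get<int>(...) keys above.
FuncConfig config = FuncConfig()
                        .set("padc0", 2)
                        .set("padc1", 3)
                        .set("padh0", 1)
                        .set("padh1", 2)
                        .set("padw0", 3)
                        .set("padw1", 2);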
paddle/function/PadOpTest.cpp

@@ -43,28 +43,30 @@ TEST(Pad, real) {
   }
 }
 
-// TEST(PadGrad, real) {
-//   for (size_t numSamples : {5, 32}) {
-//     for (size_t channels : {1, 5, 32}) {
-//       for (size_t imgSizeH : {5, 33, 100}) {
-//         for (size_t imgSizeW : {5, 32, 96}) {
-//           VLOG(3) << " numSamples=" << numSamples << " channels=" << channels
-//                   << " imgSizeH=" << imgSizeH << " imgSizeW=" << imgSizeW;
-//
-//           FunctionCompare compare("PadGrad",
-//                                   FuncConfig()
-//                                       .set("padc0", 2).set("padc1", 3)
-//                                       .set("padh0", 1).set("padh1", 2)
-//                                       .set("padw0", 3).set("padw1", 2));
-//           Dims inDims{numSamples, channels, imgSizeH, imgSizeW};
-//           Dims outDims{numSamples, channels + 5, imgSizeH + 3, imgSizeW + 5};
-//           compare.cmpWithArg({Tensor(nullptr, inDims)},
-//                              {Tensor(nullptr, outDims)},
-//                              {});
-//         }
-//       }
-//     }
-//   }
-//}
+TEST(PadGrad, real) {
+  for (size_t numSamples : {5, 32}) {
+    for (size_t channels : {1, 5, 32}) {
+      for (size_t imgSizeH : {5, 33, 100}) {
+        for (size_t imgSizeW : {5, 32, 96}) {
+          VLOG(3) << " numSamples=" << numSamples << " channels=" << channels
+                  << " imgSizeH=" << imgSizeH << " imgSizeW=" << imgSizeW;
+
+          FunctionCompare compare("PadGrad",
+                                  FuncConfig()
+                                      .set("padc0", 2)
+                                      .set("padc1", 3)
+                                      .set("padh0", 1)
+                                      .set("padh1", 2)
+                                      .set("padw0", 3)
+                                      .set("padw1", 2));
+          Dims inDims{numSamples, channels, imgSizeH, imgSizeW};
+          Dims outDims{numSamples, channels + 5, imgSizeH + 3, imgSizeW + 5};
+          compare.cmpWithArg(
+              {Tensor(nullptr, inDims)}, {}, {Tensor(nullptr, outDims)});
+        }
+      }
+    }
+  }
+}
 
 }  // namespace paddle
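
For intuition, the forward pad being tested copies each input feature map into the interior of a larger zero-initialized output, which is why outDims above equals inDims plus the per-axis pad totals (channels + 5, height + 3, width + 5). A standalone sketch of that geometry (illustration only, not the actual PadOp kernel):

#include <cstddef>
#include <vector>

// Zero-pad one H x W map by (h0, h1) rows and (w0, w1) columns, the same
// geometry the padh0/padh1/padw0/padw1 keys describe.
std::vector<float> padImage(const std::vector<float>& in, size_t H, size_t W,
                            size_t h0, size_t h1, size_t w0, size_t w1) {
  size_t outW = W + w0 + w1;
  std::vector<float> out((H + h0 + h1) * outW, 0.0f);  // zeros everywhere first
  for (size_t y = 0; y < H; ++y)
    for (size_t x = 0; x < W; ++x)
      out[(y + h0) * outW + (x + w0)] = in[y * W + x];
  return out;
}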
paddle/gserver/tests/test_LayerGrad.cpp

@@ -32,1554 +32,1551 @@ DECLARE_double(checkgrad_eps);
DECLARE_bool(thread_local_rand_use_global_seed);
DECLARE_bool(prev_batch_state);

TEST(Operator, dot_mul) {
  TestConfig config;
  config.layerConfig.set_size(10);

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 10, 0});
  config.inputDefs.push_back({INPUT_DATA, "layer_1", 10, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  OperatorConfig& operatorConf = *config.layerConfig.add_operator_confs();
  operatorConf.set_type("dot_mul");
  operatorConf.set_dotmul_scale(-1);

  testOperatorGrad(config, operatorConf, 100, false, false);
}

TEST(Projection, context) {
  for (auto contextStart : {-5, -3, -1, 0, 3}) {
    for (auto contextLength : {1, 2, 5, 7}) {
      for (auto batchSize : {1, 2, 5, 20, 50}) {
        for (auto trainablePadding : {false, true}) {
          LOG(INFO) << " contextStart=" << contextStart
                    << " contextLength=" << contextLength
                    << " batchSize=" << batchSize
                    << " trainablePadding=" << trainablePadding;
          ProjectionConfig conf;
          conf.set_type("context");
          conf.set_input_size(10);
          conf.set_context_start(contextStart);
          conf.set_context_length(contextLength);
          conf.set_trainable_padding(trainablePadding);
          conf.set_output_size(conf.context_length() * conf.input_size());
          int pad =
              std::max(0, -conf.context_start()) +
              std::max(0, conf.context_start() + conf.context_length() - 1);
          for (auto useGpu : {false, true}) {
            testProjectionGrad(
                conf,
                INPUT_SEQUENCE_DATA,
                trainablePadding ? conf.input_size() * pad : 0,
                batchSize,
                useGpu,
                contextStart + contextLength <= 1);  // = testState
          }
        }
      }
    }
  }
}

TEST(Projection, trans_fc) {
  ProjectionConfig conf;
  conf.set_type("trans_fc");
  conf.set_input_size(50);
  conf.set_output_size(20);
  for (auto useGpu : {false, true}) {
    testProjectionGrad(conf,
                       INPUT_DATA,
                       /* parameterSize */ 1000,
                       /* batchSize */ 100,
                       useGpu);
  }
}

TEST(Projection, fc) {
  ProjectionConfig conf;
  conf.set_type("fc");
  conf.set_input_size(10);
  conf.set_output_size(20);
  for (auto useGpu : {false, true}) {
    testProjectionGrad(conf,
                       INPUT_DATA,
                       /* parameterSize */ 200,
                       /* batchSize */ 100,
                       useGpu);
  }
}

TEST(Projection, dot_mul) {
  ProjectionConfig conf;
  conf.set_type("dot_mul");
  conf.set_input_size(20);
  conf.set_output_size(20);
  for (auto useGpu : {false, true}) {
    testProjectionGrad(conf,
                       INPUT_DATA,
                       /* parameterSize */ 20,
                       /* batchSize */ 100,
                       useGpu);
  }
}

TEST(Projection, table) {
  ProjectionConfig conf;
  conf.set_type("table");
  conf.set_input_size(10);
  conf.set_output_size(20);
  for (auto useGpu : {false, true}) {
    testProjectionGrad(conf,
                       INPUT_LABEL,
                       /* parameterSize */ 200,
                       /* batchSize */ 100,
                       useGpu);
  }
}

TEST(Projection, identity) {
  ProjectionConfig conf;
  conf.set_type("identity");
  conf.set_input_size(10);
  conf.set_output_size(10);
  for (auto useGpu : {false, true}) {
    testProjectionGrad(conf,
                       INPUT_DATA,
                       /* parameterSize */ 0,
                       /* batchSize */ 100,
                       useGpu);
  }
}

TEST(Projection, scaling) {
  ProjectionConfig conf;
  conf.set_type("scaling");
  conf.set_input_size(10);
  conf.set_output_size(10);
  for (auto useGpu : {false}) {
    testProjectionGrad(conf,
                       INPUT_DATA,
                       /* parameterSize */ 1,
                       /* batchSize */ 100,
                       useGpu);
  }
}

void testProjectionConv(size_t groups) {
  const int NUM_FILTERS = 18;
  const int FILTER_SIZE = 2;
  const int FILTER_SIZE_Y = 3;
  const int CHANNELS = 3;
  const int IMAGE_SIZE = 16;

  ProjectionConfig conf;
  conf.set_type("conv");
  conf.set_num_filters(NUM_FILTERS);

  ConvConfig* conv = conf.mutable_conv_conf();
  conv->set_filter_size(FILTER_SIZE);
  conv->set_filter_size_y(FILTER_SIZE_Y);
  conv->set_channels(CHANNELS);
  conv->set_padding(0);
  conv->set_padding_y(1);
  conv->set_stride(2);
  conv->set_stride_y(2);
  conv->set_groups(groups);
  conv->set_filter_channels(conv->channels() / conv->groups());
  conv->set_img_size(IMAGE_SIZE);
  int output_x = outputSize(conv->img_size(),
                            conv->filter_size(),
                            conv->padding(),
                            conv->stride(),
                            /* caffeMode */ true);
  int output_y = outputSize(conv->img_size(),
                            conv->filter_size_y(),
                            conv->padding_y(),
                            conv->stride_y(),
                            /* caffeMode */ true);
  conv->set_output_x(output_x);
  conf.set_input_size(IMAGE_SIZE * IMAGE_SIZE * CHANNELS);
  conf.set_output_size(output_x * output_y * NUM_FILTERS);

  testProjectionGrad(conf,
                     INPUT_DATA,
                     /* parameterSize */ NUM_FILTERS * CHANNELS * FILTER_SIZE *
                         FILTER_SIZE_Y / groups,
                     /* batchSize */ 100,
                     true,
                     false,
                     NUM_FILTERS,
                     true);
}

#ifndef PADDLE_ONLY_CPU
TEST(Projection, conv) {
  testProjectionConv(1);
  testProjectionConv(3);
}
#endif

TEST(Layer, BilinearInterpLayer) {
  TestConfig config;
  config.layerConfig.set_type("bilinear_interp");
  config.biasSize = 0;
  config.inputDefs.push_back({INPUT_DATA, "layer_0", 4096, 0});

  LayerInputConfig* input = config.layerConfig.add_inputs();
  BilinearInterpConfig* bilinear = input->mutable_bilinear_interp_conf();
  ImageConfig* image = bilinear->mutable_image_conf();
  image->set_img_size(32);
  image->set_img_size_y(32);
  image->set_channels(4);

  for (auto useGpu : {false, true}) {
    for (auto outSize : {32, 64}) {
      bilinear->set_out_size_x(outSize);
      bilinear->set_out_size_y(outSize);
      testLayerGrad(config, "bilinear_interp", 10, false, useGpu);
    }
  }
}

TEST(Layer, concat) {
  TestConfig config;
  config.biasSize = 0;
  config.layerConfig.set_type("concat");
  config.layerConfig.set_size(15);
  config.layerConfig.set_active_type("sigmoid");

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 5, 0});
  config.layerConfig.add_inputs();
  config.inputDefs.push_back({INPUT_DATA, "layer_1", 10, 0});
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "concat", 100, false, useGpu);
  }
}

TEST(Layer, AddtoLayer) {
  TestConfig config;
  config.biasSize = 0;
  config.layerConfig.set_type("addto");
  config.layerConfig.set_size(10);
  config.layerConfig.set_active_type("sigmoid");

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 10, 0});
  config.layerConfig.add_inputs();
  config.inputDefs.push_back({INPUT_DATA, "layer_1", 10, 0});
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "addto", 100, false, useGpu);
  }
}

TEST(Layer, CRFLayer) {
  TestConfig config;
  config.layerConfig.set_type("crf");
  config.layerConfig.set_size(10);
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_SEQUENCE_DATA, "layer_0", 10, 120});
  config.inputDefs.push_back({INPUT_SEQUENCE_LABEL, "layer_1", 10, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  // Not support GPU now
  testLayerGrad(config,
                "crf",
                100,
                /* trans */ false,
                /* useGpu */ false,
                false /*useWeight*/,
                0.03 /*epsilon*/);
}

TEST(Layer, CTCLayer) {
  TestConfig config;
  config.layerConfig.set_type("ctc");
  config.layerConfig.set_norm_by_times(false);
  config.layerConfig.set_size(10);
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_SEQUENCE_DATA, "layer_0", 10, 0});
  config.inputDefs.push_back({INPUT_SEQUENCE_LABEL, "layer_1", 10, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config,
                  "ctc",
                  100,
                  /* trans */ false,
                  /* useGpu */ useGpu);
  }
}

TEST(Layer, cosSimLayer) {
  TestConfig config;
  config.layerConfig.set_type("cos");
  config.layerConfig.set_size(1);
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 50, 0});
  config.inputDefs.push_back({INPUT_DATA, "layer_1", 50, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "cos", 100, false, useGpu);
  }
}

TEST(Layer, CosSimVecMatLayer) {
  TestConfig config;
  config.layerConfig.set_type("cos_vm");
  config.layerConfig.set_size(5);  // output size
  config.layerConfig.set_cos_scale(2.0);

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 20, 0});
  config.layerConfig.add_inputs();
  config.inputDefs.push_back({INPUT_DATA, "layer_1", 100, 0});
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "cos_vm", 100, false, useGpu);
  }
}

void testConvLayer(const string& type, bool trans, bool useGpu) {
  TestConfig config;
  config.biasSize = 16;
  config.layerConfig.set_type(type);
  config.layerConfig.set_num_filters(16);
  config.layerConfig.set_partial_sum(1);
  config.layerConfig.set_shared_biases(true);

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 384, 288});
  LayerInputConfig* input = config.layerConfig.add_inputs();
  ConvConfig* conv = input->mutable_conv_conf();
  conv->set_filter_size(2);
  conv->set_filter_size_y(3);
  conv->set_channels(3);
  conv->set_padding(0);
  conv->set_padding_y(1);
  conv->set_stride(2);
  conv->set_stride_y(2);
  conv->set_groups(1);
  conv->set_filter_channels(conv->channels() / conv->groups());
  conv->set_img_size(16);
  conv->set_img_size_y(8);
  conv->set_output_x(outputSize(conv->img_size(),
                                conv->filter_size(),
                                conv->padding(),
                                conv->stride(),
                                /* caffeMode */ true));
  conv->set_output_y(outputSize(conv->img_size_y(),
                                conv->filter_size_y(),
                                conv->padding_y(),
                                conv->stride_y(),
                                /* caffeMode */ true));
  config.layerConfig.set_size(conv->output_x() * conv->output_y() *
                              config.layerConfig.num_filters());

  testLayerGrad(config, "conv", 100, trans, useGpu);
  // Use small batch_size and useWeight=true to test biasGrad
  testLayerGrad(config, "conv", 2, trans, useGpu, true, 0.02);
}

TEST(Layer, convLayer) {
  testConvLayer("exconv", /* trans= */ false, /* useGpu= */ false);
#ifndef PADDLE_ONLY_CPU
  testConvLayer("exconv", /* trans= */ false, /* useGpu= */ true);
  testConvLayer("cudnn_conv", /* trans= */ false, /* useGpu= */ true);
#endif
}

void testConvTransLayer(const string& type, bool trans, bool useGpu) {
  TestConfig config;
  config.biasSize = 3;
  config.layerConfig.set_type(type);
  config.layerConfig.set_num_filters(3);
  config.layerConfig.set_partial_sum(1);
  config.layerConfig.set_shared_biases(true);

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 1024, 288});
  LayerInputConfig* input = config.layerConfig.add_inputs();
  ConvConfig* conv = input->mutable_conv_conf();
  conv->set_filter_size(2);
  conv->set_filter_size_y(3);
  conv->set_channels(16);
  conv->set_padding(0);
  conv->set_padding_y(1);
  conv->set_stride(2);
  conv->set_stride_y(2);
  conv->set_groups(1);
  conv->set_filter_channels(3 / conv->groups());
  conv->set_img_size(16);
  conv->set_output_x(outputSize(conv->img_size(),
                                conv->filter_size(),
                                conv->padding(),
                                conv->stride(),
                                /* caffeMode */ true));

  config.layerConfig.set_size(conv->img_size() * conv->img_size() *
                              config.layerConfig.num_filters());

  testLayerGrad(config, "convTrans", 100, trans, useGpu);
  // Use small batch_size and useWeight=true to test biasGrad
  testLayerGrad(config, "convTrans", 2, trans, useGpu, true, 0.02);
}

TEST(Layer, convTransLayer) {
  for (auto useGpu : {false, true}) {
    testConvTransLayer("exconvt", /* trans= */ false, /* useGpu= */ useGpu);
  }
}

TEST(Layer, blockExpandLayer) {
  TestConfig config;
  config.biasSize = 0;
  config.layerConfig.set_type("blockexpand");

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 6144, 0});
  LayerInputConfig* input = config.layerConfig.add_inputs();
  BlockExpandConfig* blockExpand = input->mutable_block_expand_conf();
  blockExpand->set_img_size_x(64);
  blockExpand->set_img_size_y(32);
  blockExpand->set_channels(3);
  blockExpand->set_padding_x(0);
  blockExpand->set_padding_y(0);
  blockExpand->set_block_x(4);
  blockExpand->set_block_y(32);
  blockExpand->set_stride_x(2);
  blockExpand->set_stride_y(2);
  blockExpand->set_output_x(outputSize(blockExpand->img_size_x(),
                                       blockExpand->block_x(),
                                       blockExpand->padding_x(),
                                       blockExpand->stride_x(),
                                       /* caffeMode */ false));
  blockExpand->set_output_y(outputSize(blockExpand->img_size_y(),
                                       blockExpand->block_y(),
                                       blockExpand->padding_y(),
                                       blockExpand->stride_y(),
                                       /* caffeMode */ false));
  config.layerConfig.set_size(blockExpand->block_x() * blockExpand->block_y() *
                              blockExpand->channels());

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "blockexpand", 100, false, useGpu);
  }
}

TEST(Layer, maxoutLayer) {
  TestConfig config;
  config.biasSize = 0;
  config.layerConfig.set_type("maxout");

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 4096, 0});
  LayerInputConfig* input = config.layerConfig.add_inputs();
  MaxOutConfig* maxout = input->mutable_maxout_conf();
  ImageConfig* image = maxout->mutable_image_conf();

  image->set_img_size(32);
  image->set_img_size_y(32);
  image->set_channels(4);
  maxout->set_groups(2);

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "maxout", 10, false, useGpu);
  }
}

void testFcLayer(string format, size_t nnz) {
  TestConfig config;
  config.biasSize = 4096;
  config.layerConfig.set_type("fc");
  config.layerConfig.set_size(4096);
  config.layerConfig.set_active_type("sigmoid");
  config.layerConfig.set_drop_rate(0.1);

  config.inputDefs.push_back(
      {INPUT_DATA, "layer_0", 8192, nnz, ParaSparse(format)});
  config.layerConfig.add_inputs();

  LOG(INFO) << config.inputDefs[0].sparse.sparse << " "
            << config.inputDefs[0].sparse.format;

  for (auto useGpu : {false, true}) {
    testLayerGrad(config,
                  "fc",
                  100,
                  /* trans */ false,
                  useGpu,
                  /* weight */ true);
  }
}

TEST(Layer, fcLayer) {
  testFcLayer("", 4096 * 4096 * 2);
  testFcLayer("csc", 4096 * 40);
  testFcLayer("csr", 4096 * 40);
}

TEST(Layer, SelectiveFullyConnectedLayer) {
  TestConfig config;
  size_t nin = 16;
  size_t nout = 256;
  config.layerConfig.set_type("selective_fc");
  config.layerConfig.set_size(nout);
  config.layerConfig.set_active_type("sigmoid");
  config.layerConfig.set_has_selected_colums(true);
  config.layerConfig.set_selective_fc_pass_generation(false);
  config.biasSize = nout;

  config.inputDefs.push_back({INPUT_DATA, "input0", nin, nin * nout});
  config.layerConfig.add_inputs();
  config.inputDefs.push_back(
      {INPUT_SPARSE_NON_VALUE_DATA, "index", nout, 0, ParaSparse("csr", true)});
  config.layerConfig.add_inputs();

  testLayerGrad(config,
                "selective_fc",
                100,
                /* trans= */ false,
                /* useGup= */ false,
                false);
#ifndef PADDLE_ONLY_CPU
  testLayerGrad(config,
                "selective_fc",
                100,
                /* trans= */ false,
                /* useGup= */ true,
                false);
#endif
}

TEST(Layer, DataNormLayer) {
  TestConfig config;
  config.layerConfig.set_type("data_norm");
  config.layerConfig.set_size(20);
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 20, 100});
  config.inputDefs.back().isStatic = true;
  config.layerConfig.add_inputs();

  for (auto strategy : {"z-score", "min-max", "decimal-scaling"}) {
    config.layerConfig.set_data_norm_strategy(strategy);
    // The parameters are static, so not support GPU now
    testLayerGrad(config,
                  "data_norm",
                  200,
                  /* trans */ false,
                  /* useGpu */ false);
  }
}

TEST(Layer, hsigmoidLayer) {
  TestConfig config;
  config.layerConfig.set_type("hsigmoid");
  config.layerConfig.set_num_classes(5);
  config.layerConfig.set_size(1);
  config.biasSize = config.layerConfig.num_classes() - 1;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 50, 200});
  config.inputDefs.push_back({INPUT_LABEL, "layer_1", 5, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  // Not support GPU now
  testLayerGrad(config,
                "hsigmoid",
                100,
                /* trans */ false,
                /* useGpu */ false);
}

TEST(Layer, multi_cross) {
  TestConfig config;
  config.layerConfig.set_type("multi-class-cross-entropy");
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 50, 0});
  config.inputDefs.push_back({INPUT_LABEL, "layer_1", 10, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(
        config, "multi-class-cross-entropy", 100, /* trans */ false, useGpu);
  }
}

TEST(Layer, multi_binary_label_sparse_mat) {
  TestConfig config;
  config.layerConfig.set_type("multi_binary_label_cross_entropy");
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 50, 0});
  config.inputDefs.push_back({INPUT_SPARSE_NON_VALUE_DATA, "layer_1", 50, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config,
                  "multi_binary_label_cross_entropy",
                  100,
                  /* trans */ false,
                  useGpu);
  }
}

TEST(layer, multi_binary_label_id) {
  TestConfig config;
  config.layerConfig.set_type("multi_binary_label_cross_entropy");
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 50, 0});
  config.inputDefs.push_back({INPUT_LABEL, "layer_1", 10, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config,
                  "multi_binary_label_cross_entropy",
                  100,
                  /* trans */ false,
                  useGpu);
  }
}

TEST(Layer, multi_cross_with_selfnorm) {
  TestConfig config;
  config.layerConfig.set_type("multi_class_cross_entropy_with_selfnorm");
  config.layerConfig.set_softmax_selfnorm_alpha(0.1);
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 50, 0});
  config.inputDefs.push_back({INPUT_LABEL, "layer_1", 10, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  // Not support GPU now
  testLayerGrad(config,
                "multi_class_cross_entropy_with_selfnorm",
                100,
                /* trans */ false,
                /* useGpu */ false);
}

TEST(Layer, multi_cross_soft) {
  TestConfig config;
  config.layerConfig.set_type("soft_binary_class_cross_entropy");
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 10, 0});
  config.inputDefs.push_back({INPUT_DATA_TARGET, "layer_1", 10, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config,
                  "soft_binary_class_cross_entropy",
                  100,
                  /* trans */ false,
                  useGpu);
  }
}

TEST(Layer, square_error) {
  TestConfig config;
  config.layerConfig.set_type("square_error");
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 10, 0});
  config.inputDefs.push_back({INPUT_DATA_TARGET, "layer_1", 10, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "square_error", 100, /* trans */ false, useGpu);
  }
}

TEST(Layer, sparse_square_error) {
  TestConfig config;
  config.layerConfig.set_type("square_error");
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 50, 0});
  config.inputDefs.push_back({INPUT_SPARSE_NON_VALUE_DATA, "layer_1", 50, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  // "GpuSparseMatrix" as label is not supported
  testLayerGrad(config,
                "square_error",
                100,
                /* trans */ false,
                /* useGpu */ false);
}

TEST(Layer, sparse_float_square_error) {
  TestConfig config;
  config.layerConfig.set_type("square_error");
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 50, 0});
  config.inputDefs.push_back({INPUT_SPARSE_FLOAT_VALUE_DATA, "layer_1", 50, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  // "GpuSparseMatrix" as label is not supported
  testLayerGrad(config,
                "square_error",
                100,
                /* trans */ false,
                /* useGpu */ false);
}

TEST(Layer, square_error_weighted) {
  TestConfig config;
  config.layerConfig.set_type("square_error");
  config.biasSize = 0;
  config.testAccumulate = false;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 10, 0});
  config.inputDefs.push_back({INPUT_DATA_TARGET, "layer_1", 10, 0});
  config.inputDefs.push_back({INPUT_DATA_TARGET, "layer_2", 1, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "square_error", 100, /* trans */ false, useGpu);
  }
}

TEST(Layer, huber_two_class) {
  TestConfig config;
  config.layerConfig.set_type("huber");
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 1, 0});
  config.inputDefs.push_back({INPUT_LABEL, "layer_1", 2, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "huber", 100, /* trans */ false, useGpu);
  }
}

void testExpandLayer(string trans_type, bool hasSubseq) {
  TestConfig config;
  config.layerConfig.set_type("expand");

  config.inputDefs.push_back(
      {trans_type == "non-seq" ? INPUT_DENSE_DIM_DATA : INPUT_SEQUENCE_DATA,
       "layer_0",
       10,
       0});
  config.inputDefs.push_back(
      {hasSubseq ? INPUT_HASSUB_SEQUENCE_DATA : INPUT_SEQUENCE_DATA,
       "layer_1",
       10,
       0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();
  config.layerConfig.set_trans_type(trans_type);
  LOG(INFO) << " trans_type=" << trans_type << " hasSubseq=" << hasSubseq;

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "expand", 30, false, useGpu);
  }
}

TEST(Layer, ExpandLayer) {
  testExpandLayer("non-seq", false);  // non-seq expand to seq
  testExpandLayer("non-seq", true);   // non-seq expand to hasSubseq
  testExpandLayer("seq", true);       // seq expand to hasSubseq
}

void testDegradeLayer(bool hasSubseq, string layer_type, string trans_type) {
  TestConfig config;
  config.layerConfig.set_type(layer_type);
  config.layerConfig.set_size(10);
  config.biasSize = 0;

  config.inputDefs.push_back(
      {hasSubseq ? INPUT_HASSUB_SEQUENCE_DATA : INPUT_SEQUENCE_DATA,
       "layer_0",
       10,
       0});
  config.layerConfig.add_inputs();
  config.layerConfig.set_trans_type(trans_type);

  auto testDegradeLayerGrad = [](TestConfig& config, string layer_type) {
    for (auto useGpu : {false, true}) {
      testLayerGrad(config, layer_type, 100, false, useGpu);
    }
  };

  if (layer_type == "average") {
    for (auto strategy : {"average", "sum", "squarerootn"}) {
      LOG(INFO) << " hasSubseq=" << hasSubseq << " trans_type=" << trans_type
                << " average_strategy=" << strategy;
      config.layerConfig.set_average_strategy(strategy);
      testDegradeLayerGrad(config, layer_type);
    }
  } else {
    LOG(INFO) << " hasSubseq=" << hasSubseq << " trans_type=" << trans_type;
    testDegradeLayerGrad(config, layer_type);
  }
}

TEST(Layer, MaxLayer) {
  testDegradeLayer(false, "max", "non-seq");  // seq max to non-seq
  testDegradeLayer(true, "max", "non-seq");   // hasSubseq max to non-seq
  testDegradeLayer(true, "max", "seq");       // hasSubseq max to seq
}

TEST(Layer, SequenceLastInstanceLayer) {
  testDegradeLayer(false,
                   "seqlastins",
                   "non-seq");  // seq seqlastins to non-seq
  testDegradeLayer(true,
                   "seqlastins",
                   "non-seq");  // hasSubseq seqlastins to non-seq
  testDegradeLayer(true, "seqlastins", "seq");  // hasSubseq seqlastins to seq
}

TEST(Layer, AverageLayer) {
  testDegradeLayer(false, "average", "non-seq");  // seq average to non-seq
  testDegradeLayer(true, "average", "non-seq");   // hasSubseq average to
                                                  // non-seq
  testDegradeLayer(true, "average", "seq");       // hasSubseq average to seq
}

TEST(Layer, SequenceConcatLayer) {
  TestConfig config;
  config.layerConfig.set_type("seqconcat");
  config.layerConfig.set_size(10);
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_SEQUENCE_DATA, "layer_0", 10, 0});
  config.layerConfig.add_inputs();
  config.inputDefs.push_back({INPUT_SEQUENCE_DATA, "layer_1", 10, 0});
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "seqconcat", 100, false, useGpu);
  }
}

TEST(Layer, SequenceReshapeLayer) {
  TestConfig config;
  config.layerConfig.set_type("seqreshape");
  config.layerConfig.set_size(10);

  config.inputDefs.push_back({INPUT_SEQUENCE_DATA, "layer_0", 100, 0});
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "seqreshape", 100, false, useGpu);
  }
}

TEST(Layer, ConvShiftLayer) {
  TestConfig config;
  config.layerConfig.set_type("conv_shift");
  config.layerConfig.set_size(10);

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 10, 0});
  config.inputDefs.push_back({INPUT_DATA, "layer_1", 3, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  // Not support GPU now
  testLayerGrad(config, "conv_shift", 100, false, false);
}

TEST(Layer, PowerLayer) {
  TestConfig config;
  config.layerConfig.set_type("power");
  config.layerConfig.set_size(10);

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 1, 0});
  config.inputDefs.push_back({INPUT_DATA, "layer_1", 10, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "power", 100, false, useGpu);
  }
}

TEST(Layer, ConvexCombinationLayer) {
  TestConfig config;
  config.layerConfig.set_type("convex_comb");
  config.layerConfig.set_size(20);
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 5, 0});
  config.inputDefs.push_back({INPUT_DATA, "layer_1", 100, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "convex_comb", 100, false, useGpu);
  }
}

TEST(Layer, InterpolationLayer) {
  TestConfig config;
  config.layerConfig.set_type("interpolation");
  config.layerConfig.set_size(10);
  config.biasSize = 0;

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 1, 0});
  config.inputDefs.push_back({INPUT_DATA, "layer_1", 10, 0});
  config.inputDefs.push_back({INPUT_DATA, "layer_2", 10, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "interpolation", 100, false, useGpu);
  }
}

TEST(Layer, OuterProdLayer) {
  TestConfig config;
  config.layerConfig.set_type("out_prod");
  config.layerConfig.set_size(100);

  config.inputDefs.push_back({INPUT_DATA, "layer_0", 10, 0});
  config.layerConfig.add_inputs();
  config.inputDefs.push_back({INPUT_DATA, "layer_1", 10, 0});
  config.layerConfig.add_inputs();

  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "out_prod", 100, false, useGpu);
  }
}

TEST(Layer, SlopeInterceptLayer) {
  TestConfig config;
  config.layerConfig.set_type("slope_intercept"
);
// config.layerConfig.set_size(10);
config
.
layerConfig
.
set_size
(
10
);
// config.layerConfig.set_slope(1.0);
config
.
layerConfig
.
set_slope
(
1.0
);
// config.layerConfig.set_intercept(0.1);
config
.
layerConfig
.
set_intercept
(
0.1
);
//
// config.inputDefs.push_back({INPUT_DATA, "layer_0", 10, 0});
config
.
inputDefs
.
push_back
({
INPUT_DATA
,
"layer_0"
,
10
,
0
});
// config.layerConfig.add_inputs();
config
.
layerConfig
.
add_inputs
();
//
// for (auto useGpu : {false, true}) {
for
(
auto
useGpu
:
{
false
,
true
})
{
// testLayerGrad(config, "slope_intercept", 100, false, useGpu);
testLayerGrad
(
config
,
"slope_intercept"
,
100
,
false
,
useGpu
);
// }
}
// }
}
//
// TEST(Layer, ScalingLayer) {
TEST
(
Layer
,
ScalingLayer
)
{
// TestConfig config;
TestConfig
config
;
// config.layerConfig.set_type("scaling");
config
.
layerConfig
.
set_type
(
"scaling"
);
// config.layerConfig.set_size(10);
config
.
layerConfig
.
set_size
(
10
);
// config.biasSize = 0;
config
.
biasSize
=
0
;
//
// config.inputDefs.push_back({INPUT_DATA, "layer_0", 1, 0});
config
.
inputDefs
.
push_back
({
INPUT_DATA
,
"layer_0"
,
1
,
0
});
// config.layerConfig.add_inputs();
config
.
layerConfig
.
add_inputs
();
// config.inputDefs.push_back({INPUT_DATA, "layer_1", 10, 0});
config
.
inputDefs
.
push_back
({
INPUT_DATA
,
"layer_1"
,
10
,
0
});
// config.layerConfig.add_inputs();
config
.
layerConfig
.
add_inputs
();
//
// for (auto useGpu : {false, true}) {
for
(
auto
useGpu
:
{
false
,
true
})
{
// testLayerGrad(config, "scaling", 100, false, useGpu);
testLayerGrad
(
config
,
"scaling"
,
100
,
false
,
useGpu
);
// }
}
// }
}
//
// void testNormLayer(const string& normType, bool trans, bool useGpu) {
void
testNormLayer
(
const
string
&
normType
,
bool
trans
,
bool
useGpu
)
{
// TestConfig config;
TestConfig
config
;
// config.layerConfig.set_type("norm");
config
.
layerConfig
.
set_type
(
"norm"
);
// config.layerConfig.set_active_type("relu");
config
.
layerConfig
.
set_active_type
(
"relu"
);
//
// config.inputDefs.push_back({INPUT_DATA, "layer_0", 1568, 0});
config
.
inputDefs
.
push_back
({
INPUT_DATA
,
"layer_0"
,
1568
,
0
});
// LayerInputConfig* input = config.layerConfig.add_inputs();
LayerInputConfig
*
input
=
config
.
layerConfig
.
add_inputs
();
// NormConfig* norm = input->mutable_norm_conf();
NormConfig
*
norm
=
input
->
mutable_norm_conf
();
// norm->set_norm_type(normType);
norm
->
set_norm_type
(
normType
);
// norm->set_channels(16);
norm
->
set_channels
(
16
);
// norm->set_size(5);
norm
->
set_size
(
5
);
// norm->set_scale(0.001);
norm
->
set_scale
(
0.001
);
// norm->set_pow(0.75);
norm
->
set_pow
(
0.75
);
// norm->set_blocked(0);
norm
->
set_blocked
(
0
);
// norm->set_img_size(14);
norm
->
set_img_size
(
14
);
// norm->set_img_size_y(7);
norm
->
set_img_size_y
(
7
);
// norm->set_output_x(norm->img_size());
norm
->
set_output_x
(
norm
->
img_size
());
// norm->set_output_y(norm->img_size_y());
norm
->
set_output_y
(
norm
->
img_size_y
());
// if (norm->norm_type() == "cmrnorm" ||
if
(
norm
->
norm_type
()
==
"cmrnorm"
||
// norm->norm_type() == "cmrnorm-projection") {
norm
->
norm_type
()
==
"cmrnorm-projection"
)
{
// norm->set_scale(norm->scale() / norm->size());
norm
->
set_scale
(
norm
->
scale
()
/
norm
->
size
());
// } else {
}
else
{
// norm->set_scale(norm->scale() / (norm->size() * norm->size()));
norm
->
set_scale
(
norm
->
scale
()
/
(
norm
->
size
()
*
norm
->
size
()));
// }
}
//
// config.layerConfig.set_size(norm->output_x() * norm->output_y() *
config
.
layerConfig
.
set_size
(
norm
->
output_x
()
*
norm
->
output_y
()
*
// norm->channels());
norm
->
channels
());
// config.biasSize = 0;
config
.
biasSize
=
0
;
//
// testLayerGrad(config, "norm", 100, trans, useGpu);
testLayerGrad
(
config
,
"norm"
,
100
,
trans
,
useGpu
);
// }
}
//
// TEST(Layer, NormLayer) {
TEST
(
Layer
,
NormLayer
)
{
// testNormLayer("cmrnorm-projection", /* trans= */ false, /* useGpu= */
testNormLayer
(
"cmrnorm-projection"
,
// true);
/* trans= */
false
,
/* useGpu= */
// testNormLayer("cmrnorm-projection", /* trans= */ false, /* useGpu= */
true
);
// false);
testNormLayer
(
"cmrnorm-projection"
,
// }
/* trans= */
false
,
/* useGpu= */
//
false
);
// void setPoolConfig(TestConfig* config,
}
// PoolConfig* pool,
// const string& poolType) {
void
setPoolConfig
(
TestConfig
*
config
,
// (*config).biasSize = 0;
PoolConfig
*
pool
,
// (*config).layerConfig.set_type("pool");
const
string
&
poolType
)
{
// (*config).layerConfig.set_num_filters(16);
(
*
config
).
biasSize
=
0
;
//
(
*
config
).
layerConfig
.
set_type
(
"pool"
);
// int kw = 3, kh = 3;
(
*
config
).
layerConfig
.
set_num_filters
(
16
);
// int pw = 0, ph = 0;
// int sw = 2, sh = 2;
int
kw
=
3
,
kh
=
3
;
// pool->set_pool_type(poolType);
int
pw
=
0
,
ph
=
0
;
// pool->set_channels(16);
int
sw
=
2
,
sh
=
2
;
// pool->set_size_x(kw);
pool
->
set_pool_type
(
poolType
);
// pool->set_size_y(kh);
pool
->
set_channels
(
16
);
// pool->set_start(0);
pool
->
set_size_x
(
kw
);
// pool->set_padding(pw);
pool
->
set_size_y
(
kh
);
// pool->set_padding_y(ph);
pool
->
set_start
(
0
);
// pool->set_stride(sw);
pool
->
set_padding
(
pw
);
// pool->set_stride_y(sh);
pool
->
set_padding_y
(
ph
);
//
pool
->
set_stride
(
sw
);
// int ow = outputSize(pool->img_size(), kw, pw, sw, /* caffeMode */ false);
pool
->
set_stride_y
(
sh
);
// int oh = outputSize(pool->img_size_y(), kh, ph, sh, /* caffeMode */ false);
// pool->set_output_x(ow);
int
ow
=
outputSize
(
pool
->
img_size
(),
kw
,
pw
,
sw
,
/* caffeMode */
false
);
// pool->set_output_y(oh);
int
oh
=
outputSize
(
pool
->
img_size_y
(),
kh
,
ph
,
sh
,
/* caffeMode */
false
);
// }
pool
->
set_output_x
(
ow
);
//
pool
->
set_output_y
(
oh
);
// void testPoolLayer(const string& poolType, bool trans, bool useGpu) {
}
// TestConfig config;
// config.inputDefs.push_back({INPUT_DATA, "layer_0", 3136, 0});
void
testPoolLayer
(
const
string
&
poolType
,
bool
trans
,
bool
useGpu
)
{
// LayerInputConfig* input = config.layerConfig.add_inputs();
TestConfig
config
;
// PoolConfig* pool = input->mutable_pool_conf();
config
.
inputDefs
.
push_back
({
INPUT_DATA
,
"layer_0"
,
3136
,
0
});
//
LayerInputConfig
*
input
=
config
.
layerConfig
.
add_inputs
();
// pool->set_img_size(14);
PoolConfig
*
pool
=
input
->
mutable_pool_conf
();
// pool->set_img_size_y(14);
// setPoolConfig(&config, pool, poolType);
pool
->
set_img_size
(
14
);
// config.layerConfig.set_size(pool->output_x() * pool->output_y() *
pool
->
set_img_size_y
(
14
);
// pool->channels());
setPoolConfig
(
&
config
,
pool
,
poolType
);
//
config
.
layerConfig
.
set_size
(
pool
->
output_x
()
*
pool
->
output_y
()
*
// testLayerGrad(config, "pool", 100, trans, useGpu);
pool
->
channels
());
// }
//
testLayerGrad
(
config
,
"pool"
,
100
,
trans
,
useGpu
);
// #ifndef PADDLE_ONLY_CPU
}
// void testPoolLayer2(const string& poolType, bool trans, bool useGpu) {
// TestConfig config;
#ifndef PADDLE_ONLY_CPU
// config.inputDefs.push_back({INPUT_DATA, "layer_0", 3200, 0});
void
testPoolLayer2
(
const
string
&
poolType
,
bool
trans
,
bool
useGpu
)
{
// LayerInputConfig* input = config.layerConfig.add_inputs();
TestConfig
config
;
// PoolConfig* pool = input->mutable_pool_conf();
config
.
inputDefs
.
push_back
({
INPUT_DATA
,
"layer_0"
,
3200
,
0
});
//
LayerInputConfig
*
input
=
config
.
layerConfig
.
add_inputs
();
// pool->set_size_y(4);
PoolConfig
*
pool
=
input
->
mutable_pool_conf
();
// pool->set_stride_y(3);
// pool->set_img_size(10);
pool
->
set_size_y
(
4
);
// pool->set_img_size_y(20);
pool
->
set_stride_y
(
3
);
// setPoolConfig(&config, pool, poolType);
pool
->
set_img_size
(
10
);
// pool->set_output_y((pool->img_size_y() - pool->start() - pool->size_y()) /
pool
->
set_img_size_y
(
20
);
// ((float)pool->stride_y()) +
setPoolConfig
(
&
config
,
pool
,
poolType
);
// 1.5);
pool
->
set_output_y
((
pool
->
img_size_y
()
-
pool
->
start
()
-
pool
->
size_y
())
/
// config.layerConfig.set_size(pool->output_x() * pool->output_y() *
((
float
)
pool
->
stride_y
())
+
// pool->channels());
1.5
);
//
config
.
layerConfig
.
set_size
(
pool
->
output_x
()
*
pool
->
output_y
()
*
// testLayerGrad(config, "pool", 100, trans, useGpu);
pool
->
channels
());
// }
// #endif
testLayerGrad
(
config
,
"pool"
,
100
,
trans
,
useGpu
);
//
}
// TEST(Layer, PoolLayer) {
#endif
// testPoolLayer("avg-projection", /* trans= */ false, /* useGpu= */ false);
// testPoolLayer("max-projection", /* trans= */ false, /* useGpu= */ false);
TEST
(
Layer
,
PoolLayer
)
{
//
testPoolLayer
(
"avg-projection"
,
/* trans= */
false
,
/* useGpu= */
false
);
// #ifndef PADDLE_ONLY_CPU
testPoolLayer
(
"max-projection"
,
/* trans= */
false
,
/* useGpu= */
false
);
// testPoolLayer("avg-projection", /* trans= */ false, /* useGpu= */ true);
// testPoolLayer("max-projection", /* trans= */ false, /* useGpu= */ true);
#ifndef PADDLE_ONLY_CPU
// testPoolLayer("cudnn-max-pool", /* trans= */ false, /* useGpu= */ true);
testPoolLayer
(
"avg-projection"
,
/* trans= */
false
,
/* useGpu= */
true
);
// testPoolLayer("cudnn-avg-pool", /* trans= */ false, /* useGpu= */ true);
testPoolLayer
(
"max-projection"
,
/* trans= */
false
,
/* useGpu= */
true
);
// testPoolLayer2("cudnn-max-pool", /* trans= */ false, /* useGpu= */ true);
testPoolLayer
(
"cudnn-max-pool"
,
/* trans= */
false
,
/* useGpu= */
true
);
// testPoolLayer2("cudnn-avg-pool", /* trans= */ false, /* useGpu= */ true);
testPoolLayer
(
"cudnn-avg-pool"
,
/* trans= */
false
,
/* useGpu= */
true
);
// #endif
testPoolLayer2
(
"cudnn-max-pool"
,
/* trans= */
false
,
/* useGpu= */
true
);
// }
testPoolLayer2
(
"cudnn-avg-pool"
,
/* trans= */
false
,
/* useGpu= */
true
);
//
#endif
// void testSppLayer(const string& poolType,
}
// const int pyramidHeight,
// bool trans,
void
testSppLayer
(
const
string
&
poolType
,
// bool useGpu) {
const
int
pyramidHeight
,
// TestConfig config;
bool
trans
,
// config.layerConfig.set_type("spp");
bool
useGpu
)
{
// config.inputDefs.push_back({INPUT_DATA, "layer_0", 3200, 0});
TestConfig
config
;
// LayerInputConfig* input = config.layerConfig.add_inputs();
config
.
layerConfig
.
set_type
(
"spp"
);
// SppConfig* sppConfig = input->mutable_spp_conf();
config
.
inputDefs
.
push_back
({
INPUT_DATA
,
"layer_0"
,
3200
,
0
});
// sppConfig->set_pool_type(poolType);
LayerInputConfig
*
input
=
config
.
layerConfig
.
add_inputs
();
// sppConfig->set_pyramid_height(pyramidHeight);
SppConfig
*
sppConfig
=
input
->
mutable_spp_conf
();
// ImageConfig* imageConfig = sppConfig->mutable_image_conf();
sppConfig
->
set_pool_type
(
poolType
);
// imageConfig->set_channels(16);
sppConfig
->
set_pyramid_height
(
pyramidHeight
);
// imageConfig->set_img_size(10);
ImageConfig
*
imageConfig
=
sppConfig
->
mutable_image_conf
();
// imageConfig->set_img_size_y(20);
imageConfig
->
set_channels
(
16
);
// int outputSize = (std::pow(4, sppConfig->pyramid_height()) - 1) / (4 - 1);
imageConfig
->
set_img_size
(
10
);
// config.layerConfig.set_size(outputSize * imageConfig->channels());
imageConfig
->
set_img_size_y
(
20
);
// testLayerGrad(config, "spp", 100, trans, useGpu);
int
outputSize
=
(
std
::
pow
(
4
,
sppConfig
->
pyramid_height
())
-
1
)
/
(
4
-
1
);
// }
config
.
layerConfig
.
set_size
(
outputSize
*
imageConfig
->
channels
());
//
testLayerGrad
(
config
,
"spp"
,
100
,
trans
,
useGpu
);
// TEST(Layer, SpatialPyramidPoolLayer) {
}
// for (auto useGpu : {false, true}) {
// for (auto pyramidHeight : {1, 2, 3}) {
TEST
(
Layer
,
SpatialPyramidPoolLayer
)
{
// testSppLayer("avg-projection", pyramidHeight, false, useGpu);
for
(
auto
useGpu
:
{
false
,
true
})
{
// testSppLayer("max-projection", pyramidHeight, false, useGpu);
for
(
auto
pyramidHeight
:
{
1
,
2
,
3
})
{
// }
testSppLayer
(
"avg-projection"
,
pyramidHeight
,
false
,
useGpu
);
// }
testSppLayer
(
"max-projection"
,
pyramidHeight
,
false
,
useGpu
);
// }
}
//
}
// TEST(Layer, rankCostLayer) {
}
// TestConfig config;
// config.layerConfig.set_type("rank-cost");
TEST
(
Layer
,
rankCostLayer
)
{
// config.biasSize = 0;
TestConfig
config
;
//
config
.
layerConfig
.
set_type
(
"rank-cost"
);
// config.inputDefs.push_back({INPUT_DATA, "layer_0", 1, 0});
config
.
biasSize
=
0
;
// config.inputDefs.push_back({INPUT_DATA, "layer_1", 1, 0});
// config.inputDefs.push_back({INPUT_DATA_TARGET, "layer_2", 1, 0});
config
.
inputDefs
.
push_back
({
INPUT_DATA
,
"layer_0"
,
1
,
0
});
// config.layerConfig.add_inputs();
config
.
inputDefs
.
push_back
({
INPUT_DATA
,
"layer_1"
,
1
,
0
});
// config.layerConfig.add_inputs();
config
.
inputDefs
.
push_back
({
INPUT_DATA_TARGET
,
"layer_2"
,
1
,
0
});
// config.layerConfig.add_inputs();
config
.
layerConfig
.
add_inputs
();
//
config
.
layerConfig
.
add_inputs
();
// for (auto useGpu : {false, true}) {
config
.
layerConfig
.
add_inputs
();
// testLayerGrad(config, "rank-cost", 100, false, useGpu);
// }
for
(
auto
useGpu
:
{
false
,
true
})
{
// }
testLayerGrad
(
config
,
"rank-cost"
,
100
,
false
,
useGpu
);
//
}
// TEST(Layer, sumCostLayer) {
}
// TestConfig config;
// config.layerConfig.set_type("sum_cost");
TEST
(
Layer
,
sumCostLayer
)
{
// config.biasSize = 0;
TestConfig
config
;
//
config
.
layerConfig
.
set_type
(
"sum_cost"
);
// config.inputDefs.push_back({INPUT_DATA, "layer_0", 1, 0});
config
.
biasSize
=
0
;
// config.layerConfig.add_inputs();
//
config
.
inputDefs
.
push_back
({
INPUT_DATA
,
"layer_0"
,
1
,
0
});
// for (auto useGpu : {false, true}) {
config
.
layerConfig
.
add_inputs
();
// testLayerGrad(config, "sum_cost", 100, false, useGpu);
// }
for
(
auto
useGpu
:
{
false
,
true
})
{
// }
testLayerGrad
(
config
,
"sum_cost"
,
100
,
false
,
useGpu
);
//
}
// TEST(Layer, weightedRankCostLayer) {
}
// TestConfig config;
// config.layerConfig.set_type("rank-cost");
TEST
(
Layer
,
weightedRankCostLayer
)
{
// config.biasSize = 0;
TestConfig
config
;
//
config
.
layerConfig
.
set_type
(
"rank-cost"
);
// config.inputDefs.push_back({INPUT_DATA, "layer_0", 1, 0});
config
.
biasSize
=
0
;
// config.inputDefs.push_back({INPUT_DATA, "layer_1", 1, 0});
// config.inputDefs.push_back({INPUT_DATA_TARGET, "layer_2", 1, 0});
config
.
inputDefs
.
push_back
({
INPUT_DATA
,
"layer_0"
,
1
,
0
});
// config.inputDefs.push_back({INPUT_DATA_TARGET, "layer_3", 1, 0});
config
.
inputDefs
.
push_back
({
INPUT_DATA
,
"layer_1"
,
1
,
0
});
// config.layerConfig.add_inputs();
config
.
inputDefs
.
push_back
({
INPUT_DATA_TARGET
,
"layer_2"
,
1
,
0
});
// config.layerConfig.add_inputs();
config
.
inputDefs
.
push_back
({
INPUT_DATA_TARGET
,
"layer_3"
,
1
,
0
});
// config.layerConfig.add_inputs();
config
.
layerConfig
.
add_inputs
();
// config.layerConfig.add_inputs();
config
.
layerConfig
.
add_inputs
();
//
config
.
layerConfig
.
add_inputs
();
// for (auto useGpu : {false, true}) {
config
.
layerConfig
.
add_inputs
();
// testLayerGrad(config, "weighted-rank-cost", 100, false, useGpu);
// }
for
(
auto
useGpu
:
{
false
,
true
})
{
// }
testLayerGrad
(
config
,
"weighted-rank-cost"
,
100
,
false
,
useGpu
);
//
}
// TEST(Layer, TensorLayer) {
}
// TestConfig config;
// config.layerConfig.set_type("tensor");
TEST
(
Layer
,
TensorLayer
)
{
// config.layerConfig.set_size(10);
TestConfig
config
;
// config.layerConfig.set_active_type("sigmoid");
config
.
layerConfig
.
set_type
(
"tensor"
);
// config.biasSize = config.layerConfig.size();
config
.
layerConfig
.
set_size
(
10
);
//
config
.
layerConfig
.
set_active_type
(
"sigmoid"
);
// config.inputDefs.push_back({INPUT_DATA, "layer_0", 5, 250});
config
.
biasSize
=
config
.
layerConfig
.
size
();
// config.inputDefs.push_back({INPUT_DATA, "layer_1", 5, 0});
// config.layerConfig.add_inputs();
config
.
inputDefs
.
push_back
({
INPUT_DATA
,
"layer_0"
,
5
,
250
});
// config.layerConfig.add_inputs();
config
.
inputDefs
.
push_back
({
INPUT_DATA
,
"layer_1"
,
5
,
0
});
//
config
.
layerConfig
.
add_inputs
();
// for (auto useGpu : {false, true}) {
config
.
layerConfig
.
add_inputs
();
// testLayerGrad(config, "tensor", 100, false, useGpu);
// }
for
(
auto
useGpu
:
{
false
,
true
})
{
// }
testLayerGrad
(
config
,
"tensor"
,
100
,
false
,
useGpu
);
//
}
// TEST(Layer, RecurrentLayer) {
}
// TestConfig config;
// config.layerConfig.set_type("recurrent");
TEST
(
Layer
,
RecurrentLayer
)
{
// config.layerConfig.set_size(4);
TestConfig
config
;
// config.layerConfig.set_active_type("tanh");
config
.
layerConfig
.
set_type
(
"recurrent"
);
// config.biasSize = 4;
config
.
layerConfig
.
set_size
(
4
);
//
config
.
layerConfig
.
set_active_type
(
"tanh"
);
// config.inputDefs.push_back(
config
.
biasSize
=
4
;
// {INPUT_SEQUENCE_DATA, "layer_0", /* dim= */ 4, /* paraSize= */ 16});
// config.layerConfig.add_inputs();
config
.
inputDefs
.
push_back
(
//
{
INPUT_SEQUENCE_DATA
,
"layer_0"
,
/* dim= */
4
,
/* paraSize= */
16
});
// for (auto useGpu : {false, true}) {
config
.
layerConfig
.
add_inputs
();
// for (auto reversed : {false, true}) {
// config.layerConfig.set_reversed(reversed);
for
(
auto
useGpu
:
{
false
,
true
})
{
// config.testState = !reversed;
for
(
auto
reversed
:
{
false
,
true
})
{
// testLayerGrad(config, "recurrent", 50, /* trans= */ false, useGpu);
config
.
layerConfig
.
set_reversed
(
reversed
);
// }
config
.
testState
=
!
reversed
;
// }
testLayerGrad
(
config
,
"recurrent"
,
50
,
/* trans= */
false
,
useGpu
);
// }
}
//
}
// TEST(Layer, LstmLayer) {
}
// TestConfig config;
// config.layerConfig.set_type("lstmemory");
TEST
(
Layer
,
LstmLayer
)
{
// config.layerConfig.set_size(4);
TestConfig
config
;
// config.layerConfig.set_active_type("tanh");
config
.
layerConfig
.
set_type
(
"lstmemory"
);
// config.layerConfig.set_active_state_type("sigmoid");
config
.
layerConfig
.
set_size
(
4
);
// config.layerConfig.set_active_gate_type("sigmoid");
config
.
layerConfig
.
set_active_type
(
"tanh"
);
// config.biasSize = 28;
config
.
layerConfig
.
set_active_state_type
(
"sigmoid"
);
//
config
.
layerConfig
.
set_active_gate_type
(
"sigmoid"
);
// config.inputDefs.push_back(
config
.
biasSize
=
28
;
// {INPUT_SEQUENCE_DATA, "layer_0", /* dim= */ 16, /* paraSize= */ 64});
// config.layerConfig.add_inputs();
config
.
inputDefs
.
push_back
(
//
{
INPUT_SEQUENCE_DATA
,
"layer_0"
,
/* dim= */
16
,
/* paraSize= */
64
});
// for (auto useGpu : {false, true}) {
config
.
layerConfig
.
add_inputs
();
// for (auto reversed : {false, true}) {
// config.layerConfig.set_reversed(reversed);
for
(
auto
useGpu
:
{
false
,
true
})
{
// config.testState = !reversed;
for
(
auto
reversed
:
{
false
,
true
})
{
// testLayerGrad(config, "lstmemory", 100, /* trans= */ false, useGpu);
config
.
layerConfig
.
set_reversed
(
reversed
);
// }
config
.
testState
=
!
reversed
;
// }
testLayerGrad
(
config
,
"lstmemory"
,
100
,
/* trans= */
false
,
useGpu
);
// for (auto useGpu : {true}) {
}
// config.testBatchState = true;
}
// config.layerConfig.set_reversed(false);
for
(
auto
useGpu
:
{
true
})
{
// testLayerGrad(config, "lstmemory", 10, /* trans= */ false, useGpu);
config
.
testBatchState
=
true
;
// }
config
.
layerConfig
.
set_reversed
(
false
);
// }
testLayerGrad
(
config
,
"lstmemory"
,
10
,
/* trans= */
false
,
useGpu
);
//
}
// TEST(Layer, MDLstmLayer) {
}
// TestConfig config;
// config.layerConfig.set_type("mdlstmemory");
TEST
(
Layer
,
MDLstmLayer
)
{
// config.layerConfig.set_size(4);
TestConfig
config
;
// config.layerConfig.set_active_type("sigmoid");
config
.
layerConfig
.
set_type
(
"mdlstmemory"
);
// config.layerConfig.set_active_state_type("sigmoid");
config
.
layerConfig
.
set_size
(
4
);
// config.layerConfig.set_active_gate_type("sigmoid");
config
.
layerConfig
.
set_active_type
(
"sigmoid"
);
// config.biasSize = 4 * 9;
config
.
layerConfig
.
set_active_state_type
(
"sigmoid"
);
//
config
.
layerConfig
.
set_active_gate_type
(
"sigmoid"
);
// config.inputDefs.push_back(
config
.
biasSize
=
4
*
9
;
// {INPUT_SEQUENCE_MDIM_DATA, "layer_0", 4 * 5, 4 * 4 * 5});
// config.layerConfig.add_inputs();
config
.
inputDefs
.
push_back
(
// config.layerConfig.add_directions(true);
{
INPUT_SEQUENCE_MDIM_DATA
,
"layer_0"
,
4
*
5
,
4
*
4
*
5
});
// config.layerConfig.add_directions(true);
config
.
layerConfig
.
add_inputs
();
//
config
.
layerConfig
.
add_directions
(
true
);
// for (auto useGpu : {false, true}) {
config
.
layerConfig
.
add_directions
(
true
);
// for (int i = 0; i < 2; i++) {
// for (int j = 0; j < 2; j++) {
for
(
auto
useGpu
:
{
false
,
true
})
{
// config.layerConfig.set_directions(0, bool(i));
for
(
int
i
=
0
;
i
<
2
;
i
++
)
{
// config.layerConfig.set_directions(1, bool(j));
for
(
int
j
=
0
;
j
<
2
;
j
++
)
{
// testLayerGrad(config, "mdlstmemory", 100, false, useGpu);
config
.
layerConfig
.
set_directions
(
0
,
bool
(
i
));
// }
config
.
layerConfig
.
set_directions
(
1
,
bool
(
j
));
// }
testLayerGrad
(
config
,
"mdlstmemory"
,
100
,
false
,
useGpu
);
// }
}
// }
}
//
}
// TEST(Layer, ParameterReluLayer) {
}
// auto testParameterReluLayer = [&](size_t inputSize, size_t channels) {
// TestConfig config;
TEST
(
Layer
,
ParameterReluLayer
)
{
// config.layerConfig.set_type("prelu");
auto
testParameterReluLayer
=
[
&
](
size_t
inputSize
,
size_t
channels
)
{
// config.inputDefs.push_back({INPUT_DATA, "layer_0", inputSize, channels});
TestConfig
config
;
// config.layerConfig.add_inputs();
config
.
layerConfig
.
set_type
(
"prelu"
);
// config.layerConfig.set_size(inputSize);
config
.
inputDefs
.
push_back
({
INPUT_DATA
,
"layer_0"
,
inputSize
,
channels
});
// config.layerConfig.set_partial_sum(inputSize /
config
.
layerConfig
.
add_inputs
();
// channels); // size of feature map
config
.
layerConfig
.
set_size
(
inputSize
);
// for (auto useGpu : {false, true}) {
config
.
layerConfig
.
set_partial_sum
(
inputSize
/
// testLayerGrad(config, "prelu", 100, false, useGpu);
channels
);
// size of feature map
// }
for
(
auto
useGpu
:
{
false
,
true
})
{
// };
testLayerGrad
(
config
,
"prelu"
,
100
,
false
,
useGpu
);
//
}
// testParameterReluLayer(192, 1);
};
// testParameterReluLayer(192, 3);
// testParameterReluLayer(192, 192);
testParameterReluLayer
(
192
,
1
);
// }
testParameterReluLayer
(
192
,
3
);
//
testParameterReluLayer
(
192
,
192
);
// TEST(Layer, ResizeLayer) {
}
// TestConfig config;
// config.biasSize = 0;
TEST
(
Layer
,
ResizeLayer
)
{
// config.layerConfig.set_type("resize");
TestConfig
config
;
// config.layerConfig.set_size(64);
config
.
biasSize
=
0
;
//
config
.
layerConfig
.
set_type
(
"resize"
);
// config.inputDefs.push_back({INPUT_DATA, "layer_0", 16, 0});
config
.
layerConfig
.
set_size
(
64
);
// config.layerConfig.add_inputs();
//
config
.
inputDefs
.
push_back
({
INPUT_DATA
,
"layer_0"
,
16
,
0
});
// for (auto useGpu : {false, true}) {
config
.
layerConfig
.
add_inputs
();
// testLayerGrad(config, "resize", 100, false, useGpu);
// }
for
(
auto
useGpu
:
{
false
,
true
})
{
// }
testLayerGrad
(
config
,
"resize"
,
100
,
false
,
useGpu
);
//
}
// TEST(Layer, NCELayer) {
}
// TestConfig config;
// size_t numClasses = 4;
TEST
(
Layer
,
NCELayer
)
{
// config.layerConfig.set_type("nce");
TestConfig
config
;
// config.layerConfig.set_size(1);
size_t
numClasses
=
4
;
// config.layerConfig.set_active_type("sigmoid");
config
.
layerConfig
.
set_type
(
"nce"
);
// config.layerConfig.set_num_classes(numClasses);
config
.
layerConfig
.
set_size
(
1
);
// config.biasSize = numClasses;
config
.
layerConfig
.
set_active_type
(
"sigmoid"
);
//
config
.
layerConfig
.
set_num_classes
(
numClasses
);
// config.inputDefs.push_back(
config
.
biasSize
=
numClasses
;
// {INPUT_DATA, "layer_0", /* dim= */ 16, /* paraSize= */ 16 *
// numClasses});
config
.
inputDefs
.
push_back
(
// config.inputDefs.push_back(
{
INPUT_DATA
,
"layer_0"
,
/* dim= */
16
,
/* paraSize= */
16
*
numClasses
});
// {INPUT_LABEL, "label", /* dim= */ numClasses, /* paraSize= */ 0});
config
.
inputDefs
.
push_back
(
// config.layerConfig.add_inputs();
{
INPUT_LABEL
,
"label"
,
/* dim= */
numClasses
,
/* paraSize= */
0
});
// config.layerConfig.add_inputs();
config
.
layerConfig
.
add_inputs
();
//
config
.
layerConfig
.
add_inputs
();
// for (auto withWeight : {false, true}) {
// if (withWeight) {
for
(
auto
withWeight
:
{
false
,
true
})
{
// config.inputDefs.push_back(
if
(
withWeight
)
{
// {INPUT_DATA_TARGET, "weight", /* dim= */ 1, /* paraSize= */ 0});
config
.
inputDefs
.
push_back
(
// config.layerConfig.add_inputs();
{
INPUT_DATA_TARGET
,
"weight"
,
/* dim= */
1
,
/* paraSize= */
0
});
// }
config
.
layerConfig
.
add_inputs
();
//
}
// for (auto isIdLabel : {false, true}) {
// config.inputDefs[1] = {
for
(
auto
isIdLabel
:
{
false
,
true
})
{
// isIdLabel ? INPUT_LABEL : INPUT_SPARSE_NON_VALUE_DATA,
config
.
inputDefs
[
1
]
=
{
// "label",
isIdLabel
?
INPUT_LABEL
:
INPUT_SPARSE_NON_VALUE_DATA
,
// /* dim= */ numClasses,
"label"
,
// /* paraSize= */ 0};
/* dim= */
numClasses
,
//
/* paraSize= */
0
};
// for (auto withDist : {false, true}) {
// config.layerConfig.clear_neg_sampling_dist();
for
(
auto
withDist
:
{
false
,
true
})
{
// if (withDist) {
config
.
layerConfig
.
clear_neg_sampling_dist
();
// double sum = 0;
if
(
withDist
)
{
// for (size_t i = 0; i < numClasses; ++i) {
double
sum
=
0
;
// real p = rand(); // NOLINT use rand_r
for
(
size_t
i
=
0
;
i
<
numClasses
;
++
i
)
{
// config.layerConfig.add_neg_sampling_dist(p);
real
p
=
rand
();
// NOLINT use rand_r
// sum += p;
config
.
layerConfig
.
add_neg_sampling_dist
(
p
);
// }
sum
+=
p
;
// for (size_t i = 0; i < numClasses; ++i) {
}
// real p = config.layerConfig.neg_sampling_dist(i) / sum;
for
(
size_t
i
=
0
;
i
<
numClasses
;
++
i
)
{
// config.layerConfig.set_neg_sampling_dist(i, p);
real
p
=
config
.
layerConfig
.
neg_sampling_dist
(
i
)
/
sum
;
// }
config
.
layerConfig
.
set_neg_sampling_dist
(
i
,
p
);
// }
}
// LOG(INFO) << "NCELayer "
}
// << " isIdLabel=" << isIdLabel << " withWeight=" <<
LOG
(
INFO
)
<<
"NCELayer "
// withWeight
<<
" isIdLabel="
<<
isIdLabel
<<
" withWeight="
<<
withWeight
// << " withDist=" << withDist;
<<
" withDist="
<<
withDist
;
// // Not support GPU now
// Not support GPU now
// testLayerGrad(config,
testLayerGrad
(
config
,
// "nce",
"nce"
,
// 100,
100
,
// /* trans= */ false,
/* trans= */
false
,
// /* useGpu */ false);
/* useGpu */
false
);
// }
}
// }
}
// }
}
// }
}
//
// TEST(Layer, GatedRecurrentLayer) {
TEST
(
Layer
,
GatedRecurrentLayer
)
{
// TestConfig config;
TestConfig
config
;
// config.layerConfig.set_type("gated_recurrent");
config
.
layerConfig
.
set_type
(
"gated_recurrent"
);
// config.layerConfig.set_size(4);
config
.
layerConfig
.
set_size
(
4
);
// config.layerConfig.set_active_type("sigmoid");
config
.
layerConfig
.
set_active_type
(
"sigmoid"
);
// config.layerConfig.set_active_gate_type("sigmoid");
config
.
layerConfig
.
set_active_gate_type
(
"sigmoid"
);
// config.biasSize = 12;
config
.
biasSize
=
12
;
//
// config.inputDefs.push_back(
config
.
inputDefs
.
push_back
(
// {INPUT_SEQUENCE_DATA, "layer_0", /* dim= */ 12, /* paraSize= */ 48});
{
INPUT_SEQUENCE_DATA
,
"layer_0"
,
/* dim= */
12
,
/* paraSize= */
48
});
// config.layerConfig.add_inputs();
config
.
layerConfig
.
add_inputs
();
//
// for (auto useGpu : {false, true}) {
for
(
auto
useGpu
:
{
false
,
true
})
{
// for (auto reversed : {false, true}) {
for
(
auto
reversed
:
{
false
,
true
})
{
// config.layerConfig.set_reversed(reversed);
config
.
layerConfig
.
set_reversed
(
reversed
);
// config.testState = !reversed;
config
.
testState
=
!
reversed
;
// testLayerGrad(config, "gated_recurrent", 100, /* trans= */ false,
testLayerGrad
(
config
,
"gated_recurrent"
,
100
,
/* trans= */
false
,
useGpu
);
// useGpu);
}
// }
}
// }
}
// }
//
TEST
(
Layer
,
GruStepLayer
)
{
// TEST(Layer, GruStepLayer) {
TestConfig
config
;
// TestConfig config;
config
.
layerConfig
.
set_type
(
"gru_step"
);
// config.layerConfig.set_type("gru_step");
config
.
layerConfig
.
set_size
(
4
);
// config.layerConfig.set_size(4);
config
.
layerConfig
.
set_active_type
(
"sigmoid"
);
// config.layerConfig.set_active_type("sigmoid");
config
.
layerConfig
.
set_active_gate_type
(
"sigmoid"
);
// config.layerConfig.set_active_gate_type("sigmoid");
config
.
biasSize
=
12
;
// config.biasSize = 12;
//
config
.
inputDefs
.
push_back
(
// config.inputDefs.push_back(
{
INPUT_DATA
,
"layer_0"
,
/* dim= */
12
,
/* paraSize= */
48
});
// {INPUT_DATA, "layer_0", /* dim= */ 12, /* paraSize= */ 48});
config
.
inputDefs
.
push_back
(
// config.inputDefs.push_back(
{
INPUT_DATA
,
"layer_1"
,
/* dim= */
4
,
/* paraSize= */
0
});
// {INPUT_DATA, "layer_1", /* dim= */ 4, /* paraSize= */ 0});
config
.
layerConfig
.
add_inputs
();
// config.layerConfig.add_inputs();
config
.
layerConfig
.
add_inputs
();
// config.layerConfig.add_inputs();
//
for
(
auto
useGpu
:
{
false
,
true
})
{
// for (auto useGpu : {false, true}) {
testLayerGrad
(
config
,
"gruStep"
,
100
,
/* trans= */
false
,
useGpu
);
// testLayerGrad(config, "gruStep", 100, /* trans= */ false, useGpu);
}
// }
}
// }
//
TEST
(
Layer
,
LstmStepLayer
)
{
// TEST(Layer, LstmStepLayer) {
TestConfig
config
;
// TestConfig config;
config
.
layerConfig
.
set_type
(
"lstm_step"
);
// config.layerConfig.set_type("lstm_step");
config
.
layerConfig
.
set_size
(
4
);
// config.layerConfig.set_size(4);
config
.
layerConfig
.
set_active_type
(
"sigmoid"
);
// config.layerConfig.set_active_type("sigmoid");
config
.
layerConfig
.
set_active_state_type
(
"sigmoid"
);
// config.layerConfig.set_active_state_type("sigmoid");
config
.
layerConfig
.
set_active_gate_type
(
"sigmoid"
);
// config.layerConfig.set_active_gate_type("sigmoid");
config
.
biasSize
=
12
;
// config.biasSize = 12;
config
.
testAccumulate
=
false
;
// config.testAccumulate = false;
//
config
.
inputDefs
.
push_back
(
// config.inputDefs.push_back(
{
INPUT_DATA
,
"layer_0"
,
/* dim= */
16
,
/* paraSize= */
0
});
// {INPUT_DATA, "layer_0", /* dim= */ 16, /* paraSize= */ 0});
config
.
inputDefs
.
push_back
(
// config.inputDefs.push_back(
{
INPUT_DATA
,
"layer_1"
,
/* dim= */
4
,
/* paraSize= */
0
});
// {INPUT_DATA, "layer_1", /* dim= */ 4, /* paraSize= */ 0});
config
.
layerConfig
.
add_inputs
();
// config.layerConfig.add_inputs();
config
.
layerConfig
.
add_inputs
();
// config.layerConfig.add_inputs();
//
for
(
auto
useGpu
:
{
false
,
true
})
{
// for (auto useGpu : {false, true}) {
testLayerGrad
(
config
,
"lstmStep"
,
100
,
/* trans= */
false
,
useGpu
);
// testLayerGrad(config, "lstmStep", 100, /* trans= */ false, useGpu);
}
// }
}
// }
//
void
testBatchNormLayer
(
const
string
&
type
,
bool
trans
,
bool
useGpu
)
{
// void testBatchNormLayer(const string& type, bool trans, bool useGpu) {
TestConfig
config
;
// TestConfig config;
const
int
CHANNELS
=
10
;
// const int CHANNELS = 10;
const
int
IMG_SIZE
=
16
;
// const int IMG_SIZE = 16;
const
int
IMG_SIZE_Y
=
8
;
// const int IMG_SIZE_Y = 8;
size_t
size
=
CHANNELS
*
IMG_SIZE
*
IMG_SIZE_Y
;
// size_t size = CHANNELS * IMG_SIZE * IMG_SIZE_Y;
config
.
layerConfig
.
set_type
(
type
);
// config.layerConfig.set_type(type);
config
.
layerConfig
.
set_size
(
size
);
// config.layerConfig.set_size(size);
config
.
layerConfig
.
set_active_type
(
"sigmoid"
);
// config.layerConfig.set_active_type("sigmoid");
config
.
biasSize
=
CHANNELS
;
// config.biasSize = CHANNELS;
config
.
inputDefs
.
push_back
({
INPUT_DATA
,
// config.inputDefs.push_back({INPUT_DATA,
"layer_0"
,
// "layer_0",
/* dim= */
size
,
// /* dim= */ size,
/* paraSize= */
CHANNELS
});
// /* paraSize= */ CHANNELS});
//
config
.
inputDefs
.
push_back
({
INPUT_DATA
,
"layer_1_running_mean"
,
1
,
CHANNELS
});
// config.inputDefs.push_back({INPUT_DATA, "layer_1_running_mean", 1,
config
.
inputDefs
.
back
().
isStatic
=
true
;
// CHANNELS});
config
.
inputDefs
.
push_back
({
INPUT_DATA
,
"layer_2_running_var"
,
1
,
CHANNELS
});
// config.inputDefs.back().isStatic = true;
config
.
inputDefs
.
back
().
isStatic
=
true
;
// config.inputDefs.push_back({INPUT_DATA, "layer_2_running_var", 1,
// CHANNELS});
LayerInputConfig
*
input
=
config
.
layerConfig
.
add_inputs
();
// config.inputDefs.back().isStatic = true;
config
.
layerConfig
.
add_inputs
();
//
config
.
layerConfig
.
add_inputs
();
// LayerInputConfig* input = config.layerConfig.add_inputs();
// config.layerConfig.add_inputs();
ImageConfig
*
img_conf
=
input
->
mutable_image_conf
();
// config.layerConfig.add_inputs();
img_conf
->
set_channels
(
CHANNELS
);
//
img_conf
->
set_img_size
(
IMG_SIZE
);
// ImageConfig* img_conf = input->mutable_image_conf();
img_conf
->
set_img_size_y
(
IMG_SIZE_Y
);
// img_conf->set_channels(CHANNELS);
// img_conf->set_img_size(IMG_SIZE);
testLayerGrad
(
config
,
// img_conf->set_img_size_y(IMG_SIZE_Y);
"batch_norm"
,
//
64
,
// testLayerGrad(config,
/* trans= */
trans
,
// "batch_norm",
useGpu
,
// 64,
/* useWeight */
true
);
// /* trans= */ trans,
}
// useGpu,
// /* useWeight */ true);
TEST
(
Layer
,
BatchNormalizationLayer
)
{
// }
testBatchNormLayer
(
"batch_norm"
,
false
,
false
);
//
#ifndef PADDLE_ONLY_CPU
// TEST(Layer, BatchNormalizationLayer) {
testBatchNormLayer
(
"batch_norm"
,
false
,
true
);
// testBatchNormLayer("batch_norm", false, false);
if
(
hl_get_cudnn_lib_version
()
>=
int
(
4000
))
{
// #ifndef PADDLE_ONLY_CPU
testBatchNormLayer
(
"cudnn_batch_norm"
,
false
,
true
);
// testBatchNormLayer("batch_norm", false, true);
}
// if (hl_get_cudnn_lib_version() >= int(4000)) {
#endif
// testBatchNormLayer("cudnn_batch_norm", false, true);
}
// }
// #endif
TEST
(
Operator
,
conv
)
{
// }
TestConfig
config
;
//
const
int
NUM_FILTERS
=
16
;
// TEST(Operator, conv) {
const
int
FILTER_SIZE
=
2
;
// TestConfig config;
const
int
FILTER_SIZE_Y
=
3
;
// const int NUM_FILTERS = 16;
const
int
CHANNELS
=
3
;
// const int FILTER_SIZE = 2;
const
int
IMAGE_SIZE
=
16
;
// const int FILTER_SIZE_Y = 3;
const
int
IMAGE_SIZE_Y
=
8
;
// const int CHANNELS = 3;
OperatorConfig
&
operatorConf
=
*
config
.
layerConfig
.
add_operator_confs
();
// const int IMAGE_SIZE = 16;
operatorConf
.
set_type
(
"conv"
);
// const int IMAGE_SIZE_Y = 8;
ConvConfig
*
conv
=
operatorConf
.
mutable_conv_conf
();
// OperatorConfig& operatorConf = *config.layerConfig.add_operator_confs();
operatorConf
.
set_num_filters
(
NUM_FILTERS
);
// operatorConf.set_type("conv");
conv
->
set_filter_size
(
FILTER_SIZE
);
// ConvConfig* conv = operatorConf.mutable_conv_conf();
conv
->
set_filter_size_y
(
FILTER_SIZE_Y
);
// operatorConf.set_num_filters(NUM_FILTERS);
conv
->
set_channels
(
CHANNELS
);
// conv->set_filter_size(FILTER_SIZE);
conv
->
set_padding
(
0
);
// conv->set_filter_size_y(FILTER_SIZE_Y);
conv
->
set_padding_y
(
1
);
// conv->set_channels(CHANNELS);
conv
->
set_stride
(
2
);
// conv->set_padding(0);
conv
->
set_stride_y
(
2
);
// conv->set_padding_y(1);
conv
->
set_groups
(
1
);
// conv->set_stride(2);
conv
->
set_filter_channels
(
conv
->
channels
()
/
conv
->
groups
());
// conv->set_stride_y(2);
conv
->
set_img_size
(
IMAGE_SIZE
);
// conv->set_groups(1);
conv
->
set_img_size_y
(
IMAGE_SIZE_Y
);
// conv->set_filter_channels(conv->channels() / conv->groups());
conv
->
set_output_x
(
outputSize
(
conv
->
img_size
(),
// conv->set_img_size(IMAGE_SIZE);
conv
->
filter_size
(),
// conv->set_img_size_y(IMAGE_SIZE_Y);
conv
->
padding
(),
// conv->set_output_x(outputSize(conv->img_size(),
conv
->
stride
(),
// conv->filter_size(),
/* caffeMode */
true
));
// conv->padding(),
conv
->
set_output_y
(
outputSize
(
conv
->
img_size_y
(),
// conv->stride(),
conv
->
filter_size_y
(),
// /* caffeMode */ true));
conv
->
padding_y
(),
// conv->set_output_y(outputSize(conv->img_size_y(),
conv
->
stride_y
(),
// conv->filter_size_y(),
/* caffeMode */
true
));
// conv->padding_y(),
config
.
layerConfig
.
set_size
(
conv
->
output_x
()
*
conv
->
output_y
()
*
// conv->stride_y(),
NUM_FILTERS
);
// /* caffeMode */ true));
// config.layerConfig.set_size(conv->output_x() * conv->output_y() *
config
.
inputDefs
.
push_back
(
// NUM_FILTERS);
{
INPUT_DATA
,
"layer_0"
,
IMAGE_SIZE
*
IMAGE_SIZE_Y
*
CHANNELS
,
0
});
//
config
.
inputDefs
.
push_back
(
// config.inputDefs.push_back(
{
INPUT_DATA
,
// {INPUT_DATA, "layer_0", IMAGE_SIZE * IMAGE_SIZE_Y * CHANNELS, 0});
"layer_1"
,
// config.inputDefs.push_back(
FILTER_SIZE
*
FILTER_SIZE_Y
*
CHANNELS
*
NUM_FILTERS
,
// {INPUT_DATA,
0
});
// "layer_1",
config
.
layerConfig
.
add_inputs
();
// FILTER_SIZE * FILTER_SIZE_Y * CHANNELS * NUM_FILTERS,
config
.
layerConfig
.
add_inputs
();
// 0});
// config.layerConfig.add_inputs();
testOperatorGrad
(
config
,
operatorConf
,
100
,
/*useGpu*/
true
,
false
);
// config.layerConfig.add_inputs();
}
//
// testOperatorGrad(config, operatorConf, 100, /*useGpu*/ true, false);
TEST
(
Layer
,
FeatureMapExpandLayer
)
{
// }
TestConfig
config
;
//
config
.
layerConfig
.
set_type
(
"featmap_expand"
);
// TEST(Layer, FeatureMapExpandLayer) {
const
int
CHANNELS
=
10
;
// TestConfig config;
const
int
INPUT_SIZE
=
100
;
// config.layerConfig.set_type("featmap_expand");
config
.
layerConfig
.
set_size
(
INPUT_SIZE
*
CHANNELS
);
// const int CHANNELS = 10;
config
.
layerConfig
.
set_num_filters
(
CHANNELS
);
// const int INPUT_SIZE = 100;
config
.
inputDefs
.
push_back
({
INPUT_SEQUENCE_DATA
,
// config.layerConfig.set_size(INPUT_SIZE * CHANNELS);
"layer_0"
,
// config.layerConfig.set_num_filters(CHANNELS);
/* dim= */
INPUT_SIZE
,
// config.inputDefs.push_back({INPUT_SEQUENCE_DATA,
/* paraSize= */
0
});
// "layer_0",
config
.
layerConfig
.
add_inputs
();
// /* dim= */ INPUT_SIZE,
for
(
auto
useGpu
:
{
false
,
true
})
{
// /* paraSize= */ 0});
testLayerGrad
(
config
,
// config.layerConfig.add_inputs();
"featmap_expand"
,
// for (auto useGpu : {false, true}) {
/*batch_size*/
100
,
// testLayerGrad(config,
/* trans= */
false
,
// "featmap_expand",
useGpu
,
// /*batch_size*/ 100,
/* useWeight */
true
);
// /* trans= */ false,
}
// useGpu,
}
// /* useWeight */ true);
// }
TEST
(
Layer
,
MultiplexLayer
)
{
// }
TestConfig
config
;
//
const
int
LAYER_SIZE
=
100
;
// TEST(Layer, MultiplexLayer) {
config
.
layerConfig
.
set_type
(
"multiplex"
);
// TestConfig config;
config
.
layerConfig
.
set_size
(
LAYER_SIZE
);
// const int LAYER_SIZE = 100;
// config.layerConfig.set_type("multiplex");
config
.
inputDefs
.
push_back
({
INPUT_LABEL
,
"layer_0"
,
2
,
0
});
// config.layerConfig.set_size(LAYER_SIZE);
config
.
inputDefs
.
push_back
(
//
{
INPUT_DATA
,
"layer_1"
,
/* dim= */
LAYER_SIZE
,
/* paraSize= */
0
});
// config.inputDefs.push_back({INPUT_LABEL, "layer_0", 2, 0});
config
.
inputDefs
.
push_back
(
// config.inputDefs.push_back(
{
INPUT_DATA
,
"layer_2"
,
/* dim= */
LAYER_SIZE
,
/* paraSize= */
0
});
// {INPUT_DATA, "layer_1", /* dim= */ LAYER_SIZE, /* paraSize= */ 0});
config
.
layerConfig
.
add_inputs
();
// config.inputDefs.push_back(
config
.
layerConfig
.
add_inputs
();
// {INPUT_DATA, "layer_2", /* dim= */ LAYER_SIZE, /* paraSize= */ 0});
config
.
layerConfig
.
add_inputs
();
// config.layerConfig.add_inputs();
// config.layerConfig.add_inputs();
for
(
auto
useGpu
:
{
false
,
true
})
{
// config.layerConfig.add_inputs();
testLayerGrad
(
config
,
"multiplex"
,
512
,
/* trans= */
false
,
useGpu
);
//
}
// for (auto useGpu : {false, true}) {
}
// testLayerGrad(config, "multiplex", 512, /* trans= */ false, useGpu);
// }
// }
//
TEST
(
Layer
,
PadLayer
)
{
TEST
(
Layer
,
PadLayer
)
{
TestConfig
config
;
TestConfig
config
;
config
.
biasSize
=
0
;
config
.
biasSize
=
0
;
...
...
python/paddle/trainer/config_parser.py
Browse file @
6b61a096
...
@@ -1109,7 +1109,7 @@ def parse_bilinear(bilinear, input_layer_name, bilinear_conf):
     bilinear_conf.out_size_y = bilinear.out_size_y
 
 
-def parse_pool(pool, input_layer_name, pool_conf):
+def parse_pool(pool, input_layer_name, pool_conf, ceil_mode):
     pool_conf.pool_type = pool.pool_type
     config_assert(pool.pool_type in [
         'max-projection', 'avg-projection', 'cudnn-max-pool', 'cudnn-avg-pool'
...
@@ -1134,10 +1134,10 @@ def parse_pool(pool, input_layer_name, pool_conf):
     pool_conf.padding_y = default(pool.padding_y, pool_conf.padding)
     pool_conf.output_x = cnn_output_size(pool_conf.img_size, pool_conf.size_x,
                                          pool_conf.padding, pool_conf.stride,
-                                         False)
+                                         not ceil_mode)
     pool_conf.output_y = cnn_output_size(pool_conf.img_size_y, pool_conf.size_y,
                                          pool_conf.padding_y,
-                                         pool_conf.stride_y, False)
+                                         pool_conf.stride_y, not ceil_mode)
 
 
 def parse_spp(spp, input_layer_name, spp_conf):
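The new `ceil_mode` flag reaches the output-size computation as `not ceil_mode`, i.e. as the caffe-mode argument of `cnn_output_size`. Below is a minimal sketch of what that helper is assumed to compute (caffe mode floors the window quotient, otherwise it is rounded up); the shipped implementation lives elsewhere in config_parser.py and may differ in detail:

```python
import math


def cnn_output_size(img_size, filter_size, padding, stride, caffe_mode):
    # Sketch, not the shipped helper: caffe_mode=True floors the quotient
    # (Caffe convention), caffe_mode=False rounds it up. Hence the layer's
    # ceil_mode=True maps to caffe_mode=False here.
    output = (img_size - filter_size + 2 * padding) / float(stride)
    return 1 + int(math.floor(output) if caffe_mode else math.ceil(output))
```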
...
@@ -1810,9 +1810,8 @@ class ConvTransLayer(ConvTransLayerBase):
 
 @config_layer('norm')
 class NormLayer(LayerBase):
-    def __init__(self, name, inputs, device=None):
-        super(NormLayer, self).__init__(
-            name, 'norm', 0, inputs=inputs, device=device)
+    def __init__(self, name, inputs, **xargs):
+        super(NormLayer, self).__init__(name, 'norm', 0, inputs=inputs, **xargs)
         for input_index in xrange(len(self.inputs)):
             input_layer = self.get_input_layer(input_index)
             norm_conf = self.config.inputs[input_index].norm_conf
...
@@ -1824,23 +1823,22 @@ class NormLayer(LayerBase):
 
 @config_layer('pool')
 class PoolLayer(LayerBase):
-    def __init__(self, name, inputs, device=None):
-        super(PoolLayer, self).__init__(
-            name, 'pool', 0, inputs=inputs, device=device)
+    def __init__(self, name, inputs, ceil_mode=True, **xargs):
+        super(PoolLayer, self).__init__(name, 'pool', 0, inputs=inputs, **xargs)
         for input_index in xrange(len(self.inputs)):
             input_layer = self.get_input_layer(input_index)
             pool_conf = self.config.inputs[input_index].pool_conf
             parse_pool(self.inputs[input_index].pool, input_layer.name,
-                       pool_conf)
+                       pool_conf, ceil_mode)
             self.set_cnn_layer(name, pool_conf.output_y, pool_conf.output_x,
                                pool_conf.channels)
 
 
 @config_layer('spp')
 class SpatialPyramidPoolLayer(LayerBase):
-    def __init__(self, name, inputs, device=None):
-        super(SpatialPyramidPoolLayer, self).__init__(
-            name, 'spp', 0, inputs=inputs, device=device)
+    def __init__(self, name, inputs, **xargs):
+        super(SpatialPyramidPoolLayer, self).__init__(
+            name, 'spp', 0, inputs=inputs, **xargs)
         for input_index in xrange(len(self.inputs)):
             input_layer = self.get_input_layer(input_index)
             spp_conf = self.config.inputs[input_index].spp_conf
...
@@ -1877,7 +1875,6 @@ class BatchNormLayer(LayerBase):
                  inputs,
                  active_type="linear",
                  bias=True,
-                 device=None,
                  use_global_stats=True,
                  moving_average_fraction=0.9,
                  batch_norm_type=None,
...
@@ -1919,7 +1916,6 @@ class BatchNormLayer(LayerBase):
             0,
             active_type=active_type,
             inputs=inputs,
-            device=device,
             **xargs)
         if use_global_stats is not None:
...
@@ -1953,9 +1949,9 @@ class BatchNormLayer(LayerBase):
 
 @config_layer('trans')
 class TransLayer(LayerBase):
-    def __init__(self, name, inputs, device=None):
-        super(TransLayer, self).__init__(
-            name, 'trans', 0, inputs=inputs, device=device)
+    def __init__(self, name, inputs, **xargs):
+        super(TransLayer, self).__init__(
+            name, 'trans', 0, inputs=inputs, **xargs)
         config_assert(
             len(self.inputs) == 1,
             'TransLayer must have one and only one input')
...
@@ -1964,9 +1960,9 @@ class TransLayer(LayerBase):
 
 @config_layer('resize')
 class ResizeLayer(LayerBase):
-    def __init__(self, name, size, inputs, device=None):
-        super(ResizeLayer, self).__init__(
-            name, 'resize', size=size, inputs=inputs, device=device)
+    def __init__(self, name, size, inputs, **xargs):
+        super(ResizeLayer, self).__init__(
+            name, 'resize', size=size, inputs=inputs, **xargs)
         config_assert(
             len(self.inputs) == 1,
             'ResizeLayer must have one and only one input')
...
@@ -1974,9 +1970,9 @@ class ResizeLayer(LayerBase):
 
 @config_layer('blockexpand')
 class BlockExpandLayer(LayerBase):
-    def __init__(self, name, inputs, device=None):
-        super(BlockExpandLayer, self).__init__(
-            name, 'blockexpand', 0, inputs=inputs, device=device)
+    def __init__(self, name, inputs, **xargs):
+        super(BlockExpandLayer, self).__init__(
+            name, 'blockexpand', 0, inputs=inputs, **xargs)
         for input_index in xrange(len(self.inputs)):
             input_layer = self.get_input_layer(input_index)
             parse_block_expand(
...
python/paddle/trainer_config_helpers/layers.py
Browse file @
6b61a096
...
@@ -1980,7 +1980,8 @@ def img_pool_layer(input,
                    layer_attr=None,
                    pool_size_y=None,
                    stride_y=None,
-                   padding_y=None):
+                   padding_y=None,
+                   ceil_mode=True):
     """
     Image pooling Layer.
...
@@ -2011,6 +2012,23 @@ def img_pool_layer(input,
     :type stride_y: int|None
     :param layer_attr: Extra Layer attribute.
     :type layer_attr: ExtraLayerAttribute
+    :param ceil_mode: Whether to use ceil mode to calculate the output height
+                      and width. Default is True; if set to False, floor mode
+                      is used instead.
+
+                      - ceil_mode=True:
+
+                      .. math::
+
+                          w = 1 + int(ceil((input_width + 2 * padding - pool_size) / float(stride)))
+
+                          h = 1 + int(ceil((input_height + 2 * padding_y - pool_size_y) / float(stride_y)))
+
+                      - ceil_mode=False:
+
+                      .. math::
+
+                          w = 1 + int(floor((input_width + 2 * padding - pool_size) / float(stride)))
+
+                          h = 1 + int(floor((input_height + 2 * padding_y - pool_size_y) / float(stride_y)))
+    :type ceil_mode: bool
     :return: LayerOutput object.
     :rtype: LayerOutput
     """
...
@@ -2048,6 +2066,7 @@ def img_pool_layer(input,
                 stride_y=stride_y,
                 padding_y=padding_y))
         ],
+        ceil_mode=ceil_mode,
         **ExtraLayerAttribute.to_kwargs(layer_attr))
     return LayerOutput(
         name,
...
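From the user side the new keyword is simply passed through `img_pool_layer`; a hypothetical call sketching the usage (the upstream layer name, sizes, and the `MaxPooling` pool type are illustrative assumptions, not taken from this patch):

```python
# Assumes the usual trainer_config_helpers names are in scope, e.g.
# from paddle.trainer_config_helpers import *
pool = img_pool_layer(input=conv_out,        # some upstream layer (hypothetical)
                      num_channels=16,
                      pool_size=3,
                      stride=2,
                      pool_type=MaxPooling(),
                      ceil_mode=False)       # floor mode, Caffe-style output shapes
```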