Commit cd7c55aa authored by dangqingqing

generate protostr automatically when adding a new test for trainer_config_helpers.

Parent 68b958c3
@@ -5,6 +5,6 @@ last_first_seq test_expand_layer test_ntm_layers test_hsigmoid
 img_layers img_trans_layers util_layers simple_rnn_layers unused_layers test_cost_layers
 test_rnn_group shared_fc shared_lstm shared_gru test_cost_layers_with_weight
 test_spp_layer test_bilinear_interp test_maxout test_bi_grumemory math_ops
-test_seq_concat_reshape)
+test_seq_concat_reshape test_pad)
 export whole_configs=(test_split_datasource)
@@ -11,6 +11,9 @@ for conf in ${configs[*]}
 do
     echo "Generating " $conf
     $1 -m paddle.utils.dump_config $conf.py > $protostr/$conf.protostr.unittest
+    if [ ! -f "$protostr/$conf.protostr" ]; then
+        cp $protostr/$conf.protostr.unittest $protostr/$conf.protostr
+    fi
     cat ${conf}.py |$1 test_config_parser_for_non_file_config.py > $protostr/$conf.protostr.non_file_config.unittest
 done
@@ -18,5 +21,8 @@ for conf in ${whole_configs[*]}
 do
     echo "Generating " $conf
     $1 -m paddle.utils.dump_config $conf.py "" --whole > $protostr/$conf.protostr.unittest
+    if [ ! -f "$protostr/$conf.protostr" ]; then
+        cp $protostr/$conf.protostr.unittest $protostr/$conf.protostr
+    fi
     cat ${conf}.py |$1 test_config_parser_for_non_file_config.py --whole > $protostr/$conf.protostr.non_file_config.unittest
 done
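The added shell block is the whole change: after dumping a config to *.protostr.unittest, the script now bootstraps the golden *.protostr file from that output when none exists yet, so adding a new config (such as test_pad here) no longer requires copying the golden file by hand. A minimal Python sketch of that step, assuming a configs directory layout like the one the script uses; the names generate_golden and protostr_dir are hypothetical, the authoritative logic is the shell `if [ ! -f ... ]` block above.

import os
import shutil
import subprocess
import sys

def generate_golden(conf, protostr_dir, python_bin=sys.executable):
    unittest_file = os.path.join(protostr_dir, conf + ".protostr.unittest")
    golden_file = os.path.join(protostr_dir, conf + ".protostr")

    # Dump the parsed config, mirroring
    # `$1 -m paddle.utils.dump_config $conf.py > $protostr/$conf.protostr.unittest`.
    with open(unittest_file, "w") as f:
        subprocess.check_call(
            [python_bin, "-m", "paddle.utils.dump_config", conf + ".py"],
            stdout=f)

    # New behaviour: if no golden file exists yet (a freshly added test),
    # bootstrap it from the generated output instead of a manual copy.
    if not os.path.isfile(golden_file):
        shutil.copy(unittest_file, golden_file)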
type: "nn"
layers {
name: "data"
type: "data"
size: 2016
active_type: ""
height: 48
width: 42
}
layers {
name: "__conv_0__"
type: "exconv"
size: 32256
active_type: ""
inputs {
input_layer_name: "data"
input_parameter_name: "___conv_0__.w0"
conv_conf {
filter_size: 3
channels: 1
stride: 1
padding: 1
groups: 1
filter_channels: 1
output_x: 42
img_size: 42
caffe_mode: true
filter_size_y: 3
padding_y: 1
stride_y: 1
output_y: 48
img_size_y: 48
}
}
bias_parameter_name: "___conv_0__.wbias"
num_filters: 16
shared_biases: true
height: 48
width: 42
}
layers {
name: "__pool_0__"
type: "pool"
size: 8064
active_type: ""
inputs {
input_layer_name: "__conv_0__"
pool_conf {
pool_type: "max-projection"
channels: 16
size_x: 2
stride: 2
output_x: 21
img_size: 42
padding: 0
size_y: 2
stride_y: 2
output_y: 24
img_size_y: 48
padding_y: 0
}
}
height: 24
width: 21
}
layers {
name: "__pad_0__"
type: "pad"
size: 14175
active_type: ""
inputs {
input_layer_name: "__pool_0__"
pad_conf {
image_conf {
channels: 16
img_size: 21
img_size_y: 24
}
pad_c: 2
pad_c: 3
pad_h: 1
pad_h: 2
pad_w: 3
pad_w: 1
}
}
height: 27
width: 25
}
parameters {
name: "___conv_0__.w0"
size: 144
initial_mean: 0.0
initial_std: 0.471404520791
initial_strategy: 0
initial_smart: false
}
parameters {
name: "___conv_0__.wbias"
size: 16
initial_mean: 0.0
initial_std: 0.0
dims: 16
dims: 1
initial_strategy: 0
initial_smart: false
}
input_layer_names: "data"
output_layer_names: "__pad_0__"
sub_models {
name: "root"
layer_names: "data"
layer_names: "__conv_0__"
layer_names: "__pool_0__"
layer_names: "__pad_0__"
input_layer_names: "data"
output_layer_names: "__pad_0__"
is_recurrent_layer_group: false
}
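Each `size` in the generated config above is simply channels × height × width of that layer's output, and the padded height/width (27, 25) follow from the pad amounts. A standalone arithmetic check (a sketch for illustration, not part of the generated test):

# Sanity check of the layer sizes in the dump above; plain arithmetic, no Paddle needed.
channels, height, width = 1, 48, 42
assert channels * height * width == 2016          # data layer size

num_filters = 16                                  # 3x3 conv, stride 1, padding 1
conv_h, conv_w = height, width                    # "same" convolution keeps 48 x 42
assert num_filters * conv_h * conv_w == 32256     # __conv_0__ size

pool_h, pool_w = conv_h // 2, conv_w // 2         # 2x2 max pooling, stride 2 -> 24 x 21
assert num_filters * pool_h * pool_w == 8064      # __pool_0__ size

pad_c, pad_h, pad_w = (2, 3), (1, 2), (3, 1)      # pad_layer arguments
out_c = num_filters + sum(pad_c)                  # 21 channels
out_h = pool_h + sum(pad_h)                       # 27
out_w = pool_w + sum(pad_w)                       # 25
assert out_c * out_h * out_w == 14175             # __pad_0__ size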
@@ -2,7 +2,7 @@ from paddle.trainer_config_helpers import *
 settings(batch_size=1000, learning_rate=1e-5)
-data = data_layer(name='data', size=2304, height=48, width=42)
+data = data_layer(name='data', size=2016, height=48, width=42)
 conv = img_conv_layer(
     input=data,
@@ -13,8 +13,7 @@ conv = img_conv_layer(
     act=LinearActivation(),
     bias_attr=True)
-pool = img_pool_layer(
-    input=conv, num_channels=8, pool_size=2, stride=2, pool_type=MaxPooling())
+pool = img_pool_layer(input=conv, pool_size=2, stride=2, pool_type=MaxPooling())
 pad = pad_layer(input=pool, pad_c=[2, 3], pad_h=[1, 2], pad_w=[3, 1])
...
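For reference, a sketch of the full test config after this change, assembled from the visible hunks; the conv arguments not shown in the diff (filter_size, num_channels, num_filters, padding, stride) and the final outputs() call are inferred from conv_conf, num_filters and output_layer_names in the generated config above, so treat them as assumptions rather than the exact file contents.

from paddle.trainer_config_helpers import *

settings(batch_size=1000, learning_rate=1e-5)

# 1 x 48 x 42 input image, flattened size 2016 (corrected from 2304 in this commit).
data = data_layer(name='data', size=2016, height=48, width=42)

# 3x3 "same" convolution with 16 filters; values inferred from conv_conf above.
conv = img_conv_layer(
    input=data,
    filter_size=3,
    num_channels=1,
    num_filters=16,
    padding=1,
    stride=1,
    act=LinearActivation(),
    bias_attr=True)

# 2x2 max pooling, stride 2; num_channels is now taken from the conv layer.
pool = img_pool_layer(input=conv, pool_size=2, stride=2, pool_type=MaxPooling())

# Pad 2/3 channels, 1/2 rows, 3/1 columns -> 21 x 27 x 25 output.
pad = pad_layer(input=pool, pad_c=[2, 3], pad_h=[1, 2], pad_w=[3, 1])

# Implied by output_layer_names: "__pad_0__" in the generated config.
outputs(pad)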