name: "densenet" input: "data" input_dim: 1 input_dim: 3 input_dim: 32 input_dim: 280 layer { bottom: "data" top: "conv1" name: "conv1" type: "Convolution" param { lr_mult: 1 decay_mult: 1 } convolution_param { num_output: 64 kernel_size: 5 pad: 2 stride: 2 weight_filler { type: "msra"} bias_filler { type: "constant" value: 0 } } } # DenseBlock 1 layer { name: "DenseBlock1" type: "DenseBlock" bottom: "conv1" top: "DenseBlock1" denseblock_param { numTransition: 8 initChannel: 64 growthRate: 8 Filter_Filler { type: "msra" } BN_Scaler_Filler { type: "constant" value: 1 } BN_Bias_Filler { type: "constant" value: 0 } use_dropout: false dropout_amount: 0.2 } } layer { name: "BatchNorm1" type: "BatchNorm" bottom: "DenseBlock1" top: "BatchNorm1" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "Scale1" type: "Scale" bottom: "BatchNorm1" top: "BatchNorm1" scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "ReLU1" type: "ReLU" bottom: "BatchNorm1" top: "BatchNorm1" } layer { name: "Convolution2" type: "Convolution" bottom: "BatchNorm1" top: "Convolution2" convolution_param { num_output: 128 bias_term: false pad: 0 kernel_size: 1 stride: 1 weight_filler { type: "msra" } } } layer { name: "Dropout1" type: "Dropout" bottom: "Convolution2" top: "Dropout1" dropout_param { dropout_ratio: 0.2 } } layer { name: "Pooling1" type: "Pooling" bottom: "Dropout1" top: "Pooling1" pooling_param { pool: AVE kernel_size: 2 stride: 2 } } # DenseBlock 2 layer { name: "DenseBlock2" type: "DenseBlock" bottom: "Pooling1" top: "DenseBlock2" denseblock_param { numTransition: 8 initChannel: 64 growthRate: 8 Filter_Filler { type: "msra" } BN_Scaler_Filler { type: "constant" value: 1 } BN_Bias_Filler { type: "constant" value: 0 } use_dropout: false dropout_amount: 0.2 } } layer { name: "BatchNorm2" type: "BatchNorm" bottom: "DenseBlock2" top: "BatchNorm2" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "Scale2" type: "Scale" bottom: "BatchNorm2" top: "BatchNorm2" scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "ReLU2" type: "ReLU" bottom: "BatchNorm2" top: "BatchNorm2" } layer { name: "Convolution3" type: "Convolution" bottom: "BatchNorm2" top: "Convolution3" convolution_param { num_output: 192 bias_term: false pad: 0 kernel_size: 1 stride: 1 weight_filler { type: "msra" } } } layer { name: "Dropout2" type: "Dropout" bottom: "Convolution3" top: "Convolution3" dropout_param { dropout_ratio: 0.2 } } layer { name: "Pooling2" type: "Pooling" bottom: "Convolution3" top: "Pooling2" pooling_param { pool: AVE kernel_size: 2 stride: 2 } } # DenseBlock 3 layer { name: "DenseBlock3" type: "DenseBlock" bottom: "Pooling2" top: "DenseBlock3" denseblock_param { numTransition: 8 initChannel: 64 growthRate: 8 Filter_Filler { type: "msra" } BN_Scaler_Filler { type: "constant" value: 1 } BN_Bias_Filler { type: "constant" value: 0 } use_dropout: false dropout_amount: 0.2 } } layer { name: "BatchNorm3" type: "BatchNorm" bottom: "DenseBlock3" top: "BatchNorm3" param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } param { lr_mult: 0 decay_mult: 0 } } layer { name: "Scale3" type: "Scale" bottom: "BatchNorm3" top: "BatchNorm3" scale_param { filler { value: 1 } bias_term: true bias_filler { value: 0 } } } layer { name: "ReLU3" type: "ReLU" bottom: "BatchNorm3" top: "BatchNorm3" } layer { name: "pool5_ave" 
type: "Pooling" bottom: "BatchNorm3" top: "pool5_ave" pooling_param { pool: AVE kernel_w: 1 kernel_h: 4 stride_w: 1 stride_h: 1 } } layer { name: "pool5_ave_transpose" top: "pool5_ave_transpose" bottom: "pool5_ave" type: "Transpose" transpose_param { dim: 3 dim: 2 dim: 0 dim: 1 } } layer { name: "blstm_input" type: "Reshape" bottom: "pool5_ave_transpose" top: "blstm_input" reshape_param { shape { dim: -1 } axis: 1 num_axes: 2 } } #===================blstm layer 1============================ #======lstm1=================== layer { name: "lstm1" type: "Lstm" bottom: "blstm_input" top: "lstm1" lstm_param { num_output: 256 weight_filler { type: "gaussian" std: 0.1 } bias_filler { type: "constant" } } } # =====lstm1_reverse=================== layer { name: "lstm1-reverse1" type: "Reverse" bottom: "blstm_input" top: "rlstm1_input" reverse_param { axis: 0 } } layer { name: "rlstm1" type: "Lstm" bottom: "rlstm1_input" top: "rlstm1-output" lstm_param { num_output: 256 weight_filler { type: "gaussian" std: 0.1 } bias_filler { type: "constant" } } } layer { name: "lstm1-reverse2" type: "Reverse" bottom: "rlstm1-output" top: "rlstm1" reverse_param { axis: 0 } } # merge lstm1 and rlstm1 layer { name: "blstm1" type: "Eltwise" bottom: "lstm1" bottom: "rlstm1" bottom: "blstm_input" top: "blstm1" eltwise_param { operation: SUM } } #===================blstm layer 2============================ #======lstm2=================== layer { name: "lstm2" type: "Lstm" bottom: "blstm1" top: "lstm2" lstm_param { num_output: 256 weight_filler { type: "gaussian" std: 0.1 } bias_filler { type: "constant" } } } # =====lstm2_reverse=================== layer { name: "lstm2-reverse1" type: "Reverse" bottom: "blstm1" top: "rlstm2_input" reverse_param { axis: 0 } } layer { name: "rlstm2" type: "Lstm" bottom: "rlstm2_input" top: "rlstm2-output" lstm_param { num_output: 256 weight_filler { type: "gaussian" std: 0.1 } bias_filler { type: "constant" } } } layer { name: "lstm2-reverse2" type: "Reverse" bottom: "rlstm2-output" top: "rlstm2" reverse_param { axis: 0 } } # merge lstm2 and rlstm2 layer { name: "blstm2" type: "Eltwise" bottom: "lstm2" bottom: "rlstm2" bottom: "blstm1" bottom: "blstm_input" top: "blstm2" eltwise_param { operation: SUM } } layer { name: "fc1x_69" type: "InnerProduct" bottom: "blstm2" top: "fc1x" param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0 } inner_product_param { axis: 2 num_output: 69 weight_filler { type: "xavier" } bias_filler { type: "constant" value: 0 } } } layer { name: "result" type: "CTCGreedyDecoder" bottom: "fc1x" top: "result" }