From 3c5e0f8a5db328205d29916a8fc9a6ced9a2e2ba Mon Sep 17 00:00:00 2001
From: dangqingqing
Date: Thu, 11 May 2017 15:18:06 +0800
Subject: [PATCH] remove g_pass_height_width in config_parse.py

---
 python/paddle/trainer/config_parser.py | 17 +----------------
 1 file changed, 1 insertion(+), 16 deletions(-)

diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py
index 32e31fe2c4..57d30b088b 100644
--- a/python/paddle/trainer/config_parser.py
+++ b/python/paddle/trainer/config_parser.py
@@ -138,14 +138,7 @@ def init_config_environment(
         g_root_submodel=None,
         g_submodel_map={},
         g_submodel_stack=[],
-        g_add_submodel_suffix=False,
-
-        # Whether current layer needs to pass the image height and width.
-        # Default value is true, but if it encounters recurrent_layer_group,
-        # it will be false. The reason is that image is converted to be sequence,
-        # image height will be sequence length, and image width will be feature
-        # length of each timestep.
-        g_pass_height_width=True, ):
+        g_add_submodel_suffix=False, ):
     for k, v in locals().iteritems():
         globals()[k] = copy.deepcopy(v)
 
@@ -1437,12 +1430,6 @@ class LayerBase(object):
 
         g_current_submodel.layer_names.append(self.config.name)
 
-        if self.config.type != 'data' and g_pass_height_width:
-            height = self.get_input_layer(0).height
-            width = self.get_input_layer(0).width
-            if height and width:
-                self.set_layer_height_width(height, width)
-
     def get_input_layer(self, input_index):
         return g_layer_map[self.config.inputs[input_index].input_layer_name]
 
@@ -3164,8 +3151,6 @@ class WarpCTCLayer(LayerBase):
 @config_layer('recurrent_layer_group')
 class RecurrentLayerGroup(LayerBase):
     def __init__(self, name, device=None):
-        global g_pass_height_width
-        g_pass_height_width = False
         super(RecurrentLayerGroup, self).__init__(
             name, 'recurrent_layer_group', 0, inputs=[], device=device)
 
--
GitLab
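
The hunks above remove the global flag g_pass_height_width, which made every non-data layer inherit the image height and width of its first input unless configuration entered a recurrent_layer_group, where the image is flattened into a sequence (height becomes sequence length, width becomes the per-timestep feature length). The sketch below is a minimal, self-contained illustration of that removed propagation behavior; FakeLayer and add_layer are hypothetical names for this example and do not exist in config_parser.py.

    # Simplified sketch of the propagation logic deleted from LayerBase.__init__.
    class FakeLayer(object):
        def __init__(self, name, height=0, width=0):
            self.name = name
            self.height = height
            self.width = width

    g_layer_map = {}
    g_pass_height_width = True  # the global flag this patch removes

    def add_layer(name, layer_type, inputs):
        layer = FakeLayer(name)
        # Non-data layers copy height/width from their first input,
        # unless the flag was switched off by a recurrent_layer_group.
        if layer_type != 'data' and g_pass_height_width and inputs:
            first = g_layer_map[inputs[0]]
            if first.height and first.width:
                layer.height, layer.width = first.height, first.width
        g_layer_map[name] = layer
        return layer

    img = add_layer('img', 'data', [])
    img.height, img.width = 32, 32
    conv = add_layer('conv', 'conv', ['img'])
    print("height=%d width=%d" % (conv.height, conv.width))  # height=32 width=32

After this patch, layers no longer receive height/width implicitly from their inputs during config parsing, so any dimension bookkeeping of this kind has to be set explicitly where it is needed.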