diff --git a/dygraph/ocr_recognition/train.py b/dygraph/ocr_recognition/train.py
index f333d3e293293f29b9e639c86415fa1fd824c786..234969e967072c682320ce41c50db113eaab89c4 100644
--- a/dygraph/ocr_recognition/train.py
+++ b/dygraph/ocr_recognition/train.py
@@ -331,9 +331,6 @@ class SimpleAttention(fluid.dygraph.Layer):
             act=None,
             bias_attr=False)
 
-    def _build_once(self, encoder_vec, encoder_proj, decoder_state):
-        pass
-
     def forward(self, encoder_vec, encoder_proj, decoder_state):
 
         decoder_state_fc = self.fc_1(decoder_state)
@@ -381,9 +378,6 @@ class GRUDecoderWithAttention(fluid.dygraph.Layer):
 
         self.decoder_size = decoder_size
 
-    def _build_once(self, target_embedding, encoder_vec, encoder_proj,
-                    decoder_boot):
-        pass
 
     def forward(self, target_embedding, encoder_vec, encoder_proj,
                 decoder_boot):
@@ -431,8 +425,6 @@ class OCRAttention(fluid.dygraph.Layer):
         self.gru_decoder_with_attention = GRUDecoderWithAttention(
             self.full_name(), Config.decoder_size, Config.num_classes)
 
-    def _build_once(self, inputs, label_in):
-        pass
 
     def forward(self, inputs, label_in):
         gru_backward, encoded_vector, encoded_proj = self.encoder_net(inputs)
diff --git a/dygraph/pbt_lm/ptb_dy.py b/dygraph/pbt_lm/ptb_dy.py
index 11df408fdf106a79ccd5702f280c176da919dba4..f93a80085be5704d3ed970f05475c136005e73de 100644
--- a/dygraph/pbt_lm/ptb_dy.py
+++ b/dygraph/pbt_lm/ptb_dy.py
@@ -56,7 +56,6 @@ class SimpleLSTMRNN(fluid.Layer):
         self.cell_array = []
         self.hidden_array = []
 
-        #build_once()
         self.weight_1_arr = []
         self.weight_2_arr = []
         self.bias_arr = []
@@ -81,35 +80,6 @@ class SimpleLSTMRNN(fluid.Layer):
                 default_initializer=fluid.initializer.Constant(0.0))
             self.bias_arr.append(self.add_parameter('b_%d' % i, bias_1))
 
-    def build_once(self, input_embedding, init_hidden=None, init_cell=None):
-
-        pass
-        '''
-        self.weight_1_arr = []
-        self.weight_2_arr = []
-        self.bias_arr = []
-        self.mask_array = []
-
-        for i in range(self._num_layers):
-            weight_1 = self.create_parameter(
-                attr=fluid.ParamAttr(
-                    initializer=fluid.initializer.UniformInitializer(
-                        low=-self._init_scale, high=self._init_scale)),
-                shape=[self._hidden_size * 2, self._hidden_size * 4],
-                dtype="float32",
-                default_initializer=fluid.initializer.UniformInitializer(
-                    low=-self._init_scale, high=self._init_scale))
-            self.weight_1_arr.append(self.add_parameter('w_%d' % i, weight_1))
-            bias_1 = self.create_parameter(
-                attr=fluid.ParamAttr(
-                    initializer=fluid.initializer.UniformInitializer(
-                        low=-self._init_scale, high=self._init_scale)),
-                shape=[self._hidden_size * 4],
-                dtype="float32",
-                default_initializer=fluid.initializer.Constant(0.0))
-            self.bias_arr.append(self.add_parameter('b_%d' % i, bias_1))
-        '''
-
     def forward(self, input_embedding, init_hidden=None, init_cell=None):
         self.cell_array = []
         self.hidden_array = []