diff --git a/python/paddle/fluid/dygraph/layers.py b/python/paddle/fluid/dygraph/layers.py
index 3dfb50781e7e4c3db330fc4b337e65f27bf849d2..eca8d060b0f2f79d30ec8abda57aeeb2677d8c16 100644
--- a/python/paddle/fluid/dygraph/layers.py
+++ b/python/paddle/fluid/dygraph/layers.py
@@ -150,12 +150,12 @@ class Layer(core.Layer):
             if p.trainable:
                 p.clear_gradient()
 
-    def build_once(self, *args):
+    def _build_once(self, *args):
         pass
 
     def __call__(self, *inputs):
         if not self._built:
-            self.build_once(*inputs)
+            self._build_once(*inputs)
             if parallel_helper._is_data_parallel_mode():
                 parallel_helper._broadcast_parameters(
                     self._parameters.values())
diff --git a/python/paddle/fluid/dygraph/nn.py b/python/paddle/fluid/dygraph/nn.py
index 64a195250a70d8b682018770993160a1a1d9b452..4bc361102845824f0161922824a9207e1073cbcf 100644
--- a/python/paddle/fluid/dygraph/nn.py
+++ b/python/paddle/fluid/dygraph/nn.py
@@ -372,7 +372,7 @@ class Conv3D(layers.Layer):
         self._param_attr = param_attr
         self._bias_attr = bias_attr
 
-    def build_once(self, input):
+    def _build_once(self, input):
         num_channels = input.shape[1]
         self._dtype = self._helper.input_dtype(input)
 
@@ -579,7 +579,7 @@ class Conv3DTranspose(layers.Layer):
         self._bias_attr = bias_attr
         self._act = act
 
-    def build_once(self, input):
+    def _build_once(self, input):
         self._dtype = self._helper.input_dtype(input)
         self._input_channel = input.shape[1]
 
@@ -883,7 +883,7 @@ class FC(layers.Layer):
             assert isinstance(value, Parameter)
             self.__w[i] = value
 
-    def build_once(self, input):
+    def _build_once(self, input):
         i = 0
         for inp, param in self._helper.iter_inputs_and_params(input,
                                                               self._param_attr):
@@ -1112,7 +1112,7 @@ class BatchNorm(layers.Layer):
         self._fuse_with_relu = fuse_with_relu
         self._use_global_stats = use_global_stats
 
-    def build_once(self, input):
+    def _build_once(self, input):
         pass
 
     def forward(self, input):
@@ -1189,6 +1189,7 @@ class Embedding(layers.Layer):
         supplied inputs.
 
     Examples:
+
         .. code-block:: python
 
             dict_size = len(dataset.ids)
@@ -1316,7 +1317,7 @@ class LayerNorm(layers.Layer):
         self._bias_attr = bias_attr
         self._act = act
 
-    def build_once(self, input):
+    def _build_once(self, input):
         self._dtype = self._helper.input_dtype(input)
         input_shape = input.shape
         param_shape = [
@@ -1678,7 +1679,7 @@ class NCE(layers.Layer):
             'remote_prefetch': remote_prefetch
         }
 
-    def build_once(self, input, label, sample_weight=None):
+    def _build_once(self, input, label, sample_weight=None):
         assert isinstance(input, Variable)
         assert isinstance(label, Variable)
 
@@ -1764,7 +1765,7 @@ class PRelu(layers.Layer):
             raise ValueError('mode should be one of all, channel, element.')
         self._alpha_shape = [1]
 
-    def build_once(self, input):
+    def _build_once(self, input):
         if self._mode == 'channel':
             self._alpha_shape = [1, input.shape[1], 1, 1]
         elif self._mode == 'element':
@@ -1842,7 +1843,7 @@ class BilinearTensorProduct(layers.Layer):
         self._name = name
         self._inputs = dict()
 
-    def build_once(self, x, y):
+    def _build_once(self, x, y):
         self._dtype = self._helper.input_dtype(x)
 
         param_shape = [self._size, x.shape[1], y.shape[1]]
@@ -2018,7 +2019,7 @@ class Conv2DTranspose(layers.Layer):
         self._output_size = output_size
         self._op_type = 'conv2d_transpose'
 
-    def build_once(self, input):
+    def _build_once(self, input):
         input_channel = input.shape[1]
         if (input_channel == self._groups and
                 self._num_filters == input_channel and not self._use_cudnn):
@@ -2142,7 +2143,7 @@ class SequenceConv(layers.Layer):
         self._bias_attr = bias_attr
         self._param_attr = param_attr
 
-    def build_once(self, input):
+    def _build_once(self, input):
         self._dtype = self._helper.input_dtype(input)
         filter_shape = [self._filter_size * input.shape[1], self._num_filters]
         self._filter_param = self.create_parameter(
@@ -2179,7 +2180,7 @@ class RowConv(layers.Layer):
         self._param_attr = param_attr
         self._future_context_size = future_context_size
 
-    def build_once(self, input):
+    def _build_once(self, input):
         self._dtype = self._helper.input_dtype(input)
         filter_shape = [self._future_context_size + 1, input.shape[1]]
         self._filter_param = self.create_parameter(
@@ -2242,7 +2243,7 @@ class GroupNorm(layers.Layer):
         if data_layout != 'NCHW':
             raise ValueError("unsupported data layout:" + data_layout)
 
-    def build_once(self, input):
+    def _build_once(self, input):
         self._dtype = self._helper.input_dtype(input)
         param_shape = [input.shape[1]]
         if self._bias_attr:
@@ -2295,7 +2296,7 @@ class SpectralNorm(layers.Layer):
         self._eps = eps
         self._dim = dim
 
-    def build_once(self, weight):
+    def _build_once(self, weight):
         self._dtype = self._helper.input_dtype(weight)
         input_shape = weight.shape
         h = input_shape[self._dim]
@@ -2350,7 +2351,7 @@ class TreeConv(layers.Layer):
         self._bias_attr = bias_attr
         self._param_attr = param_attr
 
-    def build_once(self, nodes_vector, edge_set):
+    def _build_once(self, nodes_vector, edge_set):
         assert isinstance(nodes_vector, Variable)
         assert isinstance(edge_set, Variable)
         self._dtype = self._helper.input_dtype(nodes_vector)
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_basic.py b/python/paddle/fluid/tests/unittests/test_imperative_basic.py
index f6d3777405cf6f388df4efc4dc0eafd9880385c0..afa21a375a4da29c1ea964eb66f792f0cc7a0356 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_basic.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_basic.py
@@ -67,7 +67,7 @@ class SimpleRNNCell(fluid.Layer):
         self._dtype = core.VarDesc.VarType.FP32
         self.param_attr = param_attr
 
-    def build_once(self, inputs, pre_hidden):
+    def _build_once(self, inputs, pre_hidden):
         i2h_param_shape = [self.step_input_size, self.hidden_size]
         h2h_param_shape = [self.hidden_size, self.hidden_size]
         h2o_param_shape = [self.output_size, self.hidden_size]
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py b/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py
index f978ae58bac912eab6ca6a6524f9cc8ef6cb2108..f3a231e7b28343eed6a546113ded20c20247d40f 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py
@@ -307,9 +307,6 @@ class SimpleAttention(fluid.dygraph.Layer):
             act=None,
             bias_attr=False)
 
-    def _build_once(self, encoder_vec, encoder_proj, decoder_state):
-        pass
-
     def forward(self, encoder_vec, encoder_proj, decoder_state):
 
         decoder_state_fc = self.fc_1(decoder_state)
@@ -358,10 +355,6 @@ class GRUDecoderWithAttention(fluid.dygraph.Layer):
 
         self.decoder_size = decoder_size
 
-    def _build_once(self, target_embedding, encoder_vec, encoder_proj,
-                    decoder_boot):
-        pass
-
     def forward(self, target_embedding, encoder_vec, encoder_proj,
                 decoder_boot):
         res = []
@@ -402,9 +395,6 @@ class OCRAttention(fluid.dygraph.Layer):
         self.gru_decoder_with_attention = GRUDecoderWithAttention(
             self.full_name(), Config.decoder_size, Config.num_classes)
 
-    def _build_once(self, inputs, label_in):
-        pass
-
     def forward(self, inputs, label_in):
         gru_backward, encoded_vector, encoded_proj = self.encoder_net(inputs)
         backward_first = fluid.layers.slice(
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py
index 088d36be2327a91da0efc639d7f970ed9e43d151..5f6c5b1cb6a5a641b23dbbd82b98c78313efb1ca 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py
@@ -44,7 +44,7 @@ class SimpleLSTMRNN(fluid.Layer):
         self.cell_array = []
         self.hidden_array = []
 
-    def build_once(self, input_embedding, init_hidden=None, init_cell=None):
+    def _build_once(self, input_embedding, init_hidden=None, init_cell=None):
         self.weight_1_arr = []
         self.weight_2_arr = []
         self.bias_arr = []
@@ -176,9 +176,6 @@ class PtbModel(fluid.Layer):
             default_initializer=fluid.initializer.UniformInitializer(
                 low=-self.init_scale, high=self.init_scale))
 
-    def build_once(self, input, label, init_hidden, init_cell):
-        pass
-
     def forward(self, input, label, init_hidden, init_cell):
         init_h = fluid.layers.reshape(
             init_hidden, shape=[self.num_layers, -1, self.hidden_size])
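This rename makes the lazy-build hook private API: Layer.__call__ still invokes it exactly once, before the first forward(), so any subclass that creates shape-dependent parameters in build_once must rename its override to _build_once, as the updated tests above do. A minimal sketch of what such a user-defined layer looks like against the fluid dygraph API of this branch; MyFC is a hypothetical example for illustration, not part of this patch:

    import numpy as np
    import paddle.fluid as fluid

    class MyFC(fluid.dygraph.Layer):
        """Hypothetical layer: builds its weight lazily from the input shape."""

        def __init__(self, name_scope, output_dim):
            super(MyFC, self).__init__(name_scope)
            self._output_dim = output_dim

        def _build_once(self, input):
            # Layer.__call__ runs this hook exactly once, before the first
            # forward(), so the parameter shape may depend on the input shape.
            self._w = self.create_parameter(
                attr=None,
                shape=[input.shape[1], self._output_dim],
                dtype='float32',
                is_bias=False)

        def forward(self, input):
            return fluid.layers.matmul(input, self._w)

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(np.ones([4, 8], dtype='float32'))
        y = MyFC('my_fc', output_dim=2)(x)  # _build_once fires, then forward

Layers that never needed the hook are unaffected: the base class provides a no-op, which is why the empty overrides in the OCR-attention and PTB-RNN tests could simply be deleted.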