From d4d5052fa6c94b51ace4c436ad1ee562fc69e138 Mon Sep 17 00:00:00 2001
From: lujun
Date: Fri, 29 Mar 2019 12:18:37 +0800
Subject: [PATCH] fix merge for move dir, fix utest error, test=develop

---
 python/paddle/fluid/dygraph/nn.py              | 27 ++++++++++---------
 .../fluid/tests/unittests/test_layers.py       | 13 +++------
 2 files changed, 19 insertions(+), 21 deletions(-)

diff --git a/python/paddle/fluid/dygraph/nn.py b/python/paddle/fluid/dygraph/nn.py
index 8600cd9f06..b5d11033c2 100644
--- a/python/paddle/fluid/dygraph/nn.py
+++ b/python/paddle/fluid/dygraph/nn.py
@@ -19,7 +19,7 @@ from six.moves import reduce
 from .. import core
 from ..layers import utils
 from . import layers
-from ..framework import Variable
+from ..framework import Variable, _in_dygraph_mode
 from ..param_attr import ParamAttr
 from ..initializer import Normal, Constant, NumpyArrayInitializer
 import numpy as np
@@ -1560,6 +1560,8 @@ class SequenceConv(layers.Layer):
                  bias_attr=None,
                  param_attr=None,
                  act=None):
+        assert not _in_dygraph_mode(), \
+            "SequenceConv is not supported by dynamic graph mode yet!"
         super(SequenceConv, self).__init__(name_scope)
         self._num_filters = num_filters
         self._filter_size = filter_size
@@ -1569,12 +1571,10 @@ class SequenceConv(layers.Layer):
         self._param_attr = param_attr
 
     def _build_once(self, input):
-        self._dtype = self._helper.input_dtype(input)
-        print(self._filter_size)
         filter_shape = [self._filter_size * input.shape[1], self._num_filters]
         self._filter_param = self.create_parameter(
-            attr=self.param_attr, shape=filter_shape, dtype=self._dtype)
+            attr=self._param_attr, shape=filter_shape, dtype=self._dtype)
 
     def forward(self, input):
         pre_bias = self._helper.create_variable_for_type_inference(self._dtype)
 
@@ -1600,23 +1600,28 @@ class RowConv(layers.Layer):
                  future_context_size,
                  param_attr=None,
                  act=None):
+        assert not _in_dygraph_mode(), \
+            "RowConv is not supported by dynamic graph mode yet!"
         super(RowConv, self).__init__(name_scope)
         self._act = act
         self._param_attr = param_attr
         self._future_context_size = future_context_size
 
-    def _buils_once(self, input):
+    def _build_once(self, input):
         self._dtype = self._helper.input_dtype(input)
         filter_shape = [self._future_context_size + 1, input.shape[1]]
-        self._f = self.create_parameter(
-            attr=self._param_attr, shape=filter_shape, dtype=self._dtype)
+        self._filter_param = self.create_parameter(
+            attr=self._param_attr,
+            shape=filter_shape,
+            dtype=self._dtype,
+            is_bias=False)
 
     def forward(self, input):
         out = self._helper.create_variable_for_type_inference(self._dtype)
         self._helper.append_op(
             type='row_conv',
             inputs={'X': [input],
-                    'Filter': [self._f]},
+                    'Filter': [self._filter_param]},
             outputs={'Out': [out]})
         return self._helper.append_activation(out, act=self._act)
 
@@ -1665,7 +1670,7 @@ class GroupNorm(layers.Layer):
         if data_layout != 'NCHW':
             raise ValueError("unsupported data layout:" + data_layout)
 
-    def _buils_once(self, input):
+    def _build_once(self, input):
         self._dtype = self._helper.input_dtype(input)
         param_shape = [input.shape[1]]
         if self._bias_attr:
@@ -1690,10 +1695,8 @@ class GroupNorm(layers.Layer):
             inputs['Scale'] = self._scale
 
         # create output
-        mean_out = self._helper.create_variable(
+        mean_out = self._helper.create_variable_for_type_inference(
             dtype=self._dtype, stop_gradient=True)
-        self.create_variable(
-            name="mean_out", persistable=True, type=self._dtype)
         variance_out = self._helper.create_variable_for_type_inference(
             dtype=self._dtype, stop_gradient=True)
         group_norm_out = self._helper.create_variable_for_type_inference(
diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py
index a0ccfc4ae4..6873bec2f9 100644
--- a/python/paddle/fluid/tests/unittests/test_layers.py
+++ b/python/paddle/fluid/tests/unittests/test_layers.py
@@ -626,18 +626,13 @@ class TestLayer(LayerTest):
             static_ret2 = self.get_static_graph_result(
                 feed={
                     'X': fluid.create_lod_tensor(
-                        data=input,
-                        recursive_seq_lens=[[1, 1, 1]],
-                        place=place,
-                        with_lod=True)
+                        data=input, recursive_seq_lens=[[1, 1, 1]], place=place)
                 },
-                fetch_list=[ret])[0]
+                fetch_list=[ret],
+                with_lod=True)[0]
 
-        with self.dynamic_graph():
-            rowConv = nn.RowConv('RowConv', future_context_size=2)
-            dy_ret = rowConv(base.to_variable(input))
+        # TODO: dygraph can't support LODTensor
 
-        self.assertTrue(np.allclose(static_ret, dy_ret._numpy()))
        self.assertTrue(np.allclose(static_ret, static_ret2))
 
     def test_group_norm(self):
--
GitLab
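
A note on the new _in_dygraph_mode() asserts: SequenceConv and RowConv still build their parameters and ops through the static-graph helper, so the guard makes them fail fast in __init__ under dygraph mode instead of misbehaving at the first forward call. A minimal sketch of the resulting behaviour, assuming the fluid.dygraph.guard() context manager from the same paddle/fluid/dygraph tree this patch touches:

    import paddle.fluid as fluid
    from paddle.fluid.dygraph import nn

    with fluid.dygraph.guard():
        try:
            # The assert added in __init__ fires before any parameter
            # or op is created.
            row_conv = nn.RowConv('RowConv', future_context_size=2)
        except AssertionError as e:
            print(e)  # RowConv is not supported by dynamic graph mode yet!
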
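On the SequenceConv hunk: the constructor stores the attribute as self._param_attr, so the old attr=self.param_attr would have raised AttributeError the first time _build_once ran. Creation is deferred to _build_once at all because the filter shape depends on the feature width of the first input. A framework-free illustration of that deferred-build idiom (LazyFilter is a hypothetical name, not Paddle API):

    class LazyFilter(object):
        def __init__(self, num_filters, filter_size):
            self._num_filters = num_filters
            self._filter_size = filter_size
            self._filter_shape = None  # unknown until the first input arrives

        def __call__(self, input_shape):
            if self._filter_shape is None:
                # Mirrors SequenceConv._build_once: the filter height is
                # filter_size times the feature width of the input.
                self._filter_shape = [
                    self._filter_size * input_shape[1], self._num_filters
                ]
            return self._filter_shape

    print(LazyFilter(num_filters=8, filter_size=3)([None, 16]))  # [48, 8]
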
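On the test_layers.py hunk: with_lod is a keyword of the LayerTest helper get_static_graph_result, not of fluid.create_lod_tensor, which is presumably why the unit test failed before this fix. A sketch of the corrected tensor construction; the (3, 32) shape is illustrative, since only the outer dimension of 3 (three length-1 sequences) is implied by the hunk:

    import numpy as np
    import paddle.fluid as fluid

    place = fluid.CPUPlace()
    # recursive_seq_lens=[[1, 1, 1]] declares three sequences of length
    # one, so the data's outer dimension must be 3.
    data = np.random.random((3, 32)).astype('float32')
    lod_tensor = fluid.create_lod_tensor(
        data=data, recursive_seq_lens=[[1, 1, 1]], place=place)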