diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 054feae29461aacb0702488f258e6117bd92b80e..ef3ee0a7cd34d6abd19a34d56602911d61756223 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -4298,14 +4298,16 @@ def reduce_sum(input, dim=None, keep_dim=False, name=None):
         dim = [dim]
 
     if in_dygraph_mode():
-        reduce_all = True if dim == None or dim == [] else False
+        reduce_all = True if dim == None or dim == [] or len(dim) == len(
+            input.shape) else False
         dim = dim if dim != None and dim != [] else [0]
         return core.ops.reduce_sum(input, 'dim', dim, 'keep_dim', keep_dim,
                                    'reduce_all', reduce_all)
     attrs = {
         'dim': dim if dim != None and dim != [] else [0],
         'keep_dim': keep_dim,
-        'reduce_all': True if dim == None or dim == [] else False
+        'reduce_all': True
+        if dim == None or dim == [] or len(dim) == len(input.shape) else False
     }
     check_variable_and_dtype(
         input, 'input', ['float32', 'float64', 'int32', 'int64'], 'reduce_sum')
@@ -4373,14 +4375,16 @@ def reduce_mean(input, dim=None, keep_dim=False, name=None):
         dim = [dim]
 
     if in_dygraph_mode():
-        reduce_all = True if dim == None or dim == [] else False
+        reduce_all = True if dim == None or dim == [] or len(dim) == len(
+            input.shape) else False
         dim = dim if dim != None and dim != [] else [0]
         return core.ops.reduce_mean(input, 'dim', dim, 'keep_dim', keep_dim,
                                     'reduce_all', reduce_all)
     attrs = {
         'dim': dim if dim != None and dim != [] else [0],
         'keep_dim': keep_dim,
-        'reduce_all': True if dim == None or dim == [] else False
+        'reduce_all': True
+        if dim == None or dim == [] or len(dim) == len(input.shape) else False
     }
     check_variable_and_dtype(
         input, 'input', ['float32', 'float64', 'int32', 'int64'], 'reduce_mean')
@@ -4450,7 +4454,8 @@ def reduce_max(input, dim=None, keep_dim=False, name=None):
         attrs={
             'dim': dim if dim != None and dim != [] else [0],
             'keep_dim': keep_dim,
-            'reduce_all': True if dim == None or dim == [] else False
+            'reduce_all': True if dim == None or dim == [] or
+            len(dim) == len(input.shape) else False
         })
     return out
 
@@ -4511,7 +4516,8 @@ def reduce_min(input, dim=None, keep_dim=False, name=None):
         attrs={
             'dim': dim if dim != None and dim != [] else [0],
            'keep_dim': keep_dim,
-            'reduce_all': True if dim == None or dim == [] else False
+            'reduce_all': True if dim == None or dim == [] or
+            len(dim) == len(input.shape) else False
         })
     return out
 
@@ -4573,7 +4579,8 @@ def reduce_prod(input, dim=None, keep_dim=False, name=None):
         attrs={
             'dim': dim if dim != None and dim != [] else [0],
             'keep_dim': keep_dim,
-            'reduce_all': True if dim == None or dim == [] else False
+            'reduce_all': True if dim == None or dim == [] or
+            len(dim) == len(input.shape) else False
         })
     return out
 
@@ -4631,7 +4638,8 @@ def reduce_all(input, dim=None, keep_dim=False, name=None):
         attrs={
             'dim': dim if dim != None and dim != [] else [0],
             'keep_dim': keep_dim,
-            'reduce_all': True if dim == None or dim == [] else False
+            'reduce_all': True if dim == None or dim == [] or
+            len(dim) == len(input.shape) else False
         })
     return out
 
@@ -4689,7 +4697,8 @@ def reduce_any(input, dim=None, keep_dim=False, name=None):
         attrs={
             'dim': dim if dim != None and dim != [] else [0],
             'keep_dim': keep_dim,
-            'reduce_all': True if dim == None or dim == [] else False
+            'reduce_all': True if dim == None or dim == [] or
+            len(dim) == len(input.shape) else False
         })
     return out
 
diff --git a/python/paddle/fluid/tests/unittests/test_variance_layer.py b/python/paddle/fluid/tests/unittests/test_variance_layer.py
index a007093fbdb31b6b356201fb8fda7ab87b6f9f08..569f064db8549b5f28bc751a36cbe4b379636379 100644
--- a/python/paddle/fluid/tests/unittests/test_variance_layer.py
+++ b/python/paddle/fluid/tests/unittests/test_variance_layer.py
@@ -56,7 +56,7 @@ class TestVarianceLayer(unittest.TestCase):
         return np.var(self._input, axis=axis, keepdims=keepdim, ddof=ddof)
 
     def test_equal(self):
-        places = [fluid.CPUPlace()]
+        places = [fluid.CPUPlace()]
         if fluid.core.is_compiled_with_cuda():
             places.append(fluid.CUDAPlace(0))
         for place in places:
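The patch above makes the fluid.layers reduce_* wrappers set reduce_all=True not only when dim is None or [], but also when the dim list names every axis of the input. A minimal sketch of the intended behavior, assuming a Paddle build that includes this patch (the tensor name x and its values are illustrative only, not part of the change):

import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard(fluid.CPUPlace()):
    # A 2-D input; dim=[0, 1] lists every axis, so after the patch the op
    # takes the reduce_all path, just like calling it with dim=None.
    x = fluid.dygraph.to_variable(np.arange(6, dtype='float32').reshape(2, 3))
    full = fluid.layers.reduce_sum(x)               # dim=None  -> reduce_all
    all_axes = fluid.layers.reduce_sum(x, dim=[0, 1])  # now also reduce_all
    # Both results should be the scalar 15.0 (= 0+1+2+3+4+5).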