diff --git a/paddle/phi/kernels/reduce_all_kernel.cc b/paddle/phi/kernels/reduce_all_kernel.cc
index 9b4515ee2909f73f4c8a21463ca5d17effc7e281..5b8d2cbecca5f5c3b85de2f9bac96bc51a0e319b 100644
--- a/paddle/phi/kernels/reduce_all_kernel.cc
+++ b/paddle/phi/kernels/reduce_all_kernel.cc
@@ -26,6 +26,9 @@ void AllKernel(const Context& dev_ctx,
                bool keep_dim,
                DenseTensor* out) {
   bool reduce_all = false;
+  if (dims.size() == 0 || static_cast<int>(dims.size()) == x.dims().size()) {
+    reduce_all = true;
+  }
   AllRawKernel<T>(dev_ctx, x, dims, keep_dim, reduce_all, out);
 }
 
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 53994eed80f107b0a874e695bb67e9b6d13db1e9..b5615ee1bb7423c18143e441a3de825e249e38a3 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -5032,6 +5032,10 @@ def reduce_all(input, dim=None, keep_dim=False, name=None):
     """
     if dim is not None and not isinstance(dim, list):
         dim = [dim]
+
+    if in_dygraph_mode():
+        return _C_ops.all(input, dim if dim is not None else [], keep_dim)
+
     check_variable_and_dtype(input, 'input', ('bool'), 'reduce_all')
     helper = LayerHelper('reduce_all', **locals())
     out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())