From 60ffc22026f43afd34a90b0b06d43fe7e0b2701b Mon Sep 17 00:00:00 2001
From: Leo Chen
Date: Tue, 1 Sep 2020 19:22:38 +0800
Subject: [PATCH] Refine bernoulli and unsqueeze op (#26842)

* add check for bernoulli and register bool for unsqueeze

* follow comments
---
 paddle/fluid/operators/bernoulli_op.cu    |  4 ++++
 paddle/fluid/operators/bernoulli_op.h     | 10 ++++++----
 paddle/fluid/operators/unsqueeze_op.cc    |  6 ++++++
 paddle/fluid/operators/unsqueeze_op.cu.cc |  4 ++++
 python/paddle/tensor/manipulation.py      |  4 ++--
 5 files changed, 22 insertions(+), 6 deletions(-)

diff --git a/paddle/fluid/operators/bernoulli_op.cu b/paddle/fluid/operators/bernoulli_op.cu
index d0837071d4..f665d2dd0e 100644
--- a/paddle/fluid/operators/bernoulli_op.cu
+++ b/paddle/fluid/operators/bernoulli_op.cu
@@ -31,6 +31,10 @@ struct BernoulliCudaFunctor {
   __host__ __device__ BernoulliCudaFunctor(int seed) : seed_(seed) {}
 
   __host__ __device__ T operator()(const unsigned int n, const T p) const {
+    // NOTE(zhiqiu): currently, PADDLE_ENFORCE in cuda kernel may print several
+    // lines of error messages if it fails, and it should be refined.
+    PADDLE_ENFORCE(p >= 0.0 && p <= 1.0,
+                   "The probability should be >= 0 and <= 1, but got %f", p);
     thrust::minstd_rand rng;
     rng.seed(seed_);
     thrust::uniform_real_distribution<T> dist(0.0, 1.0);
diff --git a/paddle/fluid/operators/bernoulli_op.h b/paddle/fluid/operators/bernoulli_op.h
index 06a83ada17..40f285d11f 100644
--- a/paddle/fluid/operators/bernoulli_op.h
+++ b/paddle/fluid/operators/bernoulli_op.h
@@ -25,10 +25,12 @@ namespace operators {
 
 template <typename T>
 inline HOSTDEVICE T BernoulliFunctor(T p, T rand) {
-  PADDLE_ENFORCE_LE(p, 1, platform::errors::OutOfRange(
-                              "The probability should be <= 1, but got %f", p));
-  PADDLE_ENFORCE_GE(p, 0, platform::errors::OutOfRange(
-                              "The probability should be >= 1, but got %f", p));
+  PADDLE_ENFORCE_LE(p, 1.0,
+                    platform::errors::OutOfRange(
+                        "The probability should be <= 1, but got %f", p));
+  PADDLE_ENFORCE_GE(p, 0.0,
+                    platform::errors::OutOfRange(
+                        "The probability should be >= 0, but got %f", p));
   return static_cast<T>(rand < p);
 }
 
diff --git a/paddle/fluid/operators/unsqueeze_op.cc b/paddle/fluid/operators/unsqueeze_op.cc
index c33e7c6068..ee1361e361 100644
--- a/paddle/fluid/operators/unsqueeze_op.cc
+++ b/paddle/fluid/operators/unsqueeze_op.cc
@@ -13,9 +13,11 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "paddle/fluid/operators/unsqueeze_op.h"
+
 #include <memory>
 #include <string>
 #include <vector>
+
 #include "paddle/fluid/framework/op_registry.h"
 
 namespace paddle {
@@ -327,6 +329,7 @@ REGISTER_OPERATOR(unsqueeze2_grad, ops::Unsqueeze2GradOp,
 REGISTER_OP_CPU_KERNEL(
     unsqueeze, ops::UnsqueezeKernel<paddle::platform::CPUDeviceContext, float>,
     ops::UnsqueezeKernel<paddle::platform::CPUDeviceContext, double>,
+    ops::UnsqueezeKernel<paddle::platform::CPUDeviceContext, bool>,
     ops::UnsqueezeKernel<paddle::platform::CPUDeviceContext, int>,
     ops::UnsqueezeKernel<paddle::platform::CPUDeviceContext, int8_t>,
     ops::UnsqueezeKernel<paddle::platform::CPUDeviceContext, int64_t>);
@@ -334,12 +337,14 @@ REGISTER_OP_CPU_KERNEL(
     unsqueeze_grad,
     ops::UnsqueezeGradKernel<paddle::platform::CPUDeviceContext, float>,
     ops::UnsqueezeGradKernel<paddle::platform::CPUDeviceContext, double>,
+    ops::UnsqueezeGradKernel<paddle::platform::CPUDeviceContext, bool>,
     ops::UnsqueezeGradKernel<paddle::platform::CPUDeviceContext, int>,
     ops::UnsqueezeGradKernel<paddle::platform::CPUDeviceContext, int8_t>,
     ops::UnsqueezeGradKernel<paddle::platform::CPUDeviceContext, int64_t>);
 REGISTER_OP_CPU_KERNEL(
     unsqueeze2, ops::UnsqueezeKernel<paddle::platform::CPUDeviceContext, float>,
     ops::UnsqueezeKernel<paddle::platform::CPUDeviceContext, double>,
+    ops::UnsqueezeKernel<paddle::platform::CPUDeviceContext, bool>,
     ops::UnsqueezeKernel<paddle::platform::CPUDeviceContext, int>,
     ops::UnsqueezeKernel<paddle::platform::CPUDeviceContext, int8_t>,
     ops::UnsqueezeKernel<paddle::platform::CPUDeviceContext, int64_t>);
@@ -347,6 +352,7 @@ REGISTER_OP_CPU_KERNEL(
     unsqueeze2_grad,
     ops::Unsqueeze2GradKernel<paddle::platform::CPUDeviceContext, float>,
     ops::Unsqueeze2GradKernel<paddle::platform::CPUDeviceContext, double>,
+    ops::Unsqueeze2GradKernel<paddle::platform::CPUDeviceContext, bool>,
     ops::Unsqueeze2GradKernel<paddle::platform::CPUDeviceContext, int>,
     ops::Unsqueeze2GradKernel<paddle::platform::CPUDeviceContext, int8_t>,
     ops::Unsqueeze2GradKernel<paddle::platform::CPUDeviceContext, int64_t>);
diff --git a/paddle/fluid/operators/unsqueeze_op.cu.cc b/paddle/fluid/operators/unsqueeze_op.cu.cc
index 3258de53b8..0e8f47a692 100644
--- a/paddle/fluid/operators/unsqueeze_op.cu.cc
+++ b/paddle/fluid/operators/unsqueeze_op.cu.cc
@@ -21,6 +21,7 @@ REGISTER_OP_CUDA_KERNEL(
     unsqueeze, ops::UnsqueezeKernel<paddle::platform::CUDADeviceContext, float>,
     ops::UnsqueezeKernel<paddle::platform::CUDADeviceContext, double>,
     ops::UnsqueezeKernel<paddle::platform::CUDADeviceContext, plat::float16>,
+    ops::UnsqueezeKernel<paddle::platform::CUDADeviceContext, bool>,
     ops::UnsqueezeKernel<paddle::platform::CUDADeviceContext, int>,
     ops::UnsqueezeKernel<paddle::platform::CUDADeviceContext, int8_t>,
     ops::UnsqueezeKernel<paddle::platform::CUDADeviceContext, int64_t>);
@@ -30,6 +31,7 @@ REGISTER_OP_CUDA_KERNEL(
     ops::UnsqueezeGradKernel<paddle::platform::CUDADeviceContext, double>,
     ops::UnsqueezeGradKernel<paddle::platform::CUDADeviceContext,
                              plat::float16>,
+    ops::UnsqueezeGradKernel<paddle::platform::CUDADeviceContext, bool>,
     ops::UnsqueezeGradKernel<paddle::platform::CUDADeviceContext, int>,
     ops::UnsqueezeGradKernel<paddle::platform::CUDADeviceContext, int8_t>,
     ops::UnsqueezeGradKernel<paddle::platform::CUDADeviceContext, int64_t>);
@@ -38,6 +40,7 @@ REGISTER_OP_CUDA_KERNEL(
     ops::UnsqueezeKernel<paddle::platform::CUDADeviceContext, float>,
     ops::UnsqueezeKernel<paddle::platform::CUDADeviceContext, double>,
     ops::UnsqueezeKernel<paddle::platform::CUDADeviceContext, plat::float16>,
+    ops::UnsqueezeKernel<paddle::platform::CUDADeviceContext, bool>,
     ops::UnsqueezeKernel<paddle::platform::CUDADeviceContext, int>,
     ops::UnsqueezeKernel<paddle::platform::CUDADeviceContext, int8_t>,
     ops::UnsqueezeKernel<paddle::platform::CUDADeviceContext, int64_t>);
@@ -47,6 +50,7 @@ REGISTER_OP_CUDA_KERNEL(
     ops::Unsqueeze2GradKernel<paddle::platform::CUDADeviceContext, double>,
     ops::Unsqueeze2GradKernel<paddle::platform::CUDADeviceContext,
                               plat::float16>,
+    ops::Unsqueeze2GradKernel<paddle::platform::CUDADeviceContext, bool>,
     ops::Unsqueeze2GradKernel<paddle::platform::CUDADeviceContext, int>,
     ops::Unsqueeze2GradKernel<paddle::platform::CUDADeviceContext, int8_t>,
     ops::Unsqueeze2GradKernel<paddle::platform::CUDADeviceContext, int64_t>);
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index 845d2cf4d1..5a01fff88c 100644
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -433,8 +433,8 @@ def stack(x, axis=0, name=None):
            [5.0, 6.0] ] ]
 
     Args:
-        x (Tensor|list[Tensor]): Input ``x`` can be a single tensor, or a ``list`` of tensors.
-                                 If ``x`` is a ``list``, the Tensors in ``x``
+        x (Tensor|list[Tensor]|tuple[Tensor]): Input ``x`` can be a single tensor, or a ``list`` or ``tuple`` of tensors.
+                                 If ``x`` is a ``list`` or ``tuple`` , the Tensors in ``x``
                                  must be of the same shape and dtype. Supported data types: float32, float64, int32, int64.
         axis (int, optional): The axis along which all inputs are stacked. ``axis`` range is ``[-(R+1), R+1)``,
                               where ``R`` is the number of dimensions of the first input tensor ``x[0]``.
-- 
GitLab
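
Usage sketch (reviewer note, not part of the patch): a minimal example of the behavior this change targets, assuming the Paddle 2.0 dygraph API (paddle.bernoulli, paddle.unsqueeze, paddle.to_tensor); the tensor values are illustrative only.

    import paddle

    # With the added check, probabilities outside [0, 1] now trigger a
    # PADDLE_ENFORCE error instead of silently sampling from invalid input.
    p = paddle.to_tensor([0.2, 0.5, 0.9])
    samples = paddle.bernoulli(p)  # each element is 0.0 or 1.0

    # With the bool kernels registered, a boolean mask can be unsqueezed
    # directly, without casting to another dtype first.
    mask = paddle.to_tensor([True, False, True])
    mask_2d = paddle.unsqueeze(mask, axis=0)  # shape [1, 3], dtype bool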