From cecea8e6f589efc23cf2a18b551987e1316b67c4 Mon Sep 17 00:00:00 2001 From: Yiqun Liu Date: Wed, 15 Dec 2021 19:52:03 +0800 Subject: [PATCH] Change a comment in pten header to avoid the disturb to op benchmark ci. (#38165) test=document_fix --- paddle/pten/kernels/hybird/cpu/elementwise.h | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/paddle/pten/kernels/hybird/cpu/elementwise.h b/paddle/pten/kernels/hybird/cpu/elementwise.h index e8213c8b45d..d503957a762 100644 --- a/paddle/pten/kernels/hybird/cpu/elementwise.h +++ b/paddle/pten/kernels/hybird/cpu/elementwise.h @@ -130,16 +130,15 @@ void CommonElementwiseBroadcastForward( is_xsize_larger); } -// It is a common implementation to compute binary calculation with the support -// of broadcast, supporting both CPU and GPU. -// - CPU implementation cannot support the case when x needs broadcast, thus -// this function need to be called with XxxFunctor and XxxInverseFunctor, -// like paddle/fluid/operators/elementwise/elementwise_add_op.h#L49 - L55. -// - GPU implementation supports all the broadcast cases, thus there is no need -// to define and call with XxxInverseFunctor. +// It is a common CPU implementation to compute binary calculation with the +// support of broadcast. Note: +// 1. CPU implementation cannot support the case when x needs broadcast, thus +// this function needs to be called with XxxFunctor and XxxInverseFunctor, +// like AddFunctor and InverseAddFunctor. +// 2. The corresponding GPU implementation supports all the broadcast cases, +// thus there is no need to define and call with XxxInverseFunctor. // TODO(liuyiqun): optimize the CPU implementation to support all broadcast // cases and avoid the need of XxxInverseFunctor. - template void ElementwiseCompute(const paddle::platform::CPUDeviceContext &dev_ctx, const DenseTensor &x, -- GitLab