From f62bd3b490b151fce074d1cd11389161b1b0acbd Mon Sep 17 00:00:00 2001
From: huangjiyi <43315610+huangjiyi@users.noreply.github.com>
Date: Thu, 17 Nov 2022 11:29:36 +0800
Subject: [PATCH] [PHI decoupling] move "paddle/fluid/operators/math.h" to phi
 (#48062)

* rm "paddle/fluid/operators/math.h" in phi

* rm "paddle/fluid/operators/math.h" in fluid
---
 paddle/fluid/operators/cross_entropy_op.h     |  4 +--
 paddle/fluid/operators/dequantize_log_op.cu   |  1 -
 .../detection/sigmoid_focal_loss_op.cu        | 32 +++++++++++--------
 paddle/fluid/operators/math/cross_entropy.cu  | 13 ++++----
 .../sequence_ops/sequence_softmax_op.cu       |  6 ++--
 paddle/phi/kernels/cpu/bce_loss_kernel.cc     |  7 ++--
 .../phi/kernels/cpu/nll_loss_grad_kernel.cc   |  2 +-
 paddle/phi/kernels/funcs/functors.h           |  8 ++---
 .../operators => phi/kernels/funcs}/math.h    | 20 ++++++------
 paddle/phi/kernels/gpu/nll_loss.h             |  2 +-
 .../gpu/sigmoid_cross_entropy_with_logits.h   |  2 +-
 ...d_cross_entropy_with_logits_grad_kernel.cu |  4 +--
 ...igmoid_cross_entropy_with_logits_kernel.cu |  5 ++-
 paddle/phi/kernels/impl/selu_kernel_impl.h    |  4 +--
 14 files changed, 55 insertions(+), 55 deletions(-)
 rename paddle/{fluid/operators => phi/kernels/funcs}/math.h (69%)

diff --git a/paddle/fluid/operators/cross_entropy_op.h b/paddle/fluid/operators/cross_entropy_op.h
index 2949dc8d1f..4dcaf7b99f 100644
--- a/paddle/fluid/operators/cross_entropy_op.h
+++ b/paddle/fluid/operators/cross_entropy_op.h
@@ -15,9 +15,9 @@ limitations under the License. */
 #pragma once
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
-#include "paddle/fluid/operators/math.h"
 #include "paddle/fluid/operators/math/cross_entropy.h"
 #include "paddle/fluid/platform/for_range.h"
+#include "paddle/phi/kernels/funcs/math.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
 
 namespace paddle {
@@ -190,7 +190,7 @@ struct HardLabelCrossEntropyForwardFunctor {
                       label);
       auto match_x = x_[idx * feature_size_ + label];
-      y_[idx] = -math::TolerableValue<T>()(real_log(match_x));
+      y_[idx] = -math::TolerableValue<T>()(phi::funcs::real_log(match_x));
       match_x_[idx] = match_x;
     } else {
       y_[idx] = 0;
diff --git a/paddle/fluid/operators/dequantize_log_op.cu b/paddle/fluid/operators/dequantize_log_op.cu
index 360871f9e7..4a1976f6fd 100644
--- a/paddle/fluid/operators/dequantize_log_op.cu
+++ b/paddle/fluid/operators/dequantize_log_op.cu
@@ -13,7 +13,6 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "paddle/fluid/operators/dequantize_log_op.h"
-#include "paddle/fluid/operators/math.h"
 #include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
 #include "paddle/phi/core/hostdevice.h"
 
diff --git a/paddle/fluid/operators/detection/sigmoid_focal_loss_op.cu b/paddle/fluid/operators/detection/sigmoid_focal_loss_op.cu
index bad93fd22b..76a47581e9 100644
--- a/paddle/fluid/operators/detection/sigmoid_focal_loss_op.cu
+++ b/paddle/fluid/operators/detection/sigmoid_focal_loss_op.cu
@@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 #include "paddle/fluid/operators/detection/sigmoid_focal_loss_op.h"
-#include "paddle/fluid/operators/math.h"
 #include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
 #include "paddle/phi/core/hostdevice.h"
+#include "paddle/phi/kernels/funcs/math.h"
 
 namespace paddle {
 namespace operators {
@@ -55,15 +55,16 @@ __global__ void GPUSigmoidFocalLossForward(const T *x_data,
       T s_pos = alpha / fg_num;
 
       // p = 1. / 1. + expf(-x)
-      T p = 1. / (1. + real_exp(-x));
+      T p = 1. / (1. + phi::funcs::real_exp(-x));
 
       // (1 - p)**gamma * log(p)
       T term_pos = std::pow(static_cast<T>(1. - p), gamma) *
-                   real_log(p > FLT_MIN ? p : FLT_MIN);
+                   phi::funcs::real_log(p > FLT_MIN ? p : FLT_MIN);
 
       // p**gamma * log(1 - p)
-      T term_neg =
-          std::pow(p, gamma) *
-          (-1. * x * (x >= 0) - real_log(1. + real_exp(x - 2. * x * (x >= 0))));
+      T term_neg = std::pow(p, gamma) *
+                   (-1. * x * (x >= 0) -
+                    phi::funcs::real_log(
+                        1. + phi::funcs::real_exp(x - 2. * x * (x >= 0))));
 
       out_data[i] = 0.0;
       out_data[i] += -c_pos * term_pos * s_pos;
@@ -96,17 +97,20 @@ __global__ void GPUSigmoidFocalLossBackward(const T *x_data,
       T c_pos = static_cast<T>(g == (d + 1));
       T c_neg = static_cast<T>((g != -1) & (g != (d + 1)));
 
-      T p = 1. / (1. + real_exp(-x));
+      T p = 1. / (1. + phi::funcs::real_exp(-x));
 
       // (1-p)**g * (1 - p - g*p*log(p))
-      T term_pos = std::pow(static_cast<T>(1. - p), gamma) *
-                   (1. - p - (p * gamma * real_log(p > FLT_MIN ? p : FLT_MIN)));
+      T term_pos =
+          std::pow(static_cast<T>(1. - p), gamma) *
+          (1. - p -
+           (p * gamma * phi::funcs::real_log(p > FLT_MIN ? p : FLT_MIN)));
 
       // (p**g) * (g*(1-p)*log(1-p) - p)
-      T term_neg =
-          std::pow(p, gamma) *
-          ((-1. * x * (x >= 0) - real_log(1. + real_exp(x - 2. * x * (x >= 0)))) *
-               (1. - p) * gamma -
-           p);
+      T term_neg = std::pow(p, gamma) *
+                   ((-1. * x * (x >= 0) -
+                     phi::funcs::real_log(
+                         1. + phi::funcs::real_exp(x - 2. * x * (x >= 0)))) *
+                        (1. - p) * gamma -
+                    p);
 
       dx_data[i] = 0.0;
       dx_data[i] += -c_pos * s_pos * term_pos;
diff --git a/paddle/fluid/operators/math/cross_entropy.cu b/paddle/fluid/operators/math/cross_entropy.cu
index 0e5b955424..478c4e0cd6 100644
--- a/paddle/fluid/operators/math/cross_entropy.cu
+++ b/paddle/fluid/operators/math/cross_entropy.cu
@@ -14,10 +14,10 @@ limitations under the License. */
 
 #include "paddle/fluid/operators/math/cross_entropy.h"
 #include "paddle/fluid/framework/convert_utils.h"
-#include "paddle/fluid/operators/math.h"
 #include "paddle/fluid/platform/device/gpu/gpu_device_function.h"
 #include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
+#include "paddle/phi/kernels/funcs/math.h"
 
 namespace paddle {
 namespace operators {
@@ -39,9 +39,10 @@ __global__ void CrossEntropyKernel(T* Y,
                   D,
                   ignore_index,
                   lbl);
-    Y[i] = ignore_index == lbl
-               ? static_cast<T>(0)
-               : -math::TolerableValue<T>()(real_log(X[i * D + lbl]));
+    Y[i] =
+        ignore_index == lbl
+            ? static_cast<T>(0)
+            : -math::TolerableValue<T>()(phi::funcs::real_log(X[i * D + lbl]));
   }
 }
 
@@ -56,7 +57,7 @@ __global__ void SoftCrossEntropyKernel(T* Y,
   int idx = blockIdx.x * class_num + tid;
   int end = blockIdx.x * class_num + class_num;
   for (; idx < end; idx += blockDim.x) {
-    val += math::TolerableValue<T>()(real_log(X[idx])) * label[idx];
+    val += math::TolerableValue<T>()(phi::funcs::real_log(X[idx])) * label[idx];
   }
 
   val = paddle::platform::reduceSum(val, tid, blockDim.x);
@@ -152,7 +153,7 @@ void CrossEntropyFunctor<phi::GPUContext, T>::operator()(
 
 template class CrossEntropyFunctor<phi::GPUContext, float>;
 template class CrossEntropyFunctor<phi::GPUContext, double>;
-template class CrossEntropyFunctor<phi::GPUContext, platform::float16>;
+template class CrossEntropyFunctor<phi::GPUContext, phi::dtype::float16>;
 
 }  // namespace math
 }  // namespace operators
diff --git a/paddle/fluid/operators/sequence_ops/sequence_softmax_op.cu b/paddle/fluid/operators/sequence_ops/sequence_softmax_op.cu
index 29f562ec5e..e58cff60ae 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_softmax_op.cu
+++ b/paddle/fluid/operators/sequence_ops/sequence_softmax_op.cu
@@ -23,8 +23,8 @@ limitations under the License. */
 namespace cub = hipcub;
 #endif
 
-#include "paddle/fluid/operators/math.h"
 #include "paddle/fluid/operators/sequence_ops/sequence_softmax_op.h"
+#include "paddle/phi/kernels/funcs/math.h"
 
 namespace paddle {
 namespace operators {
@@ -67,7 +67,7 @@ __global__ void sequence_softmax_kernel(const T *in_data,
     T sum_data = 0;
     for (int tid = threadIdx.x; tid < span; tid += blockDim.x) {
       T ele = in_data[start + tid];
-      sum_data += real_exp(ele - shared_max_data);
+      sum_data += phi::funcs::real_exp(ele - shared_max_data);
     }
     sum_data = BlockReduce(temp_storage).Reduce(sum_data, cub::Sum());
@@ -79,7 +79,7 @@ __global__ void sequence_softmax_kernel(const T *in_data,
     // get final resit
     for (int tid = threadIdx.x; tid < span; tid += blockDim.x) {
       T ele = in_data[start + tid];
-      ele = real_exp(ele - shared_max_data) / shared_sum_data;
+      ele = phi::funcs::real_exp(ele - shared_max_data) / shared_sum_data;
       out_data[start + tid] = ele;
     }
   }
diff --git a/paddle/phi/kernels/cpu/bce_loss_kernel.cc b/paddle/phi/kernels/cpu/bce_loss_kernel.cc
index 9d62fabcbe..7b98016201 100644
--- a/paddle/phi/kernels/cpu/bce_loss_kernel.cc
+++ b/paddle/phi/kernels/cpu/bce_loss_kernel.cc
@@ -16,9 +16,9 @@
 
 #include <algorithm>  // for max
 
-#include "paddle/fluid/operators/math.h"
 #include "paddle/phi/backends/cpu/cpu_context.h"
 #include "paddle/phi/core/kernel_registry.h"
+#include "paddle/phi/kernels/funcs/math.h"
 
 namespace phi {
 
@@ -47,10 +47,9 @@ void BCELossKernel(const Context& dev_ctx,
                       "Illegal input, input must be less than or equal to 1"));
     out_data[i] = (label_data[i] - static_cast<T>(1)) *
-            std::max(paddle::operators::real_log(static_cast<T>(1) - x_data[i]),
-                     (T)(-100)) -
-        label_data[i] *
-            std::max(paddle::operators::real_log(x_data[i]), (T)(-100));
+            std::max(phi::funcs::real_log(static_cast<T>(1) - x_data[i]),
+                     (T)(-100)) -
+        label_data[i] * std::max(phi::funcs::real_log(x_data[i]), (T)(-100));
   }
 }
 
diff --git a/paddle/phi/kernels/cpu/nll_loss_grad_kernel.cc b/paddle/phi/kernels/cpu/nll_loss_grad_kernel.cc
index 9048e87d04..c84b3d4efb 100644
--- a/paddle/phi/kernels/cpu/nll_loss_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/nll_loss_grad_kernel.cc
@@ -17,9 +17,9 @@
 #include <memory>
 #include <string>
 
-#include "paddle/fluid/operators/math.h"
 #include "paddle/phi/backends/cpu/cpu_context.h"
 #include "paddle/phi/core/kernel_registry.h"
+#include "paddle/phi/kernels/funcs/math.h"
 
 namespace phi {
 template <typename T>
diff --git a/paddle/phi/kernels/funcs/functors.h b/paddle/phi/kernels/funcs/functors.h
index d518a877b2..2e6fe8b2d7 100644
--- a/paddle/phi/kernels/funcs/functors.h
+++ b/paddle/phi/kernels/funcs/functors.h
@@ -15,7 +15,7 @@ limitations under the License. */
 #pragma once
 
 #include "paddle/fluid/operators/amp/fp16_type_traits.h"
-#include "paddle/fluid/operators/math.h"
+#include "paddle/phi/kernels/funcs/math.h"
 
 namespace phi {
 namespace funcs {
@@ -89,8 +89,7 @@ struct TanhFunctor {
     // y = 2 / (1 + e^-2x) - 1
     T t0 = static_cast<T>(2) * x;
     T t1 = (t0 < kMin) ? kMin : ((t0 > kMax) ? kMax : t0);
-    return static_cast<T>(2) /
-               (static_cast<T>(1) + paddle::operators::real_exp(-t1)) -
+    return static_cast<T>(2) / (static_cast<T>(1) + phi::funcs::real_exp(-t1)) -
            static_cast<T>(1);
   }
 };
@@ -111,8 +110,7 @@ struct SigmoidFunctor {
   inline HOSTDEVICE T operator()(T x) {
     // y = 1 / (1 + e^-x)
     T tmp = (x < kMin) ? kMin : ((x > kMax) ? kMax : x);
-    return static_cast<T>(1) /
-           (static_cast<T>(1) + paddle::operators::real_exp(-tmp));
+    return static_cast<T>(1) / (static_cast<T>(1) + phi::funcs::real_exp(-tmp));
   }
 };
 
diff --git a/paddle/fluid/operators/math.h b/paddle/phi/kernels/funcs/math.h
similarity index 69%
rename from paddle/fluid/operators/math.h
rename to paddle/phi/kernels/funcs/math.h
index 47281fb028..f8c373badf 100644
--- a/paddle/fluid/operators/math.h
+++ b/paddle/phi/kernels/funcs/math.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -15,22 +15,22 @@
 #pragma once
 
 #include "math.h"  // NOLINT
-#include "paddle/fluid/platform/float16.h"
+#include "paddle/phi/common/float16.h"
 #include "paddle/phi/core/hostdevice.h"
 
-namespace paddle {
-namespace operators {
+namespace phi {
+namespace funcs {
 
-inline HOSTDEVICE platform::float16 real_exp(platform::float16 x) {
-  return static_cast<platform::float16>(::expf(static_cast<float>(x)));
+inline HOSTDEVICE phi::dtype::float16 real_exp(phi::dtype::float16 x) {
+  return static_cast<phi::dtype::float16>(::expf(static_cast<float>(x)));
 }
 
 inline HOSTDEVICE float real_exp(float x) { return ::expf(x); }
 
 inline HOSTDEVICE double real_exp(double x) { return ::exp(x); }
 
-inline HOSTDEVICE platform::float16 real_log(platform::float16 x) {
-  return static_cast<platform::float16>(::logf(static_cast<float>(x)));
+inline HOSTDEVICE phi::dtype::float16 real_log(phi::dtype::float16 x) {
+  return static_cast<phi::dtype::float16>(::logf(static_cast<float>(x)));
 }
 
 inline HOSTDEVICE float real_log(float x) { return ::logf(x); }
@@ -41,5 +41,5 @@ inline HOSTDEVICE float real_min(float x, float y) { return ::fminf(x, y); }
 
 inline HOSTDEVICE double real_min(double x, double y) { return ::fmin(x, y); }
 
-}  // namespace operators
-}  // namespace paddle
+}  // namespace funcs
+}  // namespace phi
diff --git a/paddle/phi/kernels/gpu/nll_loss.h b/paddle/phi/kernels/gpu/nll_loss.h
index 37a67b4767..9d063d0ef4 100644
--- a/paddle/phi/kernels/gpu/nll_loss.h
+++ b/paddle/phi/kernels/gpu/nll_loss.h
@@ -19,10 +19,10 @@
 #include <algorithm>
 #include <functional>
 
-#include "paddle/fluid/operators/math.h"
 #include "paddle/phi/backends/gpu/gpu_primitives.h"
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/core/hostdevice.h"
+#include "paddle/phi/kernels/funcs/math.h"
 
 namespace phi {
 static constexpr int kNumCUDAThreads = 512;
diff --git a/paddle/phi/kernels/gpu/sigmoid_cross_entropy_with_logits.h b/paddle/phi/kernels/gpu/sigmoid_cross_entropy_with_logits.h
index 84a24449b3..1cc025bac4 100644
--- a/paddle/phi/kernels/gpu/sigmoid_cross_entropy_with_logits.h
+++ b/paddle/phi/kernels/gpu/sigmoid_cross_entropy_with_logits.h
@@ -17,13 +17,13 @@
 #include <vector>
 
 #include "paddle/fluid/memory/malloc.h"
-#include "paddle/fluid/operators/math.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/backends/gpu/gpu_helper.h"
 #include "paddle/phi/core/hostdevice.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/core/tensor_utils.h"
 #include "paddle/phi/kernels/funcs/elementwise_base.h"
+#include "paddle/phi/kernels/funcs/math.h"
 #include "paddle/phi/kernels/gpu/reduce.h"
 
 #ifdef __NVCC__
diff --git a/paddle/phi/kernels/gpu/sigmoid_cross_entropy_with_logits_grad_kernel.cu b/paddle/phi/kernels/gpu/sigmoid_cross_entropy_with_logits_grad_kernel.cu
index f61cd2c396..736c5608a6 100644
--- a/paddle/phi/kernels/gpu/sigmoid_cross_entropy_with_logits_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/sigmoid_cross_entropy_with_logits_grad_kernel.cu
@@ -37,8 +37,8 @@ struct SigmoidBwdFunctor {
       dx_data = static_cast<T>(0.);
       counts = 0;
     } else {
-      T simoid_x = static_cast<T>(1) /
-                   (static_cast<T>(1) + paddle::operators::real_exp(-x));
+      T simoid_x =
+          static_cast<T>(1) / (static_cast<T>(1) + phi::funcs::real_exp(-x));
       T diff = simoid_x - label;
       dx_data = dout * diff;
       counts = 1;
diff --git a/paddle/phi/kernels/gpu/sigmoid_cross_entropy_with_logits_kernel.cu b/paddle/phi/kernels/gpu/sigmoid_cross_entropy_with_logits_kernel.cu
index b0e9efe5bb..fb0183ce1e 100644
--- a/paddle/phi/kernels/gpu/sigmoid_cross_entropy_with_logits_kernel.cu
+++ b/paddle/phi/kernels/gpu/sigmoid_cross_entropy_with_logits_kernel.cu
@@ -37,9 +37,8 @@ struct SigmoidFwdFunctor {
     } else {
       T term1 = (x > 0) ? x : 0;
       T term2 = x * label;
-      T term3 = paddle::operators::real_log(
-          static_cast<T>(1) +
-          paddle::operators::real_exp(static_cast<T>(-abs(x))));
+      T term3 = phi::funcs::real_log(
+          static_cast<T>(1) + phi::funcs::real_exp(static_cast<T>(-abs(x))));
 
       out_data = term1 - term2 + term3;
       counts = 1;
diff --git a/paddle/phi/kernels/impl/selu_kernel_impl.h b/paddle/phi/kernels/impl/selu_kernel_impl.h
index c5d756e6eb..14789a7d61 100644
--- a/paddle/phi/kernels/impl/selu_kernel_impl.h
+++ b/paddle/phi/kernels/impl/selu_kernel_impl.h
@@ -15,9 +15,9 @@
 #pragma once
 #include <string>
 
-#include "paddle/fluid/operators/math.h"
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/kernels/funcs/for_range.h"
+#include "paddle/phi/kernels/funcs/math.h"
 
 namespace phi {
 
@@ -32,7 +32,7 @@ struct SeluFunctor {
   HOSTDEVICE void operator()(size_t idx) const {
     T x_ele = x_data_ptr_[idx];
    if (x_ele <= 0) {
-      x_ele = alpha_ * paddle::operators::real_exp(x_ele) - alpha_;
+      x_ele = alpha_ * phi::funcs::real_exp(x_ele) - alpha_;
    }
     y_data_ptr_[idx] = scale_ * x_ele;
   }
--
GitLab
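Migration note for downstream code: the moved helpers keep their signatures; only
the header path and the namespace change (paddle::operators:: becomes phi::funcs::).
A minimal sketch under that assumption; the caller below is hypothetical and not
part of this patch:

    // Before this patch, a caller would have included
    // "paddle/fluid/operators/math.h" and used paddle::operators::real_exp
    // and paddle::operators::real_log. After it, only the lines below change.
    #include "paddle/phi/kernels/funcs/math.h"

    // log(sigmoid(x)) = -log(1 + exp(-x)), written with the moved helpers.
    // Note: for large |x| the kernels in this patch avoid overflow with the
    // x - 2. * x * (x >= 0) rewrite; this sketch keeps the plain form.
    float LogSigmoid(float x) {
      return -phi::funcs::real_log(1.0f + phi::funcs::real_exp(-x));
    }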