Unverified · Commit f62bd3b4 authored by huangjiyi, committed by GitHub

[PHI decoupling] move "paddle/fluid/operators/math.h" to phi (#48062)

* rm "paddle/fluid/operators/math.h" in phi

* rm "paddle/fluid/operators/math.h" in fluit
Parent e5ed5257
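For code that consumed the old header, the migration is mechanical: swap the fluid include for the phi one and qualify the helpers with phi::funcs. A minimal before/after sketch (the surrounding function is illustrative, not part of this commit):

    // Before:
    //   #include "paddle/fluid/operators/math.h"
    //   T p = 1. / (1. + paddle::operators::real_exp(-x));

    // After:
    #include "paddle/phi/core/hostdevice.h"
    #include "paddle/phi/kernels/funcs/math.h"

    template <typename T>
    HOSTDEVICE T Sigmoid(T x) {
      return static_cast<T>(1) / (static_cast<T>(1) + phi::funcs::real_exp(-x));
    }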
@@ -15,9 +15,9 @@ limitations under the License. */
 #pragma once
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
-#include "paddle/fluid/operators/math.h"
 #include "paddle/fluid/operators/math/cross_entropy.h"
 #include "paddle/fluid/platform/for_range.h"
+#include "paddle/phi/kernels/funcs/math.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
 
 namespace paddle {
@@ -190,7 +190,7 @@ struct HardLabelCrossEntropyForwardFunctor {
           label);
       auto match_x = x_[idx * feature_size_ + label];
-      y_[idx] = -math::TolerableValue<T>()(real_log(match_x));
+      y_[idx] = -math::TolerableValue<T>()(phi::funcs::real_log(match_x));
       match_x_[idx] = match_x;
     } else {
       y_[idx] = 0;
...
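Here real_log moves to phi::funcs while math::TolerableValue<T> stays in fluid (it comes from paddle/fluid/operators/math/cross_entropy.h); its job is to keep real_log(0) = -inf from poisoning the loss. A minimal sketch of the idea, not the exact Paddle implementation:

    #include <cmath>

    // Replace +/-inf (e.g. from log(0)) with a large finite surrogate so the
    // loss value stays usable downstream.
    template <typename T>
    struct TolerableValueSketch {
      T operator()(const T& x) const {
        const T kApproInf = 1e20;  // assumption: surrogate magnitude
        if (std::isinf(x)) return x > 0 ? kApproInf : -kApproInf;
        return x;
      }
    };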
@@ -13,7 +13,6 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "paddle/fluid/operators/dequantize_log_op.h"
-#include "paddle/fluid/operators/math.h"
 #include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
 #include "paddle/phi/core/hostdevice.h"
...
@@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "paddle/fluid/operators/detection/sigmoid_focal_loss_op.h"
-#include "paddle/fluid/operators/math.h"
 #include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
 #include "paddle/phi/core/hostdevice.h"
+#include "paddle/phi/kernels/funcs/math.h"
 
 namespace paddle {
 namespace operators {
@@ -55,15 +55,16 @@ __global__ void GPUSigmoidFocalLossForward(const T *x_data,
       T s_pos = alpha / fg_num;
 
       // p = 1. / 1. + expf(-x)
-      T p = 1. / (1. + real_exp(-x));
+      T p = 1. / (1. + phi::funcs::real_exp(-x));
 
       // (1 - p)**gamma * log(p)
       T term_pos = std::pow(static_cast<T>(1. - p), gamma) *
-                   real_log(p > FLT_MIN ? p : FLT_MIN);
+                   phi::funcs::real_log(p > FLT_MIN ? p : FLT_MIN);
 
       // p**gamma * log(1 - p)
-      T term_neg =
-          std::pow(p, gamma) *
-          (-1. * x * (x >= 0) - real_log(1. + real_exp(x - 2. * x * (x >= 0))));
+      T term_neg = std::pow(p, gamma) *
+                   (-1. * x * (x >= 0) -
+                    phi::funcs::real_log(
+                        1. + phi::funcs::real_exp(x - 2. * x * (x >= 0))));
 
       out_data[i] = 0.0;
       out_data[i] += -c_pos * term_pos * s_pos;
@@ -96,17 +97,20 @@ __global__ void GPUSigmoidFocalLossBackward(const T *x_data,
       T c_pos = static_cast<T>(g == (d + 1));
       T c_neg = static_cast<T>((g != -1) & (g != (d + 1)));
 
-      T p = 1. / (1. + real_exp(-x));
+      T p = 1. / (1. + phi::funcs::real_exp(-x));
 
       // (1-p)**g * (1 - p - g*p*log(p))
-      T term_pos = std::pow(static_cast<T>(1. - p), gamma) *
-                   (1. - p - (p * gamma * real_log(p > FLT_MIN ? p : FLT_MIN)));
+      T term_pos =
+          std::pow(static_cast<T>(1. - p), gamma) *
+          (1. - p -
+           (p * gamma * phi::funcs::real_log(p > FLT_MIN ? p : FLT_MIN)));
 
       // (p**g) * (g*(1-p)*log(1-p) - p)
-      T term_neg =
-          std::pow(p, gamma) *
-          ((-1. * x * (x >= 0) - real_log(1. + real_exp(x - 2. * x * (x >= 0)))) *
-               (1. - p) * gamma -
-           p);
+      T term_neg = std::pow(p, gamma) *
+                   ((-1. * x * (x >= 0) -
+                     phi::funcs::real_log(
+                         1. + phi::funcs::real_exp(x - 2. * x * (x >= 0)))) *
+                        (1. - p) * gamma -
+                    p);
 
       dx_data[i] = 0.0;
       dx_data[i] += -c_pos * s_pos * term_pos;
...
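The dense expression -1. * x * (x >= 0) - real_log(1. + real_exp(x - 2. * x * (x >= 0))), used in both the forward and backward kernels, is the numerically stable form of log(1 - p) for p = sigmoid(x). Writing s = 1[x >= 0], it rests on the identity

    \log(1 - p) = -\log(1 + e^{x}) = -xs - \log\bigl(1 + e^{x - 2xs}\bigr)
                = -\max(x, 0) - \log\bigl(1 + e^{-|x|}\bigr),

so real_exp is only ever called on a non-positive argument and cannot overflow.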
@@ -14,10 +14,10 @@ limitations under the License. */
 #include "paddle/fluid/operators/math/cross_entropy.h"
 
 #include "paddle/fluid/framework/convert_utils.h"
-#include "paddle/fluid/operators/math.h"
 #include "paddle/fluid/platform/device/gpu/gpu_device_function.h"
 #include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
+#include "paddle/phi/kernels/funcs/math.h"
 
 namespace paddle {
 namespace operators {
@@ -39,9 +39,10 @@ __global__ void CrossEntropyKernel(T* Y,
                             D,
                             ignore_index,
                             lbl);
-    Y[i] = ignore_index == lbl
-               ? static_cast<T>(0)
-               : -math::TolerableValue<T>()(real_log(X[i * D + lbl]));
+    Y[i] =
+        ignore_index == lbl
+            ? static_cast<T>(0)
+            : -math::TolerableValue<T>()(phi::funcs::real_log(X[i * D + lbl]));
   }
 }
@@ -56,7 +57,7 @@ __global__ void SoftCrossEntropyKernel(T* Y,
   int idx = blockIdx.x * class_num + tid;
   int end = blockIdx.x * class_num + class_num;
   for (; idx < end; idx += blockDim.x) {
-    val += math::TolerableValue<T>()(real_log(X[idx])) * label[idx];
+    val += math::TolerableValue<T>()(phi::funcs::real_log(X[idx])) * label[idx];
  }
 
   val = paddle::platform::reduceSum(val, tid, blockDim.x);
@@ -152,7 +153,7 @@ void CrossEntropyFunctor<DeviceContext, T>::operator()(
 template class CrossEntropyFunctor<phi::GPUContext, float>;
 template class CrossEntropyFunctor<phi::GPUContext, double>;
-template class CrossEntropyFunctor<phi::GPUContext, platform::float16>;
+template class CrossEntropyFunctor<phi::GPUContext, phi::dtype::float16>;
 
 }  // namespace math
 }  // namespace operators
...
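For reference, CrossEntropyKernel implements hard-label cross entropy with an ignore index, and SoftCrossEntropyKernel accumulates the soft-label loss per row before the block-wide reduceSum (with the final sign applied after the reduction), each log routed through TolerableValue:

    Y_i = \begin{cases} 0 & \text{lbl}_i = \text{ignore\_index} \\ -\log X_{i,\,\text{lbl}_i} & \text{otherwise}, \end{cases}
    \qquad
    Y_b = -\sum_{j} \text{label}_{bj} \log X_{bj}.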
@@ -23,8 +23,8 @@ limitations under the License. */
 namespace cub = hipcub;
 #endif
 
-#include "paddle/fluid/operators/math.h"
 #include "paddle/fluid/operators/sequence_ops/sequence_softmax_op.h"
+#include "paddle/phi/kernels/funcs/math.h"
 
 namespace paddle {
 namespace operators {
@@ -67,7 +67,7 @@ __global__ void sequence_softmax_kernel(const T *in_data,
   T sum_data = 0;
   for (int tid = threadIdx.x; tid < span; tid += blockDim.x) {
     T ele = in_data[start + tid];
-    sum_data += real_exp(ele - shared_max_data);
+    sum_data += phi::funcs::real_exp(ele - shared_max_data);
   }
   sum_data =
       BlockReduce<T, BlockDim>(temp_storage).Reduce(sum_data, cub::Sum());
@@ -79,7 +79,7 @@ __global__ void sequence_softmax_kernel(const T *in_data,
   // get final resit
   for (int tid = threadIdx.x; tid < span; tid += blockDim.x) {
     T ele = in_data[start + tid];
-    ele = real_exp(ele - shared_max_data) / shared_sum_data;
+    ele = phi::funcs::real_exp(ele - shared_max_data) / shared_sum_data;
     out_data[start + tid] = ele;
   }
 }
...
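sequence_softmax_kernel uses the standard max-shift: softmax is invariant to subtracting a constant from every logit, so exponentiating against the row maximum keeps every real_exp argument non-positive:

    \operatorname{softmax}(z)_k = \frac{e^{z_k - m}}{\sum_j e^{z_j - m}}, \qquad m = \max_j z_j.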
@@ -16,9 +16,9 @@
 #include <algorithm>  // for max
 
-#include "paddle/fluid/operators/math.h"
 #include "paddle/phi/backends/cpu/cpu_context.h"
 #include "paddle/phi/core/kernel_registry.h"
+#include "paddle/phi/kernels/funcs/math.h"
 
 namespace phi {
@@ -47,10 +47,9 @@ void BCELossKernel(const Context& dev_ctx,
             "Illegal input, input must be less than or equal to 1"));
     out_data[i] =
         (label_data[i] - static_cast<T>(1)) *
-            std::max(paddle::operators::real_log(static_cast<T>(1) - x_data[i]),
+            std::max(phi::funcs::real_log(static_cast<T>(1) - x_data[i]),
                      (T)(-100)) -
-        label_data[i] *
-            std::max(paddle::operators::real_log(x_data[i]), (T)(-100));
+        label_data[i] * std::max(phi::funcs::real_log(x_data[i]), (T)(-100));
   }
 }
...
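BCELossKernel is the usual binary cross entropy with each log clamped below at -100, so inputs of exactly 0 or 1 produce a large but finite loss:

    \ell_i = -\bigl[y_i \max(\log x_i, -100) + (1 - y_i) \max(\log(1 - x_i), -100)\bigr].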
@@ -17,9 +17,9 @@
 #include <memory>
 #include <string>
 
-#include "paddle/fluid/operators/math.h"
 #include "paddle/phi/backends/cpu/cpu_context.h"
 #include "paddle/phi/core/kernel_registry.h"
+#include "paddle/phi/kernels/funcs/math.h"
 
 namespace phi {
 
 template <typename T>
...
@@ -15,7 +15,7 @@ limitations under the License. */
 #pragma once
 
 #include "paddle/fluid/operators/amp/fp16_type_traits.h"
-#include "paddle/fluid/operators/math.h"
+#include "paddle/phi/kernels/funcs/math.h"
 
 namespace phi {
 namespace funcs {
@@ -89,8 +89,7 @@ struct TanhFunctor {
     // y = 2 / (1 + e^-2x) - 1
     T t0 = static_cast<T>(2) * x;
     T t1 = (t0 < kMin) ? kMin : ((t0 > kMax) ? kMax : t0);
-    return static_cast<T>(2) /
-               (static_cast<T>(1) + paddle::operators::real_exp(-t1)) -
+    return static_cast<T>(2) / (static_cast<T>(1) + phi::funcs::real_exp(-t1)) -
            static_cast<T>(1);
   }
 };
@@ -111,8 +110,7 @@ struct SigmoidFunctor {
   inline HOSTDEVICE T operator()(T x) {
     // y = 1 / (1 + e^-x)
     T tmp = (x < kMin) ? kMin : ((x > kMax) ? kMax : x);
-    return static_cast<T>(1) /
-           (static_cast<T>(1) + paddle::operators::real_exp(-tmp));
+    return static_cast<T>(1) / (static_cast<T>(1) + phi::funcs::real_exp(-tmp));
   }
 };
...
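TanhFunctor reuses the exponential it already has via tanh(x) = 2\sigma(2x) - 1:

    \tanh(x) = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}} = \frac{2}{1 + e^{-2x}} - 1,

with the doubled argument clamped to [kMin, kMax] before exponentiation, exactly as SigmoidFunctor clamps x.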
-// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -15,22 +15,22 @@
 #pragma once
 
 #include "math.h"  // NOLINT
-#include "paddle/fluid/platform/float16.h"
+#include "paddle/phi/common/float16.h"
 #include "paddle/phi/core/hostdevice.h"
 
-namespace paddle {
-namespace operators {
+namespace phi {
+namespace funcs {
 
-inline HOSTDEVICE platform::float16 real_exp(platform::float16 x) {
-  return static_cast<platform::float16>(::expf(static_cast<float>(x)));
+inline HOSTDEVICE phi::dtype::float16 real_exp(phi::dtype::float16 x) {
+  return static_cast<phi::dtype::float16>(::expf(static_cast<float>(x)));
 }
 
 inline HOSTDEVICE float real_exp(float x) { return ::expf(x); }
 
 inline HOSTDEVICE double real_exp(double x) { return ::exp(x); }
 
-inline HOSTDEVICE platform::float16 real_log(platform::float16 x) {
-  return static_cast<platform::float16>(::logf(static_cast<float>(x)));
+inline HOSTDEVICE phi::dtype::float16 real_log(phi::dtype::float16 x) {
+  return static_cast<phi::dtype::float16>(::logf(static_cast<float>(x)));
 }
 
 inline HOSTDEVICE float real_log(float x) { return ::logf(x); }
@@ -41,5 +41,5 @@ inline HOSTDEVICE float real_min(float x, float y) { return ::fminf(x, y); }
 inline HOSTDEVICE double real_min(double x, double y) { return ::fmin(x, y); }
 
-}  // namespace operators
-}  // namespace paddle
+}  // namespace funcs
+}  // namespace phi
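After the move, the overload set lives in phi::funcs, with the float16 overloads routing through the float intrinsics via static_cast. A hedged usage sketch (LogSumExpPair is illustrative, not an API introduced by this commit):

    #include "paddle/phi/core/hostdevice.h"
    #include "paddle/phi/kernels/funcs/math.h"

    // log(e^a + e^b), shifted by the larger argument for stability; overload
    // resolution picks the float16/float/double version of real_exp/real_log.
    template <typename T>
    HOSTDEVICE T LogSumExpPair(T a, T b) {
      T m = a > b ? a : b;
      return m + phi::funcs::real_log(phi::funcs::real_exp(a - m) +
                                      phi::funcs::real_exp(b - m));
    }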
@@ -19,10 +19,10 @@
 #include <functional>
 #include <string>
 
-#include "paddle/fluid/operators/math.h"
 #include "paddle/phi/backends/gpu/gpu_primitives.h"
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/core/hostdevice.h"
+#include "paddle/phi/kernels/funcs/math.h"
 
 namespace phi {
 
 static constexpr int kNumCUDAThreads = 512;
...
@@ -17,13 +17,13 @@
 #include <algorithm>
 
 #include "paddle/fluid/memory/malloc.h"
-#include "paddle/fluid/operators/math.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/backends/gpu/gpu_helper.h"
 #include "paddle/phi/core/hostdevice.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/core/tensor_utils.h"
 #include "paddle/phi/kernels/funcs/elementwise_base.h"
+#include "paddle/phi/kernels/funcs/math.h"
 #include "paddle/phi/kernels/gpu/reduce.h"
 
 #ifdef __NVCC__
...
@@ -37,8 +37,8 @@ struct SigmoidBwdFunctor {
       dx_data = static_cast<T>(0.);
       counts = 0;
     } else {
-      T simoid_x = static_cast<T>(1) /
-                   (static_cast<T>(1) + paddle::operators::real_exp(-x));
+      T simoid_x =
+          static_cast<T>(1) / (static_cast<T>(1) + phi::funcs::real_exp(-x));
       T diff = simoid_x - label;
       dx_data = dout * diff;
       counts = 1;
...
@@ -37,9 +37,8 @@ struct SigmoidFwdFunctor {
     } else {
       T term1 = (x > 0) ? x : 0;
       T term2 = x * label;
-      T term3 = paddle::operators::real_log(
-          static_cast<T>(1) +
-          paddle::operators::real_exp(static_cast<T>(-abs(x))));
+      T term3 = phi::funcs::real_log(
+          static_cast<T>(1) + phi::funcs::real_exp(static_cast<T>(-abs(x))));
       out_data = term1 - term2 + term3;
       counts = 1;
...
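SigmoidFwdFunctor evaluates sigmoid cross entropy straight from logits in the overflow-safe decomposition

    \ell(x, y) = \underbrace{\max(x, 0)}_{\text{term1}} - \underbrace{x y}_{\text{term2}} + \underbrace{\log\bigl(1 + e^{-|x|}\bigr)}_{\text{term3}},

which equals -[y \log \sigma(x) + (1 - y) \log(1 - \sigma(x))] but never exponentiates a positive number.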
@@ -15,9 +15,9 @@
 #pragma once
 
 #include <string>
 
-#include "paddle/fluid/operators/math.h"
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/kernels/funcs/for_range.h"
+#include "paddle/phi/kernels/funcs/math.h"
 
 namespace phi {
@@ -32,7 +32,7 @@ struct SeluFunctor {
   HOSTDEVICE void operator()(size_t idx) const {
     T x_ele = x_data_ptr_[idx];
     if (x_ele <= 0) {
-      x_ele = alpha_ * paddle::operators::real_exp(x_ele) - alpha_;
+      x_ele = alpha_ * phi::funcs::real_exp(x_ele) - alpha_;
     }
     y_data_ptr_[idx] = scale_ * x_ele;
   }
...
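SeluFunctor is the standard SELU, with scale_ = \lambda and alpha_ = \alpha; the code folds \alpha e^{x} - \alpha into alpha_ * real_exp(x_ele) - alpha_:

    \operatorname{selu}(x) = \lambda \begin{cases} x & x > 0 \\ \alpha\,(e^{x} - 1) & x \le 0. \end{cases}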