From 8164b97ad66a3a392134b4301cdd98f911291a57 Mon Sep 17 00:00:00 2001
From: huangjiyi <43315610+huangjiyi@users.noreply.github.com>
Date: Thu, 10 Nov 2022 17:10:49 +0800
Subject: [PATCH] [PHI Decoupling] remove "paddle/fluid/platform/float16.h"
 and "paddle/fluid/platform/for_range.h" in phi. (#47817)

* rm "paddle/fluid/platform/float16.h" in phi

* rm "paddle/fluid/platform/for_range.h" in phi
---
 paddle/phi/kernels/cpu/fill_grad_kernel.cc          |  2 +-
 paddle/phi/kernels/cpu/fill_kernel.cc               |  2 +-
 paddle/phi/kernels/funcs/math_function.cc           |  2 +-
 paddle/phi/kernels/funcs/math_function.cu           |  2 +-
 paddle/phi/kernels/gpu/bmm_grad_kernel.cu           |  2 +-
 paddle/phi/kernels/gpu/bmm_kernel.cu                |  9 ++-------
 paddle/phi/kernels/gpu/cholesky_kernel.cu           |  4 ++--
 paddle/phi/kernels/gpu/fill_grad_kernel.cu          |  2 +-
 paddle/phi/kernels/gpu/fill_kernel.cu               |  2 +-
 paddle/phi/kernels/gpu/overlap_add_grad_kernel.cu   |  2 +-
 paddle/phi/kernels/gpu/overlap_add_kernel.cu        |  2 +-
 paddle/phi/kernels/gpu/set_value_kernel.cu          |  4 ++--
 paddle/phi/kernels/gpudnn/conv_grad_kernel.cu       |  1 -
 paddle/phi/kernels/impl/atan2_grad_kernel_impl.h    |  2 +-
 paddle/phi/kernels/impl/atan2_kernel_impl.h         |  2 +-
 paddle/phi/kernels/impl/cholesky_grad_kernel_impl.h |  4 ++--
 paddle/phi/kernels/impl/conv_cudnn_impl.h           |  2 +-
 paddle/phi/kernels/impl/eye_kernel_impl.h           |  4 ++--
 paddle/phi/kernels/impl/selu_grad_kernel_impl.h     |  2 +-
 paddle/phi/kernels/impl/selu_kernel_impl.h          |  4 ++--
 paddle/phi/kernels/xpu/bmm_grad_kernel.cc           | 11 ++++-------
 paddle/phi/kernels/xpu/bmm_kernel.cc                |  4 ++--
 paddle/phi/kernels/xpu/xpu_api_wrapper.h            |  2 +-
 23 files changed, 32 insertions(+), 41 deletions(-)

diff --git a/paddle/phi/kernels/cpu/fill_grad_kernel.cc b/paddle/phi/kernels/cpu/fill_grad_kernel.cc
index 07448c85a5..fda723ee2b 100644
--- a/paddle/phi/kernels/cpu/fill_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/fill_grad_kernel.cc
@@ -25,6 +25,6 @@ PD_REGISTER_KERNEL(fill_grad,
                    double,
                    int64_t,
                    int,
-                   paddle::platform::float16,
+                   phi::dtype::float16,
                    paddle::platform::bfloat16,
                    bool) {}
diff --git a/paddle/phi/kernels/cpu/fill_kernel.cc b/paddle/phi/kernels/cpu/fill_kernel.cc
index adca39e6ab..7e8534a5fb 100644
--- a/paddle/phi/kernels/cpu/fill_kernel.cc
+++ b/paddle/phi/kernels/cpu/fill_kernel.cc
@@ -25,6 +25,6 @@ PD_REGISTER_KERNEL(fill,
                    double,
                    int64_t,
                    int,
-                   paddle::platform::float16,
+                   phi::dtype::float16,
                    paddle::platform::bfloat16,
                    bool) {}
diff --git a/paddle/phi/kernels/funcs/math_function.cc b/paddle/phi/kernels/funcs/math_function.cc
index 756fd8782e..8c32f8bcd6 100644
--- a/paddle/phi/kernels/funcs/math_function.cc
+++ b/paddle/phi/kernels/funcs/math_function.cc
@@ -27,9 +27,9 @@ limitations under the License. */
 #include
 
 #include "paddle/fluid/platform/bfloat16.h"
-#include "paddle/fluid/platform/float16.h"
 #include "paddle/phi/backends/cpu/cpu_context.h"
 #include "paddle/phi/common/data_type.h"
+#include "paddle/phi/common/float16.h"
 #include "paddle/phi/kernels/funcs/eigen/common.h"
 #include "paddle/phi/kernels/funcs/math_function_impl.h"
 #include "unsupported/Eigen/CXX11/Tensor"
diff --git a/paddle/phi/kernels/funcs/math_function.cu b/paddle/phi/kernels/funcs/math_function.cu
index 7e7a6c30c3..decd668e3a 100644
--- a/paddle/phi/kernels/funcs/math_function.cu
+++ b/paddle/phi/kernels/funcs/math_function.cu
@@ -17,9 +17,9 @@ limitations under the License. */
 #include "paddle/fluid/memory/malloc.h"
 #include "paddle/fluid/memory/memcpy.h"
 #include "paddle/fluid/platform/bfloat16.h"
-#include "paddle/fluid/platform/float16.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/common/data_type.h"
+#include "paddle/phi/common/float16.h"
 #include "paddle/phi/kernels/funcs/blas/blas.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
 #include "paddle/phi/kernels/funcs/math_function_impl.h"
diff --git a/paddle/phi/kernels/gpu/bmm_grad_kernel.cu b/paddle/phi/kernels/gpu/bmm_grad_kernel.cu
index 1e51d373e0..2c77d0e900 100644
--- a/paddle/phi/kernels/gpu/bmm_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/bmm_grad_kernel.cu
@@ -24,4 +24,4 @@ PD_REGISTER_KERNEL(bmm_grad,
                    phi::BmmGradKernel,
                    float,
                    double,
-                   paddle::platform::float16) {}
+                   phi::dtype::float16) {}
diff --git a/paddle/phi/kernels/gpu/bmm_kernel.cu b/paddle/phi/kernels/gpu/bmm_kernel.cu
index 36dfad3d6a..b7ec1ce330 100644
--- a/paddle/phi/kernels/gpu/bmm_kernel.cu
+++ b/paddle/phi/kernels/gpu/bmm_kernel.cu
@@ -18,10 +18,5 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/bmm_kernel_impl.h"
 
-PD_REGISTER_KERNEL(bmm,
-                   GPU,
-                   ALL_LAYOUT,
-                   phi::BmmKernel,
-                   float,
-                   double,
-                   paddle::platform::float16) {}
+PD_REGISTER_KERNEL(
+    bmm, GPU, ALL_LAYOUT, phi::BmmKernel, float, double, phi::dtype::float16) {}
diff --git a/paddle/phi/kernels/gpu/cholesky_kernel.cu b/paddle/phi/kernels/gpu/cholesky_kernel.cu
index 7c4a497703..41fc8387c4 100644
--- a/paddle/phi/kernels/gpu/cholesky_kernel.cu
+++ b/paddle/phi/kernels/gpu/cholesky_kernel.cu
@@ -23,10 +23,10 @@ limitations under the License. */
 #include
 
 #include "paddle/fluid/memory/memory.h"
-#include "paddle/fluid/platform/for_range.h"
 #include "paddle/phi/backends/dynload/cusolver.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/core/kernel_registry.h"
+#include "paddle/phi/kernels/funcs/for_range.h"
 
 namespace phi {
 
@@ -130,7 +130,7 @@ void CholeskyKernel(const Context& dev_ctx,
       upper ? CUBLAS_FILL_MODE_LOWER : CUBLAS_FILL_MODE_UPPER;
   // portf is inplace, thus copy the triangular part of the input matrices to
   // the output and set the other triangular part to 0 firstly
-  paddle::platform::ForRange<Context> for_range(dev_ctx, tensor_size);
+  phi::funcs::ForRange<Context> for_range(dev_ctx, tensor_size);
   if (upper) {
     MatrixBandPartFunctor<T> matrix_band_part_functor(m,
                                                       m,
diff --git a/paddle/phi/kernels/gpu/fill_grad_kernel.cu b/paddle/phi/kernels/gpu/fill_grad_kernel.cu
index e18bb5c6db..512aef8c1c 100644
--- a/paddle/phi/kernels/gpu/fill_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/fill_grad_kernel.cu
@@ -26,6 +26,6 @@ PD_REGISTER_KERNEL(fill_grad,
                    double,
                    int64_t,
                    int,
-                   paddle::platform::float16,
+                   phi::dtype::float16,
                    paddle::platform::bfloat16,
                    bool) {}
diff --git a/paddle/phi/kernels/gpu/fill_kernel.cu b/paddle/phi/kernels/gpu/fill_kernel.cu
index 3fedb4118f..de46e67878 100644
--- a/paddle/phi/kernels/gpu/fill_kernel.cu
+++ b/paddle/phi/kernels/gpu/fill_kernel.cu
@@ -26,6 +26,6 @@ PD_REGISTER_KERNEL(fill,
                    double,
                    int64_t,
                    int,
-                   paddle::platform::float16,
+                   phi::dtype::float16,
                    paddle::platform::bfloat16,
                    bool) {}
diff --git a/paddle/phi/kernels/gpu/overlap_add_grad_kernel.cu b/paddle/phi/kernels/gpu/overlap_add_grad_kernel.cu
index 4957ade2c3..5d68b503d8 100644
--- a/paddle/phi/kernels/gpu/overlap_add_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/overlap_add_grad_kernel.cu
@@ -160,6 +160,6 @@ PD_REGISTER_KERNEL(overlap_add_grad,
                    int64_t,
                    float,
                    double,
-                   paddle::platform::float16,
+                   phi::dtype::float16,
                    paddle::platform::complex<float>,
                    paddle::platform::complex<double>) {}
diff --git a/paddle/phi/kernels/gpu/overlap_add_kernel.cu b/paddle/phi/kernels/gpu/overlap_add_kernel.cu
index 47989ebcda..86d5920754 100644
--- a/paddle/phi/kernels/gpu/overlap_add_kernel.cu
+++ b/paddle/phi/kernels/gpu/overlap_add_kernel.cu
@@ -146,6 +146,6 @@ PD_REGISTER_KERNEL(overlap_add,
                    int64_t,
                    float,
                    double,
-                   paddle::platform::float16,
+                   phi::dtype::float16,
                    paddle::platform::complex<float>,
                    paddle::platform::complex<double>) {}
diff --git a/paddle/phi/kernels/gpu/set_value_kernel.cu b/paddle/phi/kernels/gpu/set_value_kernel.cu
index 1a268c2f6b..2cd0e66675 100644
--- a/paddle/phi/kernels/gpu/set_value_kernel.cu
+++ b/paddle/phi/kernels/gpu/set_value_kernel.cu
@@ -28,7 +28,7 @@ PD_REGISTER_KERNEL(set_value,
                    int,
                    int64_t,
                    bool,
-                   paddle::platform::float16,
+                   phi::dtype::float16,
                    phi::dtype::complex<float>,
                    phi::dtype::complex<double>) {}
 PD_REGISTER_KERNEL(set_value_with_tensor,
@@ -40,6 +40,6 @@ PD_REGISTER_KERNEL(set_value_with_tensor,
                    int,
                    int64_t,
                    bool,
-                   paddle::platform::float16,
+                   phi::dtype::float16,
                    phi::dtype::complex<float>,
                    phi::dtype::complex<double>) {}
diff --git a/paddle/phi/kernels/gpudnn/conv_grad_kernel.cu b/paddle/phi/kernels/gpudnn/conv_grad_kernel.cu
index 43b0d0a81d..276480ed54 100644
--- a/paddle/phi/kernels/gpudnn/conv_grad_kernel.cu
+++ b/paddle/phi/kernels/gpudnn/conv_grad_kernel.cu
@@ -24,7 +24,6 @@
 #endif
 
 #include "paddle/fluid/platform/cudnn_workspace_helper.h"
-#include "paddle/fluid/platform/float16.h"
 #include "paddle/fluid/platform/profiler.h"
 #include "paddle/phi/common/bfloat16.h"
 #include "paddle/phi/common/float16.h"
diff --git a/paddle/phi/kernels/impl/atan2_grad_kernel_impl.h b/paddle/phi/kernels/impl/atan2_grad_kernel_impl.h
index 8b3ced7387..d0dd182985 100644
--- a/paddle/phi/kernels/impl/atan2_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/atan2_grad_kernel_impl.h
@@ -14,9 +14,9 @@
 
 #pragma once
 
-#include "paddle/fluid/platform/for_range.h"
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/kernels/atan2_grad_kernel.h"
+#include "paddle/phi/kernels/funcs/for_range.h"
 
 namespace phi {
 
diff --git a/paddle/phi/kernels/impl/atan2_kernel_impl.h b/paddle/phi/kernels/impl/atan2_kernel_impl.h
index e80256b725..2cae914e2f 100644
--- a/paddle/phi/kernels/impl/atan2_kernel_impl.h
+++ b/paddle/phi/kernels/impl/atan2_kernel_impl.h
@@ -14,9 +14,9 @@
 
 #pragma once
 
-#include "paddle/fluid/platform/for_range.h"
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/kernels/atan2_kernel.h"
+#include "paddle/phi/kernels/funcs/for_range.h"
 
 namespace phi {
 template <typename T>
diff --git a/paddle/phi/kernels/impl/cholesky_grad_kernel_impl.h b/paddle/phi/kernels/impl/cholesky_grad_kernel_impl.h
index 7ffd69e16e..8e5fab281e 100644
--- a/paddle/phi/kernels/impl/cholesky_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/cholesky_grad_kernel_impl.h
@@ -14,9 +14,9 @@ limitations under the License. */
 
 #pragma once
 
-#include "paddle/fluid/platform/for_range.h"
 #include "paddle/phi/kernels/cholesky_grad_kernel.h"
 #include "paddle/phi/kernels/funcs/blas/blas.h"
+#include "paddle/phi/kernels/funcs/for_range.h"
 
 namespace phi {
 
@@ -280,7 +280,7 @@ void CholeskyGradKernel(const Context& dev_ctx,
   blas.MatMul(l, trans_desc, l_grad, no_trans_desc, T(1), &middle, T(0));
 
   /*! phi.tril_().diagonal(0, -2, -1).mul_(0.5) */
-  paddle::platform::ForRange<Context> for_range(dev_ctx, tensor_size);
+  phi::funcs::ForRange<Context> for_range(dev_ctx, tensor_size);
   MatrixBandPartScaleEndFunctor<T> matrix_band_part_scale_end_functor(
       m,
       m,
diff --git a/paddle/phi/kernels/impl/conv_cudnn_impl.h b/paddle/phi/kernels/impl/conv_cudnn_impl.h
index b66dd60279..cf419cd1fd 100644
--- a/paddle/phi/kernels/impl/conv_cudnn_impl.h
+++ b/paddle/phi/kernels/impl/conv_cudnn_impl.h
@@ -24,9 +24,9 @@
 #endif
 
 #include "paddle/fluid/platform/cudnn_workspace_helper.h"
-#include "paddle/fluid/platform/float16.h"
 #include "paddle/fluid/platform/profiler.h"
 #include "paddle/phi/backends/dynload/cudnn.h"
+#include "paddle/phi/common/float16.h"
 #include "paddle/phi/kernels/cpu/conv_util.h"
 #include "paddle/phi/kernels/funcs/batch_norm_utils.h"
 #include "paddle/phi/kernels/funcs/padding.h"
diff --git a/paddle/phi/kernels/impl/eye_kernel_impl.h b/paddle/phi/kernels/impl/eye_kernel_impl.h
index 57b9ce73e8..2d373f99a2 100644
--- a/paddle/phi/kernels/impl/eye_kernel_impl.h
+++ b/paddle/phi/kernels/impl/eye_kernel_impl.h
@@ -14,8 +14,8 @@
 
 #pragma once
 
-#include "paddle/fluid/platform/for_range.h"
 #include "paddle/phi/common/scalar.h"
+#include "paddle/phi/kernels/funcs/for_range.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
 
 namespace phi {
@@ -48,7 +48,7 @@ void EyeKernel(const Context& ctx,
   phi::funcs::SetConstant<Context, T> set_zero;
   set_zero(ctx, out, static_cast<T>(0));
   int64_t num_eyes = (std::min)(rows, columns);
-  paddle::platform::ForRange<Context> for_range(ctx, num_eyes);
+  phi::funcs::ForRange<Context> for_range(ctx, num_eyes);
   EyeFunctor<T> functor(columns, out_data);
   for_range(functor);
 }
diff --git a/paddle/phi/kernels/impl/selu_grad_kernel_impl.h b/paddle/phi/kernels/impl/selu_grad_kernel_impl.h
index 4f6550b9be..25d5e4e511 100644
--- a/paddle/phi/kernels/impl/selu_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/selu_grad_kernel_impl.h
@@ -28,7 +28,7 @@ void SeluGradKernel(const Context& dev_ctx,
   SeluGradFunctor<T> functor(
       out.data<T>(), dout.data<T>(), alpha, scale, dx_ptr);
   size_t limit = static_cast<size_t>(out.numel());
-  paddle::platform::ForRange<Context> for_range(dev_ctx, limit);
+  phi::funcs::ForRange<Context> for_range(dev_ctx, limit);
   for_range(functor);
 }
 }  // namespace phi
diff --git a/paddle/phi/kernels/impl/selu_kernel_impl.h b/paddle/phi/kernels/impl/selu_kernel_impl.h
index 0725b14125..c5d756e6eb 100644
--- a/paddle/phi/kernels/impl/selu_kernel_impl.h
+++ b/paddle/phi/kernels/impl/selu_kernel_impl.h
@@ -16,8 +16,8 @@
 #include
 
 #include "paddle/fluid/operators/math.h"
-#include "paddle/fluid/platform/for_range.h"
 #include "paddle/phi/core/dense_tensor.h"
+#include "paddle/phi/kernels/funcs/for_range.h"
 
 namespace phi {
 
@@ -86,7 +86,7 @@ void SeluKernel(const Context& dev_ctx,
   auto out_ptr = dev_ctx.template Alloc<T>(out);
   SeluFunctor<T> functor(x.data<T>(), alpha, scale, out_ptr);
   size_t limit = static_cast<size_t>(x.numel());
-  paddle::platform::ForRange<Context> for_range(dev_ctx, limit);
+  phi::funcs::ForRange<Context> for_range(dev_ctx, limit);
   for_range(functor);
 }
 }  // namespace phi
diff --git a/paddle/phi/kernels/xpu/bmm_grad_kernel.cc b/paddle/phi/kernels/xpu/bmm_grad_kernel.cc
index 246da888d2..3d19c8cecd 100644
--- a/paddle/phi/kernels/xpu/bmm_grad_kernel.cc
+++ b/paddle/phi/kernels/xpu/bmm_grad_kernel.cc
@@ -27,7 +27,7 @@ void MatMul(const Context& dev_ctx,
             DenseTensor* out) {
   dev_ctx.template Alloc<T>(out);
   xpu::Context* xpu_ctx = dev_ctx.x_context();
-  if (std::is_same<paddle::platform::float16, T>::value) {
+  if (std::is_same<phi::dtype::float16, T>::value) {
     MatMulXPUFunction<T, int16_t>(a, b, out, trans_a, trans_b, xpu_ctx);
   } else {
     if (std::getenv("XPU_PADDLE_FC_INT32") != nullptr) {
@@ -99,9 +99,6 @@ void BmmGradKernel(const Context& dev_ctx,
 
 }  // namespace phi
 
-PD_REGISTER_KERNEL(bmm_grad,
-                   XPU,
-                   ALL_LAYOUT,
-                   phi::BmmGradKernel,
-                   float,
-                   paddle::platform::float16) {}
+PD_REGISTER_KERNEL(
+    bmm_grad, XPU, ALL_LAYOUT, phi::BmmGradKernel, float, phi::dtype::float16) {
+}
diff --git a/paddle/phi/kernels/xpu/bmm_kernel.cc b/paddle/phi/kernels/xpu/bmm_kernel.cc
index b75383bbaa..c927b7e307 100644
--- a/paddle/phi/kernels/xpu/bmm_kernel.cc
+++ b/paddle/phi/kernels/xpu/bmm_kernel.cc
@@ -62,7 +62,7 @@ void BmmKernel(const Context& dev_ctx,
                         y_dims[1]));
 
   xpu::Context* xpu_ctx = dev_ctx.x_context();
-  if (std::is_same<paddle::platform::float16, T>::value) {
+  if (std::is_same<phi::dtype::float16, T>::value) {
     MatMulXPUFunction<T, int16_t>(x, y, out, trans_x, trans_y, xpu_ctx);
   } else {
     if (std::getenv("XPU_PADDLE_FC_INT32") != nullptr) {
@@ -77,4 +77,4 @@ void BmmKernel(const Context& dev_ctx,
 }  // namespace phi
 
 PD_REGISTER_KERNEL(
-    bmm, XPU, ALL_LAYOUT, phi::BmmKernel, float, paddle::platform::float16) {}
+    bmm, XPU, ALL_LAYOUT, phi::BmmKernel, float, phi::dtype::float16) {}
diff --git a/paddle/phi/kernels/xpu/xpu_api_wrapper.h b/paddle/phi/kernels/xpu/xpu_api_wrapper.h
index 8fefbd84c6..8433c6b421 100644
--- a/paddle/phi/kernels/xpu/xpu_api_wrapper.h
+++ b/paddle/phi/kernels/xpu/xpu_api_wrapper.h
@@ -34,7 +34,7 @@ enum XPUFCCalcType {
 
 template <typename T>
 XPUFCCalcType FCCalcType() {
-  if (std::is_same<paddle::platform::float16, T>::value ||
+  if (std::is_same<phi::dtype::float16, T>::value ||
       std::is_same<float16, T>::value) {
     return XPUFCCalcType::FC_INT16;
   } else if (std::getenv("XPU_PADDLE_FC_INT32") != nullptr) {
-- 
GitLab