From 4c375454585cae5612fd9a4f92d325b340c92b69 Mon Sep 17 00:00:00 2001
From: huangjiyi <43315610+huangjiyi@users.noreply.github.com>
Date: Thu, 10 Nov 2022 16:27:17 +0800
Subject: [PATCH] [PHI Decoupling] remove dependency on
 "paddle/fluid/platform/errors.h" and "paddle/fluid/platform/fast_divmod.h"
 in phi. (#47815)

* rm "paddle/fluid/platform/errors.h" in phi

* rm "paddle/fluid/platform/fast_divmod.h" in phi
---
 paddle/phi/backends/dynload/tensorrt.cc      |  8 +--
 paddle/phi/core/selected_rows_impl.cc        |  8 +--
 paddle/phi/kernels/autotune/auto_tune_base.h |  4 +-
 paddle/phi/kernels/funcs/concat_funcs.h      |  2 +-
 .../phi/kernels/funcs/interpolate_function.h |  4 +-
 paddle/phi/kernels/funcs/pooling.cu          | 58 +++++++++----------
 .../kernels/gpu/interpolate_grad_kernel.cu   |  2 +-
 paddle/phi/kernels/gpu/interpolate_kernel.cu |  4 +-
 .../impl/broadcast_tensors_kernel_impl.h     |  2 +-
 paddle/phi/tests/api/scale_api.h             |  6 +-
 .../phi/tests/kernels/test_math_function.cc  | 12 ++--
 .../phi/tests/kernels/test_math_function.cu  |  2 +-
 12 files changed, 56 insertions(+), 56 deletions(-)

diff --git a/paddle/phi/backends/dynload/tensorrt.cc b/paddle/phi/backends/dynload/tensorrt.cc
index 2e2319a47cc..7380788facc 100644
--- a/paddle/phi/backends/dynload/tensorrt.cc
+++ b/paddle/phi/backends/dynload/tensorrt.cc
@@ -40,10 +40,10 @@ void* GetDsoHandle(const std::string& dso_name) {
 
   void* dso_handle = dlopen(dso_name.c_str(), dynload_flags);
 
-  PADDLE_ENFORCE_NOT_NULL(dso_handle,
-                          paddle::platform::errors::NotFound(
-                              "TensorRT is needed, "
-                              "but TensorRT dynamic library is not found."));
+  PADDLE_ENFORCE_NOT_NULL(
+      dso_handle,
+      phi::errors::NotFound("TensorRT is needed, "
+                            "but TensorRT dynamic library is not found."));
 
   return dso_handle;
 }
diff --git a/paddle/phi/core/selected_rows_impl.cc b/paddle/phi/core/selected_rows_impl.cc
index a5a121cd60b..f099ea711f1 100644
--- a/paddle/phi/core/selected_rows_impl.cc
+++ b/paddle/phi/core/selected_rows_impl.cc
@@ -174,10 +174,10 @@ void SelectedRowsImpl::Get(const phi::DenseTensor& ids,
                            phi::DenseTensor* value,
                            bool auto_grown,
                            bool is_test) {
-  PADDLE_ENFORCE_EQ(value->IsInitialized(),
-                    true,
-                    paddle::platform::errors::InvalidArgument(
-                        "The value tensor is not initialized."));
+  PADDLE_ENFORCE_EQ(
+      value->IsInitialized(),
+      true,
+      phi::errors::InvalidArgument("The value tensor is not initialized."));
   if (ids.numel() == 0) {
     VLOG(3) << "keys is empty, please check data!";
   } else {
diff --git a/paddle/phi/kernels/autotune/auto_tune_base.h b/paddle/phi/kernels/autotune/auto_tune_base.h
index d9f96ec2328..2a10314f844 100644
--- a/paddle/phi/kernels/autotune/auto_tune_base.h
+++ b/paddle/phi/kernels/autotune/auto_tune_base.h
@@ -70,7 +70,7 @@ class AutoTuneBase {
     PADDLE_ENFORCE_GT(
         kernels_.size(),
         0,
-        paddle::platform::errors::InvalidArgument(
+        phi::errors::InvalidArgument(
             "kernel num must be greater than 0, now is %d", kernels_.size()));
     is_init_ = true;
 
@@ -102,7 +102,7 @@ class AutoTuneBase {
     PADDLE_ENFORCE_GT(
         kernels_.size(),
         0,
-        paddle::platform::errors::InvalidArgument(
+        phi::errors::InvalidArgument(
            "kernel num must be greater than 0, now is %d", kernels_.size()));
     size_t best_idx = 0;
     float min_time = std::numeric_limits<float>::max();
diff --git a/paddle/phi/kernels/funcs/concat_funcs.h b/paddle/phi/kernels/funcs/concat_funcs.h
index 70e3545b981..1c1e0eb1d0a 100644
--- a/paddle/phi/kernels/funcs/concat_funcs.h
+++ b/paddle/phi/kernels/funcs/concat_funcs.h
@@ -15,7 +15,7 @@
 #pragma once
 
 #include "paddle/fluid/platform/enforce.h"
-#include "paddle/fluid/platform/errors.h"
+#include "paddle/phi/core/errors.h"
 
 namespace phi {
 namespace funcs {
diff --git a/paddle/phi/kernels/funcs/interpolate_function.h b/paddle/phi/kernels/funcs/interpolate_function.h
index 42adc94a642..89b02317f3e 100644
--- a/paddle/phi/kernels/funcs/interpolate_function.h
+++ b/paddle/phi/kernels/funcs/interpolate_function.h
@@ -20,7 +20,7 @@
 #include "paddle/phi/kernels/funcs/eigen/common.h"
 
 #if defined(__NVCC__) || defined(__HIPCC__)
-#include "paddle/fluid/platform/fast_divmod.h"
+#include "paddle/phi/kernels/primitive/datamover_primitives.h"
 #endif
 
 namespace phi {
@@ -142,7 +142,7 @@ inline std::vector<T> get_new_data_from_tensor(
 }
 
 #if defined(__NVCC__) || defined(__HIPCC__)
-using paddle::platform::FastDivMod;
+using phi::kps::details::FastDivMod;
 
 struct FastDivModForInterpolate {
  public:
diff --git a/paddle/phi/kernels/funcs/pooling.cu b/paddle/phi/kernels/funcs/pooling.cu
index a1bb927b73e..59d4d21ec46 100644
--- a/paddle/phi/kernels/funcs/pooling.cu
+++ b/paddle/phi/kernels/funcs/pooling.cu
@@ -16,56 +16,56 @@ limitations under the License. */
 #include <vector>
 
 #include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
-#include "paddle/fluid/platform/fast_divmod.h"
 #include "paddle/phi/backends/gpu/gpu_launch_config.h"
 #include "paddle/phi/kernels/funcs/pooling.h"
 #include "paddle/phi/kernels/funcs/reduce_function.h"
+#include "paddle/phi/kernels/primitive/datamover_primitives.h"
 
 namespace phi {
 namespace funcs {
 
 struct FastDivModForPooling {
  public:
-  paddle::platform::FastDivMod channel;
-  paddle::platform::FastDivMod width;
-  paddle::platform::FastDivMod height;
+  phi::kps::details::FastDivMod channel;
+  phi::kps::details::FastDivMod width;
+  phi::kps::details::FastDivMod height;
 
   explicit HOSTDEVICE FastDivModForPooling(const int channels,
                                            const int output_width,
                                            const int output_height) {
-    channel = paddle::platform::FastDivMod(channels);
-    width = paddle::platform::FastDivMod(output_width);
-    height = paddle::platform::FastDivMod(output_height);
+    channel = phi::kps::details::FastDivMod(channels);
+    width = phi::kps::details::FastDivMod(output_width);
+    height = phi::kps::details::FastDivMod(output_height);
   }
 };
 
 struct FastDivModForPooling3D {
  public:
-  paddle::platform::FastDivMod channel;
-  paddle::platform::FastDivMod width;
-  paddle::platform::FastDivMod height;
-  paddle::platform::FastDivMod depth;
+  phi::kps::details::FastDivMod channel;
+  phi::kps::details::FastDivMod width;
+  phi::kps::details::FastDivMod height;
+  phi::kps::details::FastDivMod depth;
 
   explicit HOSTDEVICE FastDivModForPooling3D(const int channels,
                                              const int output_width,
                                              const int output_height,
                                              const int output_depth) {
-    channel = paddle::platform::FastDivMod(channels);
-    width = paddle::platform::FastDivMod(output_width);
-    height = paddle::platform::FastDivMod(output_height);
-    depth = paddle::platform::FastDivMod(output_depth);
+    channel = phi::kps::details::FastDivMod(channels);
+    width = phi::kps::details::FastDivMod(output_width);
+    height = phi::kps::details::FastDivMod(output_height);
+    depth = phi::kps::details::FastDivMod(output_depth);
   }
 };
 
 struct FastDivModForPoolingWithMoreStaff {
  public:
-  paddle::platform::FastDivMod channel;
-  paddle::platform::FastDivMod width;
-  paddle::platform::FastDivMod height;
-  paddle::platform::FastDivMod ksize_w;
-  paddle::platform::FastDivMod ksize_h;
-  paddle::platform::FastDivMod stride_w;
-  paddle::platform::FastDivMod stride_h;
+  phi::kps::details::FastDivMod channel;
+  phi::kps::details::FastDivMod width;
+  phi::kps::details::FastDivMod height;
+  phi::kps::details::FastDivMod ksize_w;
+  phi::kps::details::FastDivMod ksize_h;
+  phi::kps::details::FastDivMod stride_w;
+  phi::kps::details::FastDivMod stride_h;
 
   explicit HOSTDEVICE FastDivModForPoolingWithMoreStaff(
       const int channels,
@@ -75,13 +75,13 @@ struct FastDivModForPoolingWithMoreStaff {
       const int ksize_height,
       const int stride_width,
       const int stride_height) {
-    channel = paddle::platform::FastDivMod(channels);
-    width = paddle::platform::FastDivMod(input_width);
-    height = paddle::platform::FastDivMod(input_height);
-    ksize_w = paddle::platform::FastDivMod(ksize_width);
-    ksize_h = paddle::platform::FastDivMod(ksize_height);
-    stride_w = paddle::platform::FastDivMod(stride_width);
-    stride_h = paddle::platform::FastDivMod(stride_height);
+    channel = phi::kps::details::FastDivMod(channels);
+    width = phi::kps::details::FastDivMod(input_width);
+    height = phi::kps::details::FastDivMod(input_height);
+    ksize_w = phi::kps::details::FastDivMod(ksize_width);
+    ksize_h = phi::kps::details::FastDivMod(ksize_height);
+    stride_w = phi::kps::details::FastDivMod(stride_width);
+    stride_h = phi::kps::details::FastDivMod(stride_height);
   }
 };
 
diff --git a/paddle/phi/kernels/gpu/interpolate_grad_kernel.cu b/paddle/phi/kernels/gpu/interpolate_grad_kernel.cu
index bf230340da0..ee24d9f9005 100644
--- a/paddle/phi/kernels/gpu/interpolate_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/interpolate_grad_kernel.cu
@@ -15,7 +15,6 @@
 #include "paddle/phi/kernels/interpolate_grad_kernel.h"
 
 #include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
-#include "paddle/fluid/platform/fast_divmod.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/backends/gpu/gpu_launch_config.h"
 #include "paddle/phi/common/amp_type_traits.h"
@@ -24,6 +23,7 @@
 #include "paddle/phi/kernels/funcs/interpolate_function.h"
 #include "paddle/phi/kernels/funcs/math_cuda_utils.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
+#include "paddle/phi/kernels/primitive/datamover_primitives.h"
 
 namespace phi {
 
diff --git a/paddle/phi/kernels/gpu/interpolate_kernel.cu b/paddle/phi/kernels/gpu/interpolate_kernel.cu
index 6aa17e3beaf..c8f6a40104b 100644
--- a/paddle/phi/kernels/gpu/interpolate_kernel.cu
+++ b/paddle/phi/kernels/gpu/interpolate_kernel.cu
@@ -16,7 +16,6 @@
 
 #include "paddle/fluid/platform/device/gpu/gpu_device_function.h"
 #include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
-#include "paddle/fluid/platform/fast_divmod.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/backends/gpu/gpu_launch_config.h"
 #include "paddle/phi/common/amp_type_traits.h"
@@ -24,9 +23,10 @@
 #include "paddle/phi/common/layout.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/funcs/interpolate_function.h"
+#include "paddle/phi/kernels/primitive/datamover_primitives.h"
 
 namespace phi {
-using paddle::platform::FastDivMod;
+using phi::kps::details::FastDivMod;
 
 template <typename T>
 __forceinline__ __device__ void PreCalculatorForLinearInterpInputIndex(
diff --git a/paddle/phi/kernels/impl/broadcast_tensors_kernel_impl.h b/paddle/phi/kernels/impl/broadcast_tensors_kernel_impl.h
index 652f0b2eee9..d0b7825d15e 100644
--- a/paddle/phi/kernels/impl/broadcast_tensors_kernel_impl.h
+++ b/paddle/phi/kernels/impl/broadcast_tensors_kernel_impl.h
@@ -108,7 +108,7 @@ void BroadcastTensorsKernel(const Context& ctx,
       SWITCH_OUT_RANK_CASE(5)
       SWITCH_OUT_RANK_CASE(6)
       default: {
-        PADDLE_THROW(paddle::platform::errors::InvalidArgument(
+        PADDLE_THROW(phi::errors::InvalidArgument(
             "Target tensor rank out of range"
             "Maximum supported rank for broadcast is: 6"));
       }
diff --git a/paddle/phi/tests/api/scale_api.h b/paddle/phi/tests/api/scale_api.h
index 0e42b7f2a18..ec0d6d213a2 100644
--- a/paddle/phi/tests/api/scale_api.h
+++ b/paddle/phi/tests/api/scale_api.h
@@ -129,7 +129,7 @@ static void ScaleCPU(DataType kernel_dtype,
       break;
     }
     default: {
-      PADDLE_THROW(paddle::platform::errors::Fatal(
+      PADDLE_THROW(phi::errors::Fatal(
           "Detected unsupported data type."
           "Only Float64, Float32, BFloat16, Int64, Int32, Int16, Int8, UInt8 "
           "are supported for now."));
@@ -188,7 +188,7 @@ static void ScaleGPU(DataType kernel_dtype,
       break;
     }
     default: {
-      PADDLE_THROW(paddle::platform::errors::Fatal(
+      PADDLE_THROW(phi::errors::Fatal(
          "Detected unsupported data type."
          "Only Float64, Float32, Float16, Int64, Int32, Int16, Int8, UInt8 "
          "are "
@@ -262,7 +262,7 @@ Tensor scale_switch_case(const Tensor& x,
       break;
 #endif
     default:
-      PADDLE_THROW(paddle::platform::errors::Fatal(
+      PADDLE_THROW(phi::errors::Fatal(
           "Detected unsupported backend."
           "Only CPU and CUDA Backend are supported for now."
          "Please double check if your backend falls into the above two "
diff --git a/paddle/phi/tests/kernels/test_math_function.cc b/paddle/phi/tests/kernels/test_math_function.cc
index bcb0e9d7adc..d3f862cd561 100644
--- a/paddle/phi/tests/kernels/test_math_function.cc
+++ b/paddle/phi/tests/kernels/test_math_function.cc
@@ -275,12 +275,12 @@ TEST(math_funciton, set_constant) {
   auto* ctx = new phi::CPUContext();
   phi::funcs::set_constant(*ctx, &t, 10);
   for (int64_t i = 0; i < t.numel(); ++i) {
-    PADDLE_ENFORCE_EQ(10,
-                      t.data<int>()[i],
-                      paddle::platform::errors::InvalidArgument(
-                          "Each value of input tensor should be 10, "
-                          "but received %d.",
-                          t.data<int>()[i]));
+    PADDLE_ENFORCE_EQ(
+        10,
+        t.data<int>()[i],
+        phi::errors::InvalidArgument("Each value of input tensor should be 10, "
+                                     "but received %d.",
+                                     t.data<int>()[i]));
   }
   delete ctx;
 }
diff --git a/paddle/phi/tests/kernels/test_math_function.cu b/paddle/phi/tests/kernels/test_math_function.cu
index b227523ce0b..aa16c232c79 100644
--- a/paddle/phi/tests/kernels/test_math_function.cu
+++ b/paddle/phi/tests/kernels/test_math_function.cu
@@ -26,7 +26,7 @@ void fill_fp16_data(phi::dtype::float16* in_ptr,
   PADDLE_ENFORCE_EQ(
       size,
       data.size(),
-      paddle::platform::errors::InvalidArgument(
+      phi::errors::InvalidArgument(
           "The size of argument data should"
           " be equal to the argument size. Expected %d, but received %d.",
          size,
-- 
GitLab