From 1631836f7c5894b9be9023205e5cb8e8f738a2b3 Mon Sep 17 00:00:00 2001
From: Wang Xin
Date: Wed, 9 Nov 2022 17:03:29 +0800
Subject: [PATCH] [PHI decoupling] remove framework/data_type.h from phi
 (#47776)

* remove framework/data_type.h from phi

* fix CI fail: map proto::VarType to phi::DataType

* refactor code to add more detailed comments
---
 paddle/phi/core/utils/data_type.h             | 18 ++++++++++++++++++
 paddle/phi/kernels/cpu/arg_min_max_kernel.cc  | 10 +++++-----
 paddle/phi/kernels/cpu/cumprod_grad_kernel.cc |  4 ++--
 .../kernels/cpu/unique_consecutive_kernel.cc  | 11 +++++------
 paddle/phi/kernels/funcs/math_function.cc     |  2 +-
 paddle/phi/kernels/funcs/math_function.cu     |  2 +-
 paddle/phi/kernels/funcs/math_function_impl.h |  2 +-
 paddle/phi/kernels/gpu/arg_min_max_kernel.cu  | 12 +++++-------
 paddle/phi/kernels/gpu/cumprod_grad_kernel.cu |  4 ++--
 .../kernels/gpu/unique_consecutive_kernel.cu  | 10 ++++------
 paddle/phi/kernels/impl/isclose_kernel_impl.h |  1 -
 paddle/phi/kernels/xpu/arg_min_max_kernel.cc  |  1 +
 12 files changed, 45 insertions(+), 32 deletions(-)

diff --git a/paddle/phi/core/utils/data_type.h b/paddle/phi/core/utils/data_type.h
index 9877149dc5..5e53d8c95b 100644
--- a/paddle/phi/core/utils/data_type.h
+++ b/paddle/phi/core/utils/data_type.h
@@ -14,6 +14,7 @@ limitations under the License. */
 #pragma once
 
 #include <iostream>
+#include <map>
 #include <string>
 #include <typeindex>
 
@@ -23,6 +24,23 @@ limitations under the License. */
 
 namespace phi {
 
+// Here we can't depend on the fluid proto::VarType, so we use the dtype enum
+// value directly. See also `assign_value_sig.cc`.
+// proto::VarType::INT16 -> 1 -> phi::DataType::INT16
+// proto::VarType::INT32 -> 2 -> phi::DataType::INT32
+// proto::VarType::INT64 -> 3 -> phi::DataType::INT64
+// proto::VarType::FP16 -> 4 -> phi::DataType::FLOAT16
+// proto::VarType::FP32 -> 5 -> phi::DataType::FLOAT32
+// proto::VarType::FP64 -> 6 -> phi::DataType::FLOAT64
+// proto::VarType::UINT8 -> 20 -> phi::DataType::UINT8
+static std::map<int, phi::DataType> var_type_map{{1, phi::DataType::INT16},
+                                                 {2, phi::DataType::INT32},
+                                                 {3, phi::DataType::INT64},
+                                                 {4, phi::DataType::FLOAT16},
+                                                 {5, phi::DataType::FLOAT32},
+                                                 {6, phi::DataType::FLOAT64},
+                                                 {20, phi::DataType::UINT8}};
+
 #define _PhiForEachDataTypeHelper_(callback, cpp_type, data_type) \
   callback(cpp_type, data_type);
 
diff --git a/paddle/phi/kernels/cpu/arg_min_max_kernel.cc b/paddle/phi/kernels/cpu/arg_min_max_kernel.cc
index 13e401b59d..999cb16620 100644
--- a/paddle/phi/kernels/cpu/arg_min_max_kernel.cc
+++ b/paddle/phi/kernels/cpu/arg_min_max_kernel.cc
@@ -17,6 +17,7 @@
 #include "paddle/phi/backends/cpu/cpu_context.h"
 #include "paddle/phi/core/ddim.h"
 #include "paddle/phi/core/kernel_registry.h"
+#include "paddle/phi/core/utils/data_type.h"
 #include "paddle/phi/kernels/funcs/eigen/common.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
 
@@ -141,15 +142,14 @@ void ArgMinMaxKernel(const Context& dev_ctx,
                      int dtype,
                      DenseTensor* out) {
   if (dtype < 0) {
-    paddle::framework::VisitDataTypeTiny(
-        static_cast<paddle::framework::proto::VarType::Type>(
-            paddle::framework::proto::VarType::INT64),
+    phi::VisitDataTypeTiny(
+        phi::DataType::INT64,
         VisitDataArgMinMaxFunctor<Context, T, EnumArgMinMaxValue>(
             dev_ctx, x, axis.to<int64_t>(), keepdims, flatten, out));
     return;
   }
-  paddle::framework::VisitDataTypeTiny(
-      static_cast<paddle::framework::proto::VarType::Type>(dtype),
+  phi::VisitDataTypeTiny(
+      var_type_map[dtype],
       VisitDataArgMinMaxFunctor<Context, T, EnumArgMinMaxValue>(
           dev_ctx, x, axis.to<int64_t>(), keepdims, flatten, out));
 }
diff --git a/paddle/phi/kernels/cpu/cumprod_grad_kernel.cc b/paddle/phi/kernels/cpu/cumprod_grad_kernel.cc
index a25f9650fc..2777622612 100644
--- a/paddle/phi/kernels/cpu/cumprod_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/cumprod_grad_kernel.cc
@@ -23,7 +23,7 @@
 #include "paddle/phi/kernels/funcs/for_range.h"
 // NOTE(@xiongkun): use of IsComplex<>
-#include "paddle/fluid/framework/data_type.h"
+#include "paddle/phi/core/utils/data_type.h"
 
 namespace phi {
 
 template <typename T, typename Context>
@@ -51,7 +51,7 @@ void CumprodGradKernel(const Context& dev_ctx,
   const T* out_data_deal;
   Allocator::AllocationPtr x_conj;
   Allocator::AllocationPtr out_conj;
-  if (paddle::framework::IsComplex<T>::value) {
+  if (phi::IsComplexType(x.dtype())) {
     x_conj = const_cast<Allocator&>(dev_ctx.GetAllocator())
                  .Allocate(numel * sizeof(T));
     auto* x_data_conj = reinterpret_cast<T*>(x_conj->ptr());
diff --git a/paddle/phi/kernels/cpu/unique_consecutive_kernel.cc b/paddle/phi/kernels/cpu/unique_consecutive_kernel.cc
index 86fe53b72c..07df5f1f56 100644
--- a/paddle/phi/kernels/cpu/unique_consecutive_kernel.cc
+++ b/paddle/phi/kernels/cpu/unique_consecutive_kernel.cc
@@ -18,8 +18,7 @@
 #include "paddle/phi/backends/cpu/cpu_context.h"
 #include "paddle/phi/core/errors.h"
 #include "paddle/phi/core/kernel_registry.h"
-
-#include "paddle/fluid/framework/data_type.h"
+#include "paddle/phi/core/utils/data_type.h"
 
 namespace phi {
 
@@ -33,8 +32,8 @@ void UniqueConsecutiveKernel(const Context& dev_ctx,
                              DenseTensor* out,
                              DenseTensor* index,
                              DenseTensor* counts) {
-  auto data_type = static_cast<paddle::framework::proto::VarType::Type>(dtype);
-  if (data_type == paddle::framework::proto::VarType::INT32) {
+  auto data_type = var_type_map[dtype];
+  if (data_type == phi::DataType::INT32) {
     PADDLE_ENFORCE_LE(
         x.numel(),
         INT_MAX,
@@ -46,13 +45,13 @@ void UniqueConsecutiveKernel(const Context& dev_ctx,
   }
 
   if (axis.empty()) {
-    paddle::framework::VisitDataTypeTiny(
+    phi::VisitDataTypeTiny(
         data_type,
         UniqueConsecutiveFlattenedTensorFunctor<Context, T>(
             dev_ctx, x, out, return_inverse, return_counts, index, counts));
   } else {
     int valid_axis = axis[0];
-    paddle::framework::VisitDataTypeTiny(
+    phi::VisitDataTypeTiny(
         data_type,
         UniqueConsecutiveDimFunctor<Context, T>(dev_ctx,
                                                 x,
diff --git a/paddle/phi/kernels/funcs/math_function.cc b/paddle/phi/kernels/funcs/math_function.cc
index 7cad886654..8b6fd117e9 100644
--- a/paddle/phi/kernels/funcs/math_function.cc
+++ b/paddle/phi/kernels/funcs/math_function.cc
@@ -26,10 +26,10 @@ limitations under the License. */
 #include <memory>
 #include <vector>
 
-#include "paddle/fluid/framework/data_type.h"
 #include "paddle/fluid/platform/bfloat16.h"
 #include "paddle/fluid/platform/float16.h"
 #include "paddle/phi/backends/cpu/cpu_context.h"
+#include "paddle/phi/common/data_type.h"
 #include "paddle/phi/kernels/funcs/eigen/common.h"
 #include "paddle/phi/kernels/funcs/math_function_impl.h"
 #include "unsupported/Eigen/CXX11/Tensor"
diff --git a/paddle/phi/kernels/funcs/math_function.cu b/paddle/phi/kernels/funcs/math_function.cu
index ea2b23ca81..7e7a6c30c3 100644
--- a/paddle/phi/kernels/funcs/math_function.cu
+++ b/paddle/phi/kernels/funcs/math_function.cu
@@ -14,12 +14,12 @@ limitations under the License. */
 #include <algorithm>
 #include <vector>
 
-#include "paddle/fluid/framework/data_type.h"
 #include "paddle/fluid/memory/malloc.h"
 #include "paddle/fluid/memory/memcpy.h"
 #include "paddle/fluid/platform/bfloat16.h"
 #include "paddle/fluid/platform/float16.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
+#include "paddle/phi/common/data_type.h"
 #include "paddle/phi/kernels/funcs/blas/blas.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
 #include "paddle/phi/kernels/funcs/math_function_impl.h"
diff --git a/paddle/phi/kernels/funcs/math_function_impl.h b/paddle/phi/kernels/funcs/math_function_impl.h
index 1ab9455215..b59a249bbb 100644
--- a/paddle/phi/kernels/funcs/math_function_impl.h
+++ b/paddle/phi/kernels/funcs/math_function_impl.h
@@ -16,7 +16,7 @@ limitations under the License. */
 #include <memory>
 #include <vector>
 
-#include "paddle/fluid/framework/data_type.h"
+#include "paddle/phi/common/data_type.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
 
 namespace phi {
diff --git a/paddle/phi/kernels/gpu/arg_min_max_kernel.cu b/paddle/phi/kernels/gpu/arg_min_max_kernel.cu
index 13db185349..0cbf206bb3 100644
--- a/paddle/phi/kernels/gpu/arg_min_max_kernel.cu
+++ b/paddle/phi/kernels/gpu/arg_min_max_kernel.cu
@@ -28,9 +28,8 @@ namespace cub = hipcub;
 #endif
 
 #include <limits>
-#include "paddle/fluid/framework/data_type.h"
 #include "paddle/phi/core/ddim.h"
-
+#include "paddle/phi/core/utils/data_type.h"
 namespace phi {
 namespace {  // NOLINT
 
@@ -209,15 +208,14 @@ void ArgMinMaxOpCUDAKernel(const Context& dev_ctx,
                            int dtype,
                            DenseTensor* out) {
   if (dtype < 0) {
-    paddle::framework::VisitDataTypeTiny(
-        static_cast<paddle::framework::proto::VarType::Type>(
-            paddle::framework::proto::VarType::INT64),
+    phi::VisitDataTypeTiny(
+        phi::DataType::INT64,
         VisitDataCudaArgMinMaxFunctor<Context, T, EnumArgMinMaxValue>(
             dev_ctx, x, axis.to<int64_t>(), keepdims, flatten, out));
     return;
   }
-  paddle::framework::VisitDataTypeTiny(
-      static_cast<paddle::framework::proto::VarType::Type>(dtype),
+  phi::VisitDataTypeTiny(
+      var_type_map[dtype],
       VisitDataCudaArgMinMaxFunctor<Context, T, EnumArgMinMaxValue>(
           dev_ctx, x, axis.to<int64_t>(), keepdims, flatten, out));
 }
diff --git a/paddle/phi/kernels/gpu/cumprod_grad_kernel.cu b/paddle/phi/kernels/gpu/cumprod_grad_kernel.cu
index aee1aaa955..d8e375825c 100644
--- a/paddle/phi/kernels/gpu/cumprod_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/cumprod_grad_kernel.cu
@@ -24,7 +24,7 @@
 #include "paddle/phi/kernels/funcs/elementwise_functor.h"
 #include "paddle/phi/kernels/funcs/for_range.h"
 // NOTE(@xiongkun): use of IsComplex<>
-#include "paddle/fluid/framework/data_type.h"
+#include "paddle/phi/core/utils/data_type.h"
 
 namespace phi {
 
@@ -152,7 +152,7 @@ void CumprodGradKernel(const Context &dev_ctx,
   const T *y_data_deal;
   Allocator::AllocationPtr x_conj;
   Allocator::AllocationPtr y_conj;
-  if (paddle::framework::IsComplex<T>::value) {
+  if (phi::IsComplexType(x.dtype())) {
     x_conj = const_cast<Allocator &>(dev_ctx.GetAllocator())
                  .Allocate(numel * sizeof(T));
     auto *x_data_conj = reinterpret_cast<T *>(x_conj->ptr());
diff --git a/paddle/phi/kernels/gpu/unique_consecutive_kernel.cu b/paddle/phi/kernels/gpu/unique_consecutive_kernel.cu
index 4ce91a0dd6..9eb9309bb4 100644
--- a/paddle/phi/kernels/gpu/unique_consecutive_kernel.cu
+++ b/paddle/phi/kernels/gpu/unique_consecutive_kernel.cu
@@ -21,8 +21,6 @@
 #include "paddle/phi/core/errors.h"
 #include "paddle/phi/core/kernel_registry.h"
 
-#include "paddle/fluid/framework/data_type.h"
-
 namespace phi {
 
 template <typename T, typename Context>
@@ -35,8 +33,8 @@ void UniqueConsecutiveKernel(const Context& dev_ctx,
                              DenseTensor* out,
                              DenseTensor* index,
                              DenseTensor* counts) {
-  auto data_type = static_cast<paddle::framework::proto::VarType::Type>(dtype);
-  if (data_type == paddle::framework::proto::VarType::INT32) {
+  auto data_type = var_type_map[dtype];
+  if (data_type == phi::DataType::INT32) {
     PADDLE_ENFORCE_LE(
         x.numel() + 1,
         INT_MAX,
@@ -49,14 +47,14 @@ void UniqueConsecutiveKernel(const Context& dev_ctx,
 
   // if 'axis' is not required, flatten the Tensor.
   if (axis.empty()) {
-    paddle::framework::VisitDataTypeTiny(
+    phi::VisitDataTypeTiny(
         data_type,
         UniqueConsecutiveFlattenedCUDAFunctor<Context, T>(
             dev_ctx, x, out, return_inverse, return_counts, index, counts));
   } else {
     // 'axis' is required.
     int valid_axis = axis[0];
-    paddle::framework::VisitDataTypeTiny(
+    phi::VisitDataTypeTiny(
         data_type,
         UniqueConsecutiveDimsCUDAFunctor<Context, T>(dev_ctx,
                                                      x,
diff --git a/paddle/phi/kernels/impl/isclose_kernel_impl.h b/paddle/phi/kernels/impl/isclose_kernel_impl.h
index 25247ceaff..cf71716564 100644
--- a/paddle/phi/kernels/impl/isclose_kernel_impl.h
+++ b/paddle/phi/kernels/impl/isclose_kernel_impl.h
@@ -16,7 +16,6 @@
 #include <cmath>
 #include <string>
 
-#include "paddle/fluid/framework/data_type.h"
 #include "paddle/phi/backends/cpu/cpu_context.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/common/data_type.h"
diff --git a/paddle/phi/kernels/xpu/arg_min_max_kernel.cc b/paddle/phi/kernels/xpu/arg_min_max_kernel.cc
index a48e2155a2..b3a7393172 100644
--- a/paddle/phi/kernels/xpu/arg_min_max_kernel.cc
+++ b/paddle/phi/kernels/xpu/arg_min_max_kernel.cc
@@ -17,6 +17,7 @@
 #include "paddle/phi/backends/xpu/xpu_context.h"
 #include "paddle/phi/core/ddim.h"
 #include "paddle/phi/core/kernel_registry.h"
+#include "paddle/phi/core/utils/data_type.h"
 
 namespace phi {
--
GitLab
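Note for readers: every call site touched above follows the same two-step pattern --
translate the op's integer dtype attribute into phi::DataType through
var_type_map, then dispatch on it with phi::VisitDataTypeTiny, which turns the
runtime enum back into a static template argument. Below is a minimal,
self-contained sketch of that dispatch mechanism, not Paddle's actual code:
DataType is reduced to two entries and StubFunctor is a hypothetical stand-in
for functors such as VisitDataArgMinMaxFunctor.

    #include <cstdint>
    #include <iostream>
    #include <map>

    // Reduced stand-ins for phi::DataType and var_type_map.
    enum class DataType { INT32, INT64 };
    static std::map<int, DataType> var_type_map{{2, DataType::INT32},
                                                {3, DataType::INT64}};

    // VisitDataTypeTiny-style dispatcher: picks a concrete C++ type from the
    // runtime enum and calls the visitor's templated apply().
    template <typename Visitor>
    void VisitDataTypeTiny(DataType dtype, Visitor visitor) {
      switch (dtype) {
        case DataType::INT32:
          visitor.template apply<int32_t>();
          break;
        case DataType::INT64:
          visitor.template apply<int64_t>();
          break;
      }
    }

    // Hypothetical visitor in the role of VisitDataArgMinMaxFunctor.
    struct StubFunctor {
      template <typename T>
      void apply() const {
        std::cout << "dispatched with sizeof(T) = " << sizeof(T) << "\n";
      }
    };

    int main() {
      int dtype = 3;  // op attribute value, e.g. proto::VarType::INT64
      VisitDataTypeTiny(var_type_map[dtype], StubFunctor{});  // prints 8
    }

This also suggests why var_type_map enumerates only seven entries rather than
mirroring the whole proto::VarType enum: the map only needs the dtype values
these kernels actually receive as attributes.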