diff --git a/paddle/phi/core/utils/data_type.h b/paddle/phi/core/utils/data_type.h index 9877149dc52bd8308c459f8f8f53a8f090ab57e7..5e53d8c95b0b96ba5e868c4183450a904917eed4 100644 --- a/paddle/phi/core/utils/data_type.h +++ b/paddle/phi/core/utils/data_type.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once #include +#include <map> #include #include @@ -23,6 +24,23 @@ limitations under the License. */ namespace phi { +// Here we can't depend on the fluid proto::VarType, so we use the dtype enum +// value directly. See also `assign_value_sig.cc`. +// proto::VarType::INT16 -> 1 -> phi::DataType::INT16 +// proto::VarType::INT32 -> 2 -> phi::DataType::INT32 +// proto::VarType::INT64 -> 3 -> phi::DataType::INT64 +// proto::VarType::FP16 -> 4 -> phi::DataType::FLOAT16 +// proto::VarType::FP32 -> 5 -> phi::DataType::FLOAT32 +// proto::VarType::FP64 -> 6 -> phi::DataType::FLOAT64 +// proto::VarType::UINT8 -> 20 -> phi::DataType::UINT8 +static std::map<int, phi::DataType> var_type_map{{1, phi::DataType::INT16}, + {2, phi::DataType::INT32}, + {3, phi::DataType::INT64}, + {4, phi::DataType::FLOAT16}, + {5, phi::DataType::FLOAT32}, + {6, phi::DataType::FLOAT64}, + {20, phi::DataType::UINT8}}; + #define _PhiForEachDataTypeHelper_(callback, cpp_type, data_type) \ callback(cpp_type, data_type); diff --git a/paddle/phi/kernels/cpu/arg_min_max_kernel.cc b/paddle/phi/kernels/cpu/arg_min_max_kernel.cc index 13e401b59d643dd1ee23f8842f048f0d23cf9c46..999cb16620d26fe2780699de2ba35fd1ebb8e503 100644 --- a/paddle/phi/kernels/cpu/arg_min_max_kernel.cc +++ b/paddle/phi/kernels/cpu/arg_min_max_kernel.cc @@ -17,6 +17,7 @@ #include "paddle/phi/backends/cpu/cpu_context.h" #include "paddle/phi/core/ddim.h" #include "paddle/phi/core/kernel_registry.h" +#include "paddle/phi/core/utils/data_type.h" #include "paddle/phi/kernels/funcs/eigen/common.h" #include "paddle/phi/kernels/funcs/math_function.h" @@ -141,15 +142,14 @@ void ArgMinMaxKernel(const Context& dev_ctx, int dtype, DenseTensor* out) { if (dtype < 
0) { - paddle::framework::VisitDataTypeTiny( - static_cast( - paddle::framework::proto::VarType::INT64), + phi::VisitDataTypeTiny( + phi::DataType::INT64, VisitDataArgMinMaxFunctor( dev_ctx, x, axis.to(), keepdims, flatten, out)); return; } - paddle::framework::VisitDataTypeTiny( - static_cast(dtype), + phi::VisitDataTypeTiny( + var_type_map[dtype], VisitDataArgMinMaxFunctor( dev_ctx, x, axis.to(), keepdims, flatten, out)); } diff --git a/paddle/phi/kernels/cpu/cumprod_grad_kernel.cc b/paddle/phi/kernels/cpu/cumprod_grad_kernel.cc index a25f9650fc50fefa3899da13b55b985c164a394a..2777622612a81b6653f5dbb7416c8051d81382cd 100644 --- a/paddle/phi/kernels/cpu/cumprod_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/cumprod_grad_kernel.cc @@ -23,7 +23,7 @@ #include "paddle/phi/kernels/funcs/for_range.h" // NOTE(@xiongkun): use of IsComplex<> -#include "paddle/fluid/framework/data_type.h" +#include "paddle/phi/core/utils/data_type.h" namespace phi { template @@ -51,7 +51,7 @@ void CumprodGradKernel(const Context& dev_ctx, const T* out_data_deal; Allocator::AllocationPtr x_conj; Allocator::AllocationPtr out_conj; - if (paddle::framework::IsComplex::value) { + if (phi::IsComplexType(x.dtype())) { x_conj = const_cast(dev_ctx.GetAllocator()) .Allocate(numel * sizeof(T)); auto* x_data_conj = reinterpret_cast(x_conj->ptr()); diff --git a/paddle/phi/kernels/cpu/unique_consecutive_kernel.cc b/paddle/phi/kernels/cpu/unique_consecutive_kernel.cc index 86fe53b72c98595d251d35a21390de8a90799d0a..07df5f1f566a4cf58da8ceb0ada93d4409bb29fb 100644 --- a/paddle/phi/kernels/cpu/unique_consecutive_kernel.cc +++ b/paddle/phi/kernels/cpu/unique_consecutive_kernel.cc @@ -18,8 +18,7 @@ #include "paddle/phi/backends/cpu/cpu_context.h" #include "paddle/phi/core/errors.h" #include "paddle/phi/core/kernel_registry.h" - -#include "paddle/fluid/framework/data_type.h" +#include "paddle/phi/core/utils/data_type.h" namespace phi { @@ -33,8 +32,8 @@ void UniqueConsecutiveKernel(const Context& dev_ctx, 
DenseTensor* out, DenseTensor* index, DenseTensor* counts) { - auto data_type = static_cast(dtype); - if (data_type == paddle::framework::proto::VarType::INT32) { + auto data_type = var_type_map[dtype]; + if (data_type == phi::DataType::INT32) { PADDLE_ENFORCE_LE( x.numel(), INT_MAX, @@ -46,13 +45,13 @@ void UniqueConsecutiveKernel(const Context& dev_ctx, } if (axis.empty()) { - paddle::framework::VisitDataTypeTiny( + phi::VisitDataTypeTiny( data_type, UniqueConsecutiveFlattenedTensorFunctor( dev_ctx, x, out, return_inverse, return_counts, index, counts)); } else { int valid_axis = axis[0]; - paddle::framework::VisitDataTypeTiny( + phi::VisitDataTypeTiny( data_type, UniqueConsecutiveDimFunctor(dev_ctx, x, diff --git a/paddle/phi/kernels/funcs/math_function.cc b/paddle/phi/kernels/funcs/math_function.cc index 7cad886654852af432b09d755f437716d89de152..8b6fd117e9651366b5e878dc7d9f11e5700d32f3 100644 --- a/paddle/phi/kernels/funcs/math_function.cc +++ b/paddle/phi/kernels/funcs/math_function.cc @@ -26,10 +26,10 @@ limitations under the License. */ #include #include -#include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/platform/bfloat16.h" #include "paddle/fluid/platform/float16.h" #include "paddle/phi/backends/cpu/cpu_context.h" +#include "paddle/phi/common/data_type.h" #include "paddle/phi/kernels/funcs/eigen/common.h" #include "paddle/phi/kernels/funcs/math_function_impl.h" #include "unsupported/Eigen/CXX11/Tensor" diff --git a/paddle/phi/kernels/funcs/math_function.cu b/paddle/phi/kernels/funcs/math_function.cu index ea2b23ca813d3d590d94c9c57808a30f92170766..7e7a6c30c34fc3cc51155089517009d45ef925c8 100644 --- a/paddle/phi/kernels/funcs/math_function.cu +++ b/paddle/phi/kernels/funcs/math_function.cu @@ -14,12 +14,12 @@ limitations under the License. 
*/ #include #include -#include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/platform/bfloat16.h" #include "paddle/fluid/platform/float16.h" #include "paddle/phi/backends/gpu/gpu_context.h" +#include "paddle/phi/common/data_type.h" #include "paddle/phi/kernels/funcs/blas/blas.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/funcs/math_function_impl.h" diff --git a/paddle/phi/kernels/funcs/math_function_impl.h b/paddle/phi/kernels/funcs/math_function_impl.h index 1ab9455215e9a82d332242f8af1868d6408bdf0d..b59a249bbbf04685f3f3c495bcf567fe34ab8c56 100644 --- a/paddle/phi/kernels/funcs/math_function_impl.h +++ b/paddle/phi/kernels/funcs/math_function_impl.h @@ -16,7 +16,7 @@ limitations under the License. */ #include #include -#include "paddle/fluid/framework/data_type.h" +#include "paddle/phi/common/data_type.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace phi { diff --git a/paddle/phi/kernels/gpu/arg_min_max_kernel.cu b/paddle/phi/kernels/gpu/arg_min_max_kernel.cu index 13db18534955b6a1200641b53b185e11afd033bd..0cbf206bb36894c93298ff811ea3823b9996c79c 100644 --- a/paddle/phi/kernels/gpu/arg_min_max_kernel.cu +++ b/paddle/phi/kernels/gpu/arg_min_max_kernel.cu @@ -28,9 +28,8 @@ namespace cub = hipcub; #endif #include -#include "paddle/fluid/framework/data_type.h" #include "paddle/phi/core/ddim.h" - +#include "paddle/phi/core/utils/data_type.h" namespace phi { namespace { // NOLINT @@ -209,15 +208,14 @@ void ArgMinMaxOpCUDAKernel(const Context& dev_ctx, int dtype, DenseTensor* out) { if (dtype < 0) { - paddle::framework::VisitDataTypeTiny( - static_cast( - paddle::framework::proto::VarType::INT64), + phi::VisitDataTypeTiny( + phi::DataType::INT64, VisitDataCudaArgMinMaxFunctor( dev_ctx, x, axis.to(), keepdims, flatten, out)); return; } - paddle::framework::VisitDataTypeTiny( - static_cast(dtype), + phi::VisitDataTypeTiny( + 
var_type_map[dtype], VisitDataCudaArgMinMaxFunctor( dev_ctx, x, axis.to(), keepdims, flatten, out)); } diff --git a/paddle/phi/kernels/gpu/cumprod_grad_kernel.cu b/paddle/phi/kernels/gpu/cumprod_grad_kernel.cu index aee1aaa9555b62dafe9de764d9832dc41a395265..d8e375825cdab37022cf9fd98046dd465b2a8f76 100644 --- a/paddle/phi/kernels/gpu/cumprod_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/cumprod_grad_kernel.cu @@ -24,7 +24,7 @@ #include "paddle/phi/kernels/funcs/elementwise_functor.h" #include "paddle/phi/kernels/funcs/for_range.h" // NOTE(@xiongkun): use of IsComplex<> -#include "paddle/fluid/framework/data_type.h" +#include "paddle/phi/core/utils/data_type.h" namespace phi { @@ -152,7 +152,7 @@ void CumprodGradKernel(const Context &dev_ctx, const T *y_data_deal; Allocator::AllocationPtr x_conj; Allocator::AllocationPtr y_conj; - if (paddle::framework::IsComplex::value) { + if (phi::IsComplexType(x.dtype())) { x_conj = const_cast(dev_ctx.GetAllocator()) .Allocate(numel * sizeof(T)); auto *x_data_conj = reinterpret_cast(x_conj->ptr()); diff --git a/paddle/phi/kernels/gpu/unique_consecutive_kernel.cu b/paddle/phi/kernels/gpu/unique_consecutive_kernel.cu index 4ce91a0dd66b4bad79406e716d23e9dce460424f..9eb9309bb496f9b643a8f20e4cd24006ca10ebab 100644 --- a/paddle/phi/kernels/gpu/unique_consecutive_kernel.cu +++ b/paddle/phi/kernels/gpu/unique_consecutive_kernel.cu @@ -21,8 +21,6 @@ #include "paddle/phi/core/errors.h" #include "paddle/phi/core/kernel_registry.h" -#include "paddle/fluid/framework/data_type.h" - namespace phi { template @@ -35,8 +33,8 @@ void UniqueConsecutiveKernel(const Context& dev_ctx, DenseTensor* out, DenseTensor* index, DenseTensor* counts) { - auto data_type = static_cast(dtype); - if (data_type == paddle::framework::proto::VarType::INT32) { + auto data_type = var_type_map[dtype]; + if (data_type == phi::DataType::INT32) { PADDLE_ENFORCE_LE( x.numel() + 1, INT_MAX, @@ -49,14 +47,14 @@ void UniqueConsecutiveKernel(const Context& dev_ctx, // if 'axis' 
is not required, flatten the Tensor. if (axis.empty()) { - paddle::framework::VisitDataTypeTiny( + phi::VisitDataTypeTiny( data_type, UniqueConsecutiveFlattenedCUDAFunctor( dev_ctx, x, out, return_inverse, return_counts, index, counts)); } else { // 'axis' is required. int valid_axis = axis[0]; - paddle::framework::VisitDataTypeTiny( + phi::VisitDataTypeTiny( data_type, UniqueConsecutiveDimsCUDAFunctor(dev_ctx, x, diff --git a/paddle/phi/kernels/impl/isclose_kernel_impl.h b/paddle/phi/kernels/impl/isclose_kernel_impl.h index 25247ceaff6c0a5f52a639176ce04c0589cbbd87..cf7171656486c19d7acf260ca7221da67c630e6b 100644 --- a/paddle/phi/kernels/impl/isclose_kernel_impl.h +++ b/paddle/phi/kernels/impl/isclose_kernel_impl.h @@ -16,7 +16,6 @@ #include #include -#include "paddle/fluid/framework/data_type.h" #include "paddle/phi/backends/cpu/cpu_context.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/common/data_type.h" diff --git a/paddle/phi/kernels/xpu/arg_min_max_kernel.cc b/paddle/phi/kernels/xpu/arg_min_max_kernel.cc index a48e2155a251a4c14fb09bc0d29391c1fab8a589..b3a73931723ac5fd9377d5297368ea4df54fcfde 100644 --- a/paddle/phi/kernels/xpu/arg_min_max_kernel.cc +++ b/paddle/phi/kernels/xpu/arg_min_max_kernel.cc @@ -17,6 +17,7 @@ #include "paddle/phi/backends/xpu/xpu_context.h" #include "paddle/phi/core/ddim.h" #include "paddle/phi/core/kernel_registry.h" +#include "paddle/phi/core/utils/data_type.h" namespace phi {