diff --git a/paddle/phi/core/custom_kernel.cc b/paddle/phi/core/custom_kernel.cc
index f84a2bd8d9c5d0634f29485fc07f649ea9fb1b9e..58f9e1c623e81b4f2877099d1cdc2a8fe2e18b9e 100644
--- a/paddle/phi/core/custom_kernel.cc
+++ b/paddle/phi/core/custom_kernel.cc
@@ -20,16 +20,16 @@ void RegisterCustomKernels(const CustomKernelMap& custom_kernel_map) {
   auto& kernel_info_map = custom_kernel_map.GetMap();
   VLOG(3) << "Size of custom_kernel_map: " << kernel_info_map.size();
 
+  auto& kernels = KernelFactory::Instance().kernels();
   for (auto& pair : kernel_info_map) {
-    PADDLE_ENFORCE_EQ(
-        KernelFactory::Instance().HasCompatiblePhiKernel(pair.first),
-        true,
+    PADDLE_ENFORCE_NE(
+        kernels.find(pair.first),
+        kernels.end(),
         phi::errors::InvalidArgument(
             "The kernel %s is not ready for custom kernel registering.",
             pair.first));
 
     for (auto& info_pair : pair.second) {
-      auto& kernels = KernelFactory::Instance().kernels();
       PADDLE_ENFORCE_EQ(
           kernels[pair.first].find(info_pair.first),
           kernels[pair.first].end(),
diff --git a/paddle/phi/core/kernel_registry.h b/paddle/phi/core/kernel_registry.h
index 6a1688947b986549e1feaf39cdf6c73749b0ff3a..7a05452cbebe08d16a4486a03923431a3e59cb81 100644
--- a/paddle/phi/core/kernel_registry.h
+++ b/paddle/phi/core/kernel_registry.h
@@ -87,13 +87,11 @@ struct KernelArgsParseFunctor {
                               default_tensor_layout,
                               default_key.dtype(),
                               arg_type);
-#ifndef PADDLE_WITH_CUSTOM_KERNEL
       } else if (arg_type == std::type_index(typeid(const SelectedRows&))) {
         args_def->AppendInput(default_key.backend(),
                               default_tensor_layout,
                               default_key.dtype(),
                               arg_type);
-#endif
       } else if (arg_type == std::type_index(typeid(DenseTensor*))) {
         args_def->AppendOutput(default_key.backend(),
                                default_tensor_layout,
@@ -105,13 +103,11 @@ struct KernelArgsParseFunctor {
                                default_tensor_layout,
                                default_key.dtype(),
                                arg_type);
-#ifndef PADDLE_WITH_CUSTOM_KERNEL
       } else if (arg_type == std::type_index(typeid(SelectedRows*))) {
         args_def->AppendOutput(default_key.backend(),
                                default_tensor_layout,
                                default_key.dtype(),
                                arg_type);
-#endif
       } else {
         // Attribute deal with
         // TODO(chenweihang): now here allow any types of attribute, maybe
diff --git a/paddle/phi/core/kernel_utils.h b/paddle/phi/core/kernel_utils.h
index 2fda3cb6db4fdb4aaac7fc7c88075b833c050bad..e5de5e2b49ebb28017ea751c923241f11ea68bf3 100644
--- a/paddle/phi/core/kernel_utils.h
+++ b/paddle/phi/core/kernel_utils.h
@@ -23,9 +23,7 @@
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/core/enforce.h"
 #include "paddle/phi/core/kernel_context.h"
-#ifndef PADDLE_WITH_CUSTOM_KERNEL
 #include "paddle/phi/core/selected_rows.h"
-#endif
 #include "paddle/phi/core/sparse_coo_tensor.h"
 #include "paddle/phi/core/sparse_csr_tensor.h"
 #include "paddle/phi/core/type_defs.h"
@@ -222,9 +220,7 @@ struct KernelImpl {
   PT_SPECIALIZE_KernelCallHelper_FOR_INPUT(DenseTensor);
   PT_SPECIALIZE_KernelCallHelper_FOR_OPTIONAL_INPUT(DenseTensor);
   PT_SPECIALIZE_KernelCallHelper_FOR_MULTI_INPUT(DenseTensor);
-#ifndef PADDLE_WITH_CUSTOM_KERNEL
   PT_SPECIALIZE_KernelCallHelper_FOR_INPUT(SelectedRows);
-#endif
 
   PT_SPECIALIZE_KernelCallHelper_FOR_INPUT(SparseCooTensor);
   PT_SPECIALIZE_KernelCallHelper_FOR_OPTIONAL_INPUT(SparseCooTensor);
@@ -259,9 +255,7 @@ struct KernelImpl {
 
   PT_SPECIALIZE_KernelCallHelper_FOR_OUTPUT(DenseTensor);
   PT_SPECIALIZE_KernelCallHelper_FOR_MULTI_OUTPUT(DenseTensor);
-#ifndef PADDLE_WITH_CUSTOM_KERNEL
   PT_SPECIALIZE_KernelCallHelper_FOR_OUTPUT(SelectedRows);
-#endif
 
   PT_SPECIALIZE_KernelCallHelper_FOR_OUTPUT(SparseCooTensor);
   PT_SPECIALIZE_KernelCallHelper_FOR_MULTI_OUTPUT(SparseCooTensor);
diff --git a/paddle/phi/core/tensor_meta.h b/paddle/phi/core/tensor_meta.h
index 3d2da542c74176017492bdb9f567396f81308d6a..f4bd0be0b45b867b8ed98a5c50d2e3f58ea49780 100644
--- a/paddle/phi/core/tensor_meta.h
+++ b/paddle/phi/core/tensor_meta.h
@@ -23,13 +23,6 @@ limitations under the License. */
 #include "paddle/utils/any.h"
 #include "paddle/utils/optional.h"
 
-// Note: mixed_vector include many header now, LoD will be
-// used on CUDA device? Can we use small_vector here?
-// @zhanlve: Rollback to original LoD for now
-#ifndef PADDLE_WITH_CUSTOM_KERNEL
-#include "paddle/fluid/framework/mixed_vector.h"
-#endif
-
 namespace phi {
 
 using DDim = phi::DDim;
diff --git a/paddle/phi/tests/core/test_custom_kernel.cc b/paddle/phi/tests/core/test_custom_kernel.cc
index d8e42c9d0d8b11d393dbb71776671d9cb50a7715..69922c055cbac5fe3c3947d0d8d63ee4a1262a4c 100644
--- a/paddle/phi/tests/core/test_custom_kernel.cc
+++ b/paddle/phi/tests/core/test_custom_kernel.cc
@@ -146,12 +146,10 @@ TEST(CustomKernel, custom_kernel_dot) {
               custom_fake_dot_kernels.end());
 
   // 3.before register
-  auto& kernel_factory_instance = phi::KernelFactory::Instance();
   auto& kernels = phi::KernelFactory::Instance().kernels();
-  EXPECT_TRUE(!kernel_factory_instance.HasCompatiblePhiKernel(op_name));
+  EXPECT_TRUE(kernels.find(op_name) == kernels.end());
 
-  // mock fake_dot is supported by phi for HasCompatiblePhiKernel check while
-  // registering
+  // mock fake_dot is supported by phi for check while registering
   auto& fake_dot_kernels = kernels[op_name];
 
   EXPECT_TRUE(fake_dot_kernels.find(
@@ -196,7 +194,7 @@ TEST(CustomKernel, custom_kernel_dot) {
               fake_dot_kernels.end());
 
   // 4.kernel select
-  auto kernel = kernel_factory_instance.SelectKernelOrThrowError(
+  auto kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError(
       op_name, phi::KernelKey(backend, layout, phi::DataType::UINT8));
 
   // 5.prepare parameters for kernel
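
For context, below is a minimal standalone sketch of the registration pre-check pattern the custom_kernel.cc hunk switches to: the kernel map is looked up once, the op must already exist in it (find != end) before the specific kernel variant is inserted, and the same map reference serves both the existence check and the insertion. The KernelMap/RegisterCustomKernel names here are hypothetical stand-ins, not Paddle's actual KernelFactory API.

// Hypothetical sketch of the "find != end, then insert" registration check.
#include <iostream>
#include <string>
#include <unordered_map>

// Stand-ins for the factory's nested op-name -> kernel-key -> kernel map.
using KernelVariantMap = std::unordered_map<std::string, std::string>;
using KernelMap = std::unordered_map<std::string, KernelVariantMap>;

bool RegisterCustomKernel(KernelMap& kernels,
                          const std::string& op_name,
                          const std::string& kernel_key,
                          const std::string& kernel_fn) {
  // 1. The op must already be known (mirrors the PADDLE_ENFORCE_NE on
  //    kernels.find(pair.first) in the patch).
  auto it = kernels.find(op_name);
  if (it == kernels.end()) {
    std::cerr << "The kernel " << op_name
              << " is not ready for custom kernel registering.\n";
    return false;
  }
  // 2. The specific kernel variant must not exist yet (mirrors the
  //    PADDLE_ENFORCE_EQ on kernels[pair.first].find(info_pair.first)).
  if (it->second.find(kernel_key) != it->second.end()) {
    std::cerr << "Kernel variant " << kernel_key << " already registered.\n";
    return false;
  }
  it->second.emplace(kernel_key, kernel_fn);
  return true;
}

int main() {
  KernelMap kernels;
  kernels["dot"];  // op known to the factory, no variants registered yet
  std::cout << RegisterCustomKernel(kernels, "dot", "CPU/NCHW/UINT8", "fake_dot")
            << "\n";  // 1: registered
  std::cout << RegisterCustomKernel(kernels, "fake_op", "CPU/NCHW/UINT8", "fn")
            << "\n";  // 0: op not present in the map
  return 0;
}

Hoisting the kernels() lookup out of the inner loop and checking membership directly, as the patch does, removes the dependency on the HasCompatiblePhiKernel wrapper while keeping both enforce checks intact.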