diff --git a/paddle/fluid/distributed/ps/service/brpc_utils.cc b/paddle/fluid/distributed/ps/service/brpc_utils.cc index 9f2a8eb24533d12ca289543ee7f75d2c05f9b2a3..2009ec772e1cf66d3997e3f4be8f2e67bf2c32e3 100644 --- a/paddle/fluid/distributed/ps/service/brpc_utils.cc +++ b/paddle/fluid/distributed/ps/service/brpc_utils.cc @@ -238,7 +238,7 @@ void DeserializeLodTensor(framework::Variable* var, const VarMsg& msg, void* tensor_data = tensor->mutable_data( place, - framework::TransToPtenDataType(VarMessageToVarType(msg.data_type()))); + framework::TransToPhiDataType(VarMessageToVarType(msg.data_type()))); // IO Buffer if (platform::is_cpu_place(place)) { @@ -281,7 +281,7 @@ void DeserializeSelectedRows( tensor->Resize(phi::make_ddim(vec_dim)); void* tensor_data = tensor->mutable_data( place, - framework::TransToPtenDataType(VarMessageToVarType(msg.data_type()))); + framework::TransToPhiDataType(VarMessageToVarType(msg.data_type()))); // IO Buffer if (platform::is_cpu_place(place)) { unsigned long data_len; // NOLINT diff --git a/paddle/fluid/eager/api/generated/eager_generated/backwards/scale_node.cc b/paddle/fluid/eager/api/generated/eager_generated/backwards/scale_node.cc index 3dbfba0d9150f64afd1002fcf7f3e9365bf786d1..5a2595b9103e4d49845fa8938ee3577b6b3f3f06 100644 --- a/paddle/fluid/eager/api/generated/eager_generated/backwards/scale_node.cc +++ b/paddle/fluid/eager/api/generated/eager_generated/backwards/scale_node.cc @@ -33,36 +33,36 @@ static void ScaleDeviceDispatch(const phi::DenseTensor& dense_tensor, phi::DenseTensor* dense_out) { switch (dense_tensor.dtype()) { case phi::DataType::FLOAT64: { - phi::ScaleKernel::TYPE>( - static_cast::TYPE&>(dev_ctx), dense_tensor /* tensor */, scale /* scale */, bias /* bias */, bias_after_scale /* bias_after_scale */, dense_out /* out tensor */); break; } case phi::DataType::FLOAT32: { - phi::ScaleKernel::TYPE>( - static_cast::TYPE&>(dev_ctx), dense_tensor /* tensor */, scale /* scale */, bias /* bias */, bias_after_scale /* 
bias_after_scale */, dense_out /* out tensor */); break; } case phi::DataType::INT64: { - phi::ScaleKernel::TYPE>( - static_cast::TYPE>( + static_cast::TYPE&>(dev_ctx), dense_tensor /* tensor */, scale /* scale */, bias /* bias */, bias_after_scale /* bias_after_scale */, dense_out /* out tensor */); break; } case phi::DataType::INT32: { - phi::ScaleKernel::TYPE>( - static_cast::TYPE>( + static_cast::TYPE&>(dev_ctx), dense_tensor /* tensor */, scale /* scale */, bias /* bias */, bias_after_scale /* bias_after_scale */, dense_out /* out tensor */); diff --git a/paddle/fluid/eager/api/utils/tensor_utils.cc b/paddle/fluid/eager/api/utils/tensor_utils.cc index 628c0c500b3c4ade711f3b7ba6a9fa4b6b69a7c6..77c39d1b0a37c3946e4c170484118a5fb6f79170 100644 --- a/paddle/fluid/eager/api/utils/tensor_utils.cc +++ b/paddle/fluid/eager/api/utils/tensor_utils.cc @@ -22,7 +22,7 @@ #include "paddle/phi/api/all.h" #include "paddle/fluid/framework/data_layout.h" -#include "paddle/fluid/framework/pten_utils.h" +#include "paddle/fluid/framework/phi_utils.h" #include "paddle/fluid/framework/variable.h" namespace egr { @@ -43,7 +43,7 @@ paddle::experimental::Tensor CreateTensorWithValue( bool is_leaf) { paddle::experimental::Tensor out = paddle::experimental::full( phi::vectorize(ddim), paddle::experimental::Scalar(value), dtype, - phi::TransToPtenBackend(place)); + phi::TransToPhiBackend(place)); auto meta = EagerUtils::autograd_meta(&out); if (is_leaf) { diff --git a/paddle/fluid/eager/auto_code_generator/eager_generator.cc b/paddle/fluid/eager/auto_code_generator/eager_generator.cc index 74c5bcdb20984e0e988e048e05ebf9a68979ce95..a8e0ed7a41a043e12332ad347f673a6c27e5f1ec 100644 --- a/paddle/fluid/eager/auto_code_generator/eager_generator.cc +++ b/paddle/fluid/eager/auto_code_generator/eager_generator.cc @@ -27,7 +27,7 @@ #include "paddle/fluid/pybind/pybind.h" #include "paddle/fluid/string/string_helper.h" -// pten +// phi #include "paddle/phi/kernels/declarations.h" #define 
NUM_CREATED_DUP_INPUTS 4 @@ -544,7 +544,7 @@ static bool CheckOpProto(proto::OpProto* op_proto) { // since only OperatorWithKernel can run in dygraph mode. auto& all_kernels = paddle::framework::OperatorWithKernel::AllOpKernels(); if (!all_kernels.count(op_type) && - !phi::KernelFactory::Instance().HasCompatiblePtenKernel(op_type)) { + !phi::KernelFactory::Instance().HasCompatiblePhiKernel(op_type)) { return false; } diff --git a/paddle/fluid/eager/eager_tensor.h b/paddle/fluid/eager/eager_tensor.h index 42a3a13e5f70aef673e17521bf2fc57ed3869550..41e57ef1a15b0181c23b8e3f4b1bba12218a24f7 100644 --- a/paddle/fluid/eager/eager_tensor.h +++ b/paddle/fluid/eager/eager_tensor.h @@ -14,10 +14,10 @@ #pragma once // framework deps -#include "paddle/fluid/framework/pten_utils.h" +#include "paddle/fluid/framework/phi_utils.h" #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/framework/variable.h" -// pten deps +// Phi deps #include "paddle/phi/api/include/tensor.h" #include "paddle/phi/api/lib/api_declare.h" #include "paddle/phi/api/lib/utils/tensor_utils.h" @@ -31,7 +31,7 @@ * provide variable in * paddle::framework::ExecutionContext to support it. We should remove this as * soon as we finish our latest - * Pten Lib, and use paddle::experimental::Tensor instead. + * Phi Lib, and use paddle::experimental::Tensor instead. * * Note: Keep this class as clean as possible. 
* This class should only support method declared in diff --git a/paddle/fluid/eager/utils.cc b/paddle/fluid/eager/utils.cc index 7464ad74135853a6d5f6b0f6eec3b950f527a599..a7e5931f1f9bc66006fb1a37836be1eda371953e 100644 --- a/paddle/fluid/eager/utils.cc +++ b/paddle/fluid/eager/utils.cc @@ -23,7 +23,7 @@ #include "paddle/phi/core/tensor_meta.h" #include "paddle/fluid/framework/data_layout.h" -#include "paddle/fluid/framework/pten_utils.h" +#include "paddle/fluid/framework/phi_utils.h" #include "paddle/fluid/framework/variable.h" PADDLE_DEFINE_EXPORTED_bool(retain_grad_for_all_tensor, true, diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt index 082c50817433283240e53234063315756d0c5ddf..14aecb5fd43c49ece1f79cb9c8e2b70e9d07df07 100644 --- a/paddle/fluid/framework/CMakeLists.txt +++ b/paddle/fluid/framework/CMakeLists.txt @@ -193,9 +193,9 @@ cc_library(unused_var_check SRCS unused_var_check.cc DEPS glog no_need_buffer_va cc_library(op_kernel_type SRCS op_kernel_type.cc DEPS device_context place) IF(WITH_XPU) -cc_library(phi_utils SRCS pten_utils.cc DEPS lod_tensor selected_rows_utils place phi var_type_traits phi_api_utils op_info xpu_op_list) +cc_library(phi_utils SRCS phi_utils.cc DEPS lod_tensor selected_rows_utils place phi var_type_traits phi_api_utils op_info xpu_op_list) ELSE() -cc_library(phi_utils SRCS pten_utils.cc DEPS lod_tensor selected_rows_utils place phi var_type_traits phi_api_utils op_info) +cc_library(phi_utils SRCS phi_utils.cc DEPS lod_tensor selected_rows_utils place phi var_type_traits phi_api_utils op_info) ENDIF() IF(WITH_XPU) @@ -450,7 +450,7 @@ if(WITH_TESTING AND TEST selected_rows_utils_test) endif() cc_test(scope_guard_test SRCS scope_guard_test.cc) -cc_test(phi_utils_test SRCS pten_utils_test.cc DEPS phi_utils) +cc_test(phi_utils_test SRCS phi_utils_test.cc DEPS phi_utils) if(WITH_GPU OR WITH_ROCM) cc_library(fluid_convert_utils SRCS convert_utils.cc DEPS data_type place gpu_info) diff --git 
a/paddle/fluid/framework/async_executor.cc b/paddle/fluid/framework/async_executor.cc index 81b6917587df9282d3ff59180e6fc079379cef60..ae3d8379bdbf779e2cf82d27c18997f82cb92095 100644 --- a/paddle/fluid/framework/async_executor.cc +++ b/paddle/fluid/framework/async_executor.cc @@ -33,7 +33,7 @@ limitations under the License. */ #include "paddle/fluid/platform/place.h" #include "paddle/fluid/pybind/pybind.h" -// pten +// phi #include "paddle/phi/kernels/declarations.h" namespace paddle { diff --git a/paddle/fluid/framework/convert_utils.cc b/paddle/fluid/framework/convert_utils.cc index 23cf4324086bd48f7a2a429bde26f7303e8d34b3..df5cc6d82042c262467b35f6a7cbe097a4ad7776 100644 --- a/paddle/fluid/framework/convert_utils.cc +++ b/paddle/fluid/framework/convert_utils.cc @@ -18,7 +18,7 @@ limitations under the License. */ namespace paddle { namespace framework { -paddle::experimental::DataType TransToPtenDataType( +paddle::experimental::DataType TransToPhiDataType( const paddle::framework::proto::VarType::Type& dtype) { // Set the order of case branches according to the frequency with // the data type is used diff --git a/paddle/fluid/framework/convert_utils.h b/paddle/fluid/framework/convert_utils.h index c94b5b2311c5202832e5fe00c702e14c56ada9b9..da2af86c77c477c3c70b220b47bc073b47645a5d 100644 --- a/paddle/fluid/framework/convert_utils.h +++ b/paddle/fluid/framework/convert_utils.h @@ -32,7 +32,7 @@ namespace framework { using DataType = paddle::experimental::DataType; using DataLayout = paddle::experimental::DataLayout; -DataType TransToPtenDataType( +DataType TransToPhiDataType( const paddle::framework::proto::VarType::Type& dtype); paddle::framework::proto::VarType::Type TransToProtoVarType( diff --git a/paddle/fluid/framework/convert_utils_test.cc b/paddle/fluid/framework/convert_utils_test.cc index 51b431f4b4a8a080f312f7d8bfdf12c1cdc44e4b..140806dfd7c5e1ae2746f3d116f418fea16fa1f3 100644 --- a/paddle/fluid/framework/convert_utils_test.cc +++ 
b/paddle/fluid/framework/convert_utils_test.cc @@ -43,35 +43,35 @@ TEST(ConvertUtils, DataType) { CHECK(paddle::framework::TransToProtoVarType(paddle::DataType::FLOAT16) == paddle::framework::proto::VarType::FP16); // proto -> enum - CHECK(paddle::framework::TransToPtenDataType( + CHECK(paddle::framework::TransToPhiDataType( paddle::framework::proto::VarType::FP64) == paddle::DataType::FLOAT64); - CHECK(paddle::framework::TransToPtenDataType( + CHECK(paddle::framework::TransToPhiDataType( paddle::framework::proto::VarType::FP32) == paddle::DataType::FLOAT32); - CHECK(paddle::framework::TransToPtenDataType( + CHECK(paddle::framework::TransToPhiDataType( paddle::framework::proto::VarType::INT64) == paddle::DataType::INT64); - CHECK(paddle::framework::TransToPtenDataType( + CHECK(paddle::framework::TransToPhiDataType( paddle::framework::proto::VarType::INT32) == paddle::DataType::INT32); - CHECK(paddle::framework::TransToPtenDataType( + CHECK(paddle::framework::TransToPhiDataType( paddle::framework::proto::VarType::INT8) == paddle::DataType::INT8); - CHECK(paddle::framework::TransToPtenDataType( + CHECK(paddle::framework::TransToPhiDataType( paddle::framework::proto::VarType::UINT8) == paddle::DataType::UINT8); - CHECK(paddle::framework::TransToPtenDataType( + CHECK(paddle::framework::TransToPhiDataType( paddle::framework::proto::VarType::INT16) == paddle::DataType::INT16); - CHECK(paddle::framework::TransToPtenDataType( + CHECK(paddle::framework::TransToPhiDataType( paddle::framework::proto::VarType::BOOL) == paddle::DataType::BOOL); - CHECK(paddle::framework::TransToPtenDataType( + CHECK(paddle::framework::TransToPhiDataType( paddle::framework::proto::VarType::COMPLEX64) == paddle::DataType::COMPLEX64); - CHECK(paddle::framework::TransToPtenDataType( + CHECK(paddle::framework::TransToPhiDataType( paddle::framework::proto::VarType::COMPLEX128) == paddle::DataType::COMPLEX128); - CHECK(paddle::framework::TransToPtenDataType( + 
CHECK(paddle::framework::TransToPhiDataType( paddle::framework::proto::VarType::FP16) == paddle::DataType::FLOAT16); } diff --git a/paddle/fluid/framework/custom_operator.cc b/paddle/fluid/framework/custom_operator.cc index 597265bb2473fd14108b4fa11e7ae93957c4268b..b9e3bee25f6b5377dde7b525138643964fd8366a 100644 --- a/paddle/fluid/framework/custom_operator.cc +++ b/paddle/fluid/framework/custom_operator.cc @@ -30,7 +30,7 @@ limitations under the License. */ #include "paddle/fluid/framework/op_meta_info_helper.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" -#include "paddle/fluid/framework/pten_utils.h" +#include "paddle/fluid/framework/phi_utils.h" #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/platform/dynload/dynamic_loader.h" #include "paddle/fluid/string/string_helper.h" @@ -779,13 +779,13 @@ void RegisterOperatorWithMetaInfo(const std::vector& op_meta_infos, for (size_t i = 0; i < ctx->InputSize(in_name); ++i) { auto dtype = ctx->GetInputDataType(in_name, i); vec_custom_dtype.emplace_back( - paddle::framework::TransToPtenDataType(dtype)); + paddle::framework::TransToPhiDataType(dtype)); } vec_input_dtypes.emplace_back(vec_custom_dtype); } else { auto dtype = ctx->GetInputDataType(in_name); input_dtypes.emplace_back( - paddle::framework::TransToPtenDataType(dtype)); + paddle::framework::TransToPhiDataType(dtype)); } } diff --git a/paddle/fluid/framework/data_device_transform_test.cu b/paddle/fluid/framework/data_device_transform_test.cu index cf9e3de6c1a58a277e4508442c39a882ffa506b2..4757eb60f4361cffd9354afd4a8bf4bf99e86eb3 100644 --- a/paddle/fluid/framework/data_device_transform_test.cu +++ b/paddle/fluid/framework/data_device_transform_test.cu @@ -23,7 +23,7 @@ limitations under the License. 
*/ #include "paddle/fluid/platform/init.h" #include "paddle/phi/kernels/funcs/math_function.h" -#include "paddle/fluid/framework/pten_utils.h" +#include "paddle/fluid/framework/phi_utils.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/data_type_test.cc b/paddle/fluid/framework/data_type_test.cc index 7152004b63de6deab22988a79917b536a0623c81..15cf30c1cf352324b57b8ca7bfcdf9d2d2640aea 100644 --- a/paddle/fluid/framework/data_type_test.cc +++ b/paddle/fluid/framework/data_type_test.cc @@ -28,7 +28,7 @@ TEST(DataType, float16) { Tensor tensor; CPUPlace cpu; - tensor.mutable_data(cpu, f::TransToPtenDataType(dtype)); + tensor.mutable_data(cpu, f::TransToPhiDataType(dtype)); // test fp16 tensor EXPECT_EQ(f::TransToProtoVarType(tensor.dtype()), @@ -51,7 +51,7 @@ TEST(DataType, bfloat16) { Tensor tensor; CPUPlace cpu; - tensor.mutable_data(cpu, f::TransToPtenDataType(dtype)); + tensor.mutable_data(cpu, f::TransToPhiDataType(dtype)); // test bf16 tensor EXPECT_EQ(f::TransToProtoVarType(tensor.dtype()), diff --git a/paddle/fluid/framework/executor_thread_worker.cc b/paddle/fluid/framework/executor_thread_worker.cc index 47ab1e0fc030a6897162a99e8eb4da5e34541c79..06019372a7323b3c61c067638da19b847eba9031 100644 --- a/paddle/fluid/framework/executor_thread_worker.cc +++ b/paddle/fluid/framework/executor_thread_worker.cc @@ -34,7 +34,7 @@ limitations under the License. 
*/ #include "paddle/fluid/platform/timer.h" #include "paddle/fluid/pybind/pybind.h" -// pten +// phi #include "paddle/phi/kernels/declarations.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/fleet/heter_wrapper.cc b/paddle/fluid/framework/fleet/heter_wrapper.cc index 8c3c1e015262b70efb575b0d3a5ebcd662459170..84dcdad78298acbd74b2f2d23e81ceba4bd71a72 100644 --- a/paddle/fluid/framework/fleet/heter_wrapper.cc +++ b/paddle/fluid/framework/fleet/heter_wrapper.cc @@ -161,7 +161,7 @@ void HeterWrapper::DeSerializeToTensor(Scope* scope, tensor->set_lod(lod); void* tensor_data = tensor->mutable_data( - place, framework::TransToPtenDataType(ToVarType(req_var.data_type()))); + place, framework::TransToPhiDataType(ToVarType(req_var.data_type()))); #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) memory::Copy(place, tensor_data, platform::CPUPlace(), req_var.data().data(), @@ -202,7 +202,7 @@ void HeterWrapper::DeSerializeToTensor(Scope* scope, tensor->set_lod(lod); void* tensor_data = tensor->mutable_data( - place, framework::TransToPtenDataType(ToVarType(req_var.data_type()))); + place, framework::TransToPhiDataType(ToVarType(req_var.data_type()))); #ifdef PADDLE_WITH_XPU memory::Copy(place, tensor_data, platform::CPUPlace(), req_var.data().data(), diff --git a/paddle/fluid/framework/heter_section_worker.cc b/paddle/fluid/framework/heter_section_worker.cc index 8aafd3459ed1a3d1673e482016c550e69c74a6cd..b6759bb2e6fe6c5a3688f3d72e84aabf0c1d2717 100644 --- a/paddle/fluid/framework/heter_section_worker.cc +++ b/paddle/fluid/framework/heter_section_worker.cc @@ -38,7 +38,7 @@ void SetMicroId(paddle::framework::Scope* scope, std::vector dims{1}; tensor->Resize(phi::make_ddim(dims)); void* tensor_data = tensor->mutable_data( - place, framework::TransToPtenDataType(framework::proto::VarType::FP32)); + place, framework::TransToPhiDataType(framework::proto::VarType::FP32)); if (platform::is_gpu_place(place)) { #ifdef PADDLE_WITH_CUDA std::vector 
temp; diff --git a/paddle/fluid/framework/infershape_utils.cc b/paddle/fluid/framework/infershape_utils.cc index 0900ed2ff2f5d46c9705885e0847c92249091afc..e14b91d935d05c12442f3d0205c1e97df9697ec3 100644 --- a/paddle/fluid/framework/infershape_utils.cc +++ b/paddle/fluid/framework/infershape_utils.cc @@ -18,7 +18,7 @@ limitations under the License. */ #include "paddle/fluid/framework/convert_utils.h" #include "paddle/fluid/framework/framework.pb.h" -#include "paddle/fluid/framework/pten_utils.h" +#include "paddle/fluid/framework/phi_utils.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/phi/common/scalar.h" #include "paddle/phi/common/scalar_array.h" @@ -144,7 +144,7 @@ class CompatMetaTensor : public phi::MetaTensor { } } else { auto* var = BOOST_GET_CONST(VarDesc*, var_); - return paddle::framework::TransToPtenDataType(var->GetDataType()); + return paddle::framework::TransToPhiDataType(var->GetDataType()); } } @@ -341,10 +341,10 @@ phi::InferMetaContext BuildInferMetaContext(InferShapeContext* ctx, } if (infershape_inputs.size() != 1) { infer_meta_context.EmplaceBackAttr( - std::move(experimental::MakePtenScalarArrayFromVarList(vars))); + std::move(experimental::MakePhiScalarArrayFromVarList(vars))); } else { infer_meta_context.EmplaceBackAttr( - std::move(experimental::MakePtenScalarArrayFromVar(*vars[0]))); + std::move(experimental::MakePhiScalarArrayFromVar(*vars[0]))); } } else { // If is not in runtime, we will set default value(-1) for ScalarArray @@ -419,7 +419,7 @@ phi::InferMetaContext BuildInferMetaContext(InferShapeContext* ctx, if (ctx->IsRuntime()) { Variable* var = BOOST_GET_CONST(Variable*, infershape_input[0]); infer_meta_context.EmplaceBackAttr( - std::move(experimental::MakePtenScalarFromVar(*var))); + std::move(experimental::MakePhiScalarFromVar(*var))); } else { phi::Scalar tensor_scalar(-1); tensor_scalar.SetFromTensor(true); @@ -481,7 +481,7 @@ phi::InferMetaContext BuildInferMetaContext(InferShapeContext* ctx, 
BOOST_GET_CONST(std::vector, attr)); } else if (attr_defs[i].type_index == std::type_index(typeid(phi::DataType))) { - auto data_type = paddle::framework::TransToPtenDataType( + auto data_type = paddle::framework::TransToPhiDataType( static_cast( BOOST_GET_CONST(int, attr))); infer_meta_context.EmplaceBackAttr(data_type); diff --git a/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.cc b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.cc index ec5d48b3093f7c73bffa0196ccd75e11a89baeac..26ee02ff1812d2e73d0be3bed762d1a4ae4ac6c7 100644 --- a/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.cc +++ b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.cc @@ -276,13 +276,13 @@ bool FuseOptimizerOpPass::OpWithKernelSupportCPUAndGPU( bool support_gpu = false; auto &kernel_factory = phi::KernelFactory::Instance(); auto kernel_key_map = - kernel_factory.SelectKernelMap(phi::TransToPtenKernelName(op_type)); + kernel_factory.SelectKernelMap(phi::TransToPhiKernelName(op_type)); bool has_op_kernel = kernel_key_map.size() > 0 ? 
true : false; for (auto &kernel : kernel_key_map) { - if (platform::is_gpu_place(phi::TransToPtenPlace(kernel.first.backend()))) { + if (platform::is_gpu_place(phi::TransToPhiPlace(kernel.first.backend()))) { support_gpu = true; } else if (platform::is_cpu_place( - phi::TransToPtenPlace(kernel.first.backend()))) { + phi::TransToPhiPlace(kernel.first.backend()))) { support_cpu = true; } } diff --git a/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass_tester.cc index dafcc9c4e16a3ee43df17c1c0d650288c31b18b8..e9850483ebe913e298dc7501ed4155fb0dfc2879 100644 --- a/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass_tester.cc +++ b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass_tester.cc @@ -96,7 +96,7 @@ void InitTensorHolder(Scope* scope, const paddle::platform::Place& place, auto x = scope->Var(var_name); auto tensor = x->GetMutable(); tensor->mutable_data(place, - framework::TransToPtenDataType(proto::VarType::FP32), 1); + framework::TransToPhiDataType(proto::VarType::FP32), 1); } void MainTest(bool convWithExistingBias) { diff --git a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass_tester.cc index 3a78c229bd8fa83ff4c4d96ff270f20f131ab52b..889417b78c8641060b8ad89219749d8400558c6a 100644 --- a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass_tester.cc +++ b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass_tester.cc @@ -126,7 +126,7 @@ void InitTensorHolder(Scope* scope, const paddle::platform::Place& place, auto x = scope->Var(var_name); auto tensor = x->GetMutable(); tensor->mutable_data(place, - framework::TransToPtenDataType(proto::VarType::FP32), 1); + framework::TransToPhiDataType(proto::VarType::FP32), 1); } void PreparePass(std::unique_ptr* graph, const ProgramDesc& prog, diff --git a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass_tester.cc 
b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass_tester.cc index e00bb84e35c09eb987b2470c041545cf7f53e4ea..0506bfaf447ac68368d7d8f2a87014a6234c444c 100644 --- a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass_tester.cc +++ b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass_tester.cc @@ -526,7 +526,7 @@ void InitTensorHolder(Scope* scope, const paddle::platform::Place& place, auto x = scope->Var(var_name); auto tensor = x->GetMutable(); tensor->mutable_data(place, - framework::TransToPtenDataType(proto::VarType::FP32), 1); + framework::TransToPhiDataType(proto::VarType::FP32), 1); } void PrepareGraph(std::unique_ptr* graph, const ProgramDesc& prog) { diff --git a/paddle/fluid/framework/lod_tensor.cc b/paddle/fluid/framework/lod_tensor.cc index a9e0b9c98b46f39b98a6bdce1fc12bbc3321ef00..56f9e6842373b3eba7d2d71b84adbf17ad291254 100644 --- a/paddle/fluid/framework/lod_tensor.cc +++ b/paddle/fluid/framework/lod_tensor.cc @@ -447,7 +447,7 @@ void MergeLoDTensor(LoDTensor *target, target->set_layout(new_layout); target->set_lod(new_lod); target->mutable_data(dst_place, - paddle::framework::TransToPtenDataType(new_type)); + paddle::framework::TransToPhiDataType(new_type)); int begin = 0; for (auto *src : lod_tensors) { diff --git a/paddle/fluid/framework/new_executor/interpretercore.cc b/paddle/fluid/framework/new_executor/interpretercore.cc index 766a3b9e495d521db3d628d170fb13fa32bdebb2..9b597a9efde8aa524bb665ff5f9b53ae5641b076 100644 --- a/paddle/fluid/framework/new_executor/interpretercore.cc +++ b/paddle/fluid/framework/new_executor/interpretercore.cc @@ -416,18 +416,18 @@ void InterpreterCore::RunInstruction(const Instruction& instr_node) { if (op_with_kernel == nullptr) { instr_node.OpBase()->Run(*local_scope, place_); } else { - // fit for pten - if (instr_node.PtenKernel() && instr_node.PtenKernel()->IsValid()) { - VLOG(4) << "Run pten kernel: " << op->Type(); + // fit for phi + if (instr_node.PhiKernel() && 
instr_node.PhiKernel()->IsValid()) { + VLOG(4) << "Run phi kernel: " << op->Type(); VLOG(4) << instr_node.InnerRuntimeContext().get() << " " << &instr_node.DeviceContext(); phi::KernelContext pt_kernel_context; - op_with_kernel->BuildPtenKernelContext( + op_with_kernel->BuildPhiKernelContext( *instr_node.InnerRuntimeContext().get(), const_cast(&instr_node.DeviceContext()), &pt_kernel_context); - (*instr_node.PtenKernel())(&pt_kernel_context); + (*instr_node.PhiKernel())(&pt_kernel_context); } else { instr_node.KernelFunc()(*instr_node.InnerExecutionContext().get()); diff --git a/paddle/fluid/framework/new_executor/interpretercore_util.cc b/paddle/fluid/framework/new_executor/interpretercore_util.cc index 0767dde4392b89d57539ad697f5b64d2090b0fcd..d595af58257d4f6e0f6bd1fd009ab78e181f96f7 100644 --- a/paddle/fluid/framework/new_executor/interpretercore_util.cc +++ b/paddle/fluid/framework/new_executor/interpretercore_util.cc @@ -407,14 +407,14 @@ void build_op_func_list(const platform::Place& place, auto exec_ctx = ExecutionContext(*op_with_kernel, scope, *dev_ctx, runtime_context); - auto run_pten_kernel = false; - if (phi::KernelFactory::Instance().HasCompatiblePtenKernel( + auto run_phi_kernel = false; + if (phi::KernelFactory::Instance().HasCompatiblePhiKernel( op_with_kernel->Type())) { - auto pt_kernel_key = op_with_kernel->ChoosePtenKernel(exec_ctx); - auto pt_kernel_name = op_with_kernel->PtenKernelSignature()->name; + auto pt_kernel_key = op_with_kernel->ChoosePhiKernel(exec_ctx); + auto pt_kernel_name = op_with_kernel->PhiKernelSignature()->name; - if (op_with_kernel->PtenKernel()->IsValid()) { - run_pten_kernel = true; + if (op_with_kernel->PhiKernel()->IsValid()) { + run_phi_kernel = true; } else { auto kernels_iter = all_op_kernels.find(op_with_kernel->Type()); if (kernels_iter == all_op_kernels.end() || @@ -422,26 +422,26 @@ void build_op_func_list(const platform::Place& place, kernels_iter->second.end()) { auto pt_cpu_kernel_key = FallBackToCpu( 
expected_kernel_key, pt_kernel_key, *op_with_kernel); - op_with_kernel->ResetPtenKernel( + op_with_kernel->ResetPhiKernel( new phi::Kernel(phi::KernelFactory::Instance().SelectKernel( pt_kernel_name, pt_cpu_kernel_key))); - if (op_with_kernel->PtenKernel()->IsValid()) { + if (op_with_kernel->PhiKernel()->IsValid()) { VLOG(6) << "Static mode PrepareImpl - kernel name: " << pt_kernel_name << " | kernel key: " << pt_cpu_kernel_key - << " | kernel: " << *(op_with_kernel->PtenKernel()); - run_pten_kernel = true; + << " | kernel: " << *(op_with_kernel->PhiKernel()); + run_phi_kernel = true; } } } } VLOG(3) << op_with_kernel->Type() << " : expected_kernel_key : " << expected_kernel_key; - if (run_pten_kernel) { + if (run_phi_kernel) { phi::KernelContext pt_kernel_context; - op_with_kernel->BuildPtenKernelContext(runtime_context, dev_ctx, - &pt_kernel_context); - op_func_node.pt_kernel_ = op_with_kernel->PtenKernel(); + op_with_kernel->BuildPhiKernelContext(runtime_context, dev_ctx, + &pt_kernel_context); + op_func_node.pt_kernel_ = op_with_kernel->PhiKernel(); (*op_func_node.pt_kernel_)(&pt_kernel_context); } else { diff --git a/paddle/fluid/framework/new_executor/new_executor_defs.cc b/paddle/fluid/framework/new_executor/new_executor_defs.cc index 1fbe4500ac6dff261cc38e33ad90bfd92b83ad39..35bac4393170331486298a29f1b6be26065ad864 100644 --- a/paddle/fluid/framework/new_executor/new_executor_defs.cc +++ b/paddle/fluid/framework/new_executor/new_executor_defs.cc @@ -688,9 +688,7 @@ OpKernelComputeFunc Instruction::KernelFunc() const { return op_func_node_.kernel_func_; } -phi::Kernel* Instruction::PtenKernel() const { - return op_func_node_.pt_kernel_; -} +phi::Kernel* Instruction::PhiKernel() const { return op_func_node_.pt_kernel_; } OpFuncType Instruction::KernelType() const { return op_func_node_.type_; } diff --git a/paddle/fluid/framework/new_executor/new_executor_defs.h b/paddle/fluid/framework/new_executor/new_executor_defs.h index 
93b9aee4f32cbfa88c0a79d4018b3a2ca03cf035..dc34bd2c69411837b6130b87dba1753687cf82f8 100644 --- a/paddle/fluid/framework/new_executor/new_executor_defs.h +++ b/paddle/fluid/framework/new_executor/new_executor_defs.h @@ -300,7 +300,7 @@ struct OpFuncNode { OpKernelComputeFunc kernel_func_; platform::DeviceContext* dev_ctx_; // not owned - // fit for pten kernel + // fit for phi kernel phi::Kernel* pt_kernel_{nullptr}; // not owned OpFuncType type_; @@ -321,7 +321,7 @@ class Instruction { OpKernelComputeFunc KernelFunc() const; - phi::Kernel* PtenKernel() const; + phi::Kernel* PhiKernel() const; OpFuncType KernelType() const; diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index b7332896818c980a22a0f8f1648174caad6e3194..d33791f70c4d2f759bcd4f6443a5a1f244673d4f 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -24,7 +24,7 @@ limitations under the License. */ #include "paddle/fluid/framework/data_type_transform.h" #include "paddle/fluid/framework/details/nan_inf_utils.h" #include "paddle/fluid/framework/op_call_stack.h" -#include "paddle/fluid/framework/pten_utils.h" +#include "paddle/fluid/framework/phi_utils.h" #include "paddle/fluid/framework/shape_inference.h" #include "paddle/fluid/framework/transfer_scope_cache.h" #include "paddle/fluid/framework/unused_var_check.h" @@ -616,9 +616,9 @@ bool OpSupportGPU(const std::string& op_type) { // check in new Function kernel first auto& kernel_factory = phi::KernelFactory::Instance(); auto kernel_key_map = - kernel_factory.SelectKernelMap(phi::TransToPtenKernelName(op_type)); + kernel_factory.SelectKernelMap(phi::TransToPhiKernelName(op_type)); for (auto& kernel : kernel_key_map) { - if (platform::is_gpu_place(phi::TransToPtenPlace(kernel.first.backend()))) { + if (platform::is_gpu_place(phi::TransToPhiPlace(kernel.first.backend()))) { return true; } } @@ -1186,10 +1186,10 @@ void OperatorWithKernel::RunImpl(const Scope& scope, // phase 
phi::KernelKey pt_kernel_key; std::string pt_kernel_name; - if (phi::KernelFactory::Instance().HasCompatiblePtenKernel(type_)) { + if (phi::KernelFactory::Instance().HasCompatiblePhiKernel(type_)) { if (pt_kernel_signature_ == nullptr || pt_kernel_ == nullptr) { pt_kernel_signature_.reset( - new KernelSignature(std::move(GetExpectedPtenKernelArgs(exe_ctx)))); + new KernelSignature(std::move(GetExpectedPhiKernelArgs(exe_ctx)))); VLOG(6) << *pt_kernel_signature_.get(); kernel_type_.reset( @@ -1197,17 +1197,17 @@ void OperatorWithKernel::RunImpl(const Scope& scope, dev_ctx = pool.Get(kernel_type_->place_); pt_kernel_name = pt_kernel_signature_->name; - pt_kernel_key = TransOpKernelTypeToPtenKernelKey(*kernel_type_.get()); + pt_kernel_key = TransOpKernelTypeToPhiKernelKey(*kernel_type_.get()); pt_kernel_.reset( new phi::Kernel(phi::KernelFactory::Instance().SelectKernel( pt_kernel_name, pt_kernel_key))); if (pt_kernel_->IsValid()) { - VLOG(6) << "Static mode ChoosePtenKernel - kernel name: " + VLOG(6) << "Static mode ChoosePhiKernel - kernel name: " << pt_kernel_name << " | kernel key: " << pt_kernel_key << " | kernel: " << *pt_kernel_; } else { - VLOG(6) << "Static mode ChoosePtenKernel - kernel `" << pt_kernel_name + VLOG(6) << "Static mode ChoosePhiKernel - kernel `" << pt_kernel_name << "` not found."; } } @@ -1222,7 +1222,7 @@ void OperatorWithKernel::RunImpl(const Scope& scope, && !is_xpu_unsupport #endif ) { - run_pten_kernel_ = true; + run_phi_kernel_ = true; } else { auto& all_op_kernels = AllOpKernels(); auto kernels_iter = all_op_kernels.find(type_); @@ -1244,12 +1244,12 @@ void OperatorWithKernel::RunImpl(const Scope& scope, VLOG(6) << "Static mode PrepareImpl - kernel name: " << pt_kernel_name << " | kernel key: " << pt_cpu_kernel_key << " | kernel: " << *pt_kernel_; - run_pten_kernel_ = true; + run_phi_kernel_ = true; } } } } - if (!run_pten_kernel_) { + if (!run_phi_kernel_) { if (kernel_type_.get() == nullptr || kernel_func_.get() == nullptr) { 
ChooseKernel(exe_ctx); dev_ctx = pool.Get(kernel_type_->place_); @@ -1290,13 +1290,13 @@ void OperatorWithKernel::RunImpl(const Scope& scope, platform::RecordEvent record_event("compute", platform::TracerEventType::OperatorInner, 1, platform::EventRole::kInnerOp); - if (run_pten_kernel_) { + if (run_phi_kernel_) { phi::KernelContext pt_kernel_context; // Do data transform before building KernelContext // TODO(zhiqiu): support TransferInplaceVarsBack - PreparePtenData(exec_scope, *pt_kernel_, *pt_kernel_signature_, - runtime_ctx); - BuildPtenKernelContext(*runtime_ctx, dev_ctx, &pt_kernel_context); + PreparePhiData(exec_scope, *pt_kernel_, *pt_kernel_signature_, + runtime_ctx); + BuildPhiKernelContext(*runtime_ctx, dev_ctx, &pt_kernel_context); (*pt_kernel_)(&pt_kernel_context); } else { (*kernel_func_)( @@ -1388,26 +1388,26 @@ OpKernelType OperatorWithKernel::InnerGetExpectedKernelType( return expected_kernel_key; } -phi::KernelKey OperatorWithKernel::ChoosePtenKernel( +phi::KernelKey OperatorWithKernel::ChoosePhiKernel( const ExecutionContext& ctx) const { pt_kernel_signature_.reset( - new KernelSignature(std::move(GetExpectedPtenKernelArgs(ctx)))); + new KernelSignature(std::move(GetExpectedPhiKernelArgs(ctx)))); VLOG(6) << *pt_kernel_signature_.get(); kernel_type_.reset( new OpKernelType(std::move(InnerGetExpectedKernelType(ctx)))); auto pt_kernel_name = pt_kernel_signature_->name; - auto pt_kernel_key = TransOpKernelTypeToPtenKernelKey(*kernel_type_.get()); + auto pt_kernel_key = TransOpKernelTypeToPhiKernelKey(*kernel_type_.get()); pt_kernel_.reset(new phi::Kernel(phi::KernelFactory::Instance().SelectKernel( pt_kernel_name, pt_kernel_key))); if (pt_kernel_->IsValid()) { - VLOG(6) << "Static mode ChoosePtenKernel - kernel name: " << pt_kernel_name + VLOG(6) << "Static mode ChoosePhiKernel - kernel name: " << pt_kernel_name << " | kernel key: " << pt_kernel_key << " | kernel: " << *pt_kernel_; } else { - VLOG(6) << "Static mode ChoosePtenKernel - kernel `" << 
pt_kernel_name + VLOG(6) << "Static mode ChoosePhiKernel - kernel `" << pt_kernel_name << "` not found."; } return pt_kernel_key; @@ -1918,7 +1918,7 @@ OpKernelType OperatorWithKernel::GetKernelTypeForVar( tensor.layout()); } -KernelSignature OperatorWithKernel::GetExpectedPtenKernelArgs( +KernelSignature OperatorWithKernel::GetExpectedPhiKernelArgs( const ExecutionContext& ctx) const { InitDefaultKernelSignatureMap(); ExecutionArgumentMappingContext arg_mapping_ctx(ctx); @@ -1926,7 +1926,7 @@ KernelSignature OperatorWithKernel::GetExpectedPtenKernelArgs( arg_mapping_ctx); } -Scope* OperatorWithKernel::PreparePtenData( +Scope* OperatorWithKernel::PreparePhiData( const Scope& scope, const phi::Kernel& pt_kernel, const KernelSignature& pt_kernel_signature, RuntimeContext* ctx) const { auto& input_names = std::get<0>(pt_kernel_signature.args); @@ -1981,12 +1981,12 @@ Scope* OperatorWithKernel::PreparePtenData( if (in_def.backend == phi::Backend::ALL_BACKEND) { continue; } - auto expected_place = phi::TransToPtenPlace(in_def.backend); + auto expected_place = phi::TransToPhiPlace(in_def.backend); if (platform::is_same_place(tensor_in->place(), expected_place)) { continue; } - VLOG(3) << "PTen Transform Variable " << input_names[i] << " from " + VLOG(3) << "phi Transform Variable " << input_names[i] << " from " << tensor_in->place() << " to " << expected_place; if (!new_scope) { @@ -2007,7 +2007,7 @@ Scope* OperatorWithKernel::PreparePtenData( return new_scope; } -void OperatorWithKernel::BuildPtenKernelContext( +void OperatorWithKernel::BuildPhiKernelContext( const RuntimeContext& ctx, platform::DeviceContext* dev_ctx, phi::KernelContext* pt_kernel_context) const { pt_kernel_context->SetDeviceContext(dev_ctx); @@ -2111,7 +2111,7 @@ void OperatorWithKernel::BuildPtenKernelContext( experimental::ResetTensorDtypeAndLayoutByArgDef(tensor_out, output_defs.at(i)); SetAllocationForOutputTenosr( - tensor_out, phi::TransToPtenPlace(output_defs.at(i).backend)); + tensor_out, 
phi::TransToPhiPlace(output_defs.at(i).backend)); pt_kernel_context->EmplaceBackOutputWithoutSetRange(tensor_out); } @@ -2145,10 +2145,10 @@ void OperatorWithKernel::BuildPtenKernelContext( auto& ins_vector = ctx.inputs.at(attr_names[i]); if (ins_vector.size() == 1) { // ShapeTensor pt_kernel_context->EmplaceBackAttr(std::move( - experimental::MakePtenScalarArrayFromVar(*ins_vector.front()))); + experimental::MakePhiScalarArrayFromVar(*ins_vector.front()))); } else { // ShapeTensorList pt_kernel_context->EmplaceBackAttr(std::move( - experimental::MakePtenScalarArrayFromVarList(ins_vector))); + experimental::MakePhiScalarArrayFromVarList(ins_vector))); } } } else if (attr_defs[i].type_index == @@ -2178,8 +2178,8 @@ void OperatorWithKernel::BuildPtenKernelContext( } } else { auto& ins_vector = ctx.inputs.at(attr_names[i]); - pt_kernel_context->EmplaceBackAttr(std::move( - experimental::MakePtenScalarFromVar(*ins_vector.front()))); + pt_kernel_context->EmplaceBackAttr( + std::move(experimental::MakePhiScalarFromVar(*ins_vector.front()))); } } else { @@ -2198,7 +2198,7 @@ void OperatorWithKernel::BuildPtenKernelContext( pt_kernel_context->EmplaceBackAttr(BOOST_GET_CONST(std::string, attr)); } else if (attr_defs[i].type_index == std::type_index(typeid(phi::DataType))) { - auto data_type = paddle::framework::TransToPtenDataType( + auto data_type = paddle::framework::TransToPhiDataType( static_cast( BOOST_GET_CONST(int, attr))); pt_kernel_context->EmplaceBackAttr(data_type); @@ -2206,7 +2206,7 @@ void OperatorWithKernel::BuildPtenKernelContext( std::type_index(typeid(std::vector))) { if (std::type_index(attr.type()) == std::type_index(typeid(std::vector))) { - // Emplace Back Attr according to the type of Pten_Kernel args. + // Emplace Back Attr according to the type of Phi_Kernel args. 
const auto& vector_int_attr = BOOST_GET_CONST(std::vector, attr); const std::vector vector_int64_attr(vector_int_attr.begin(), vector_int_attr.end()); diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h index ff9cb8a287a26210cb585c1c58dcb20e860af880..16718a316513e3574e9a7eb14ed50106c8b0dcb6 100644 --- a/paddle/fluid/framework/operator.h +++ b/paddle/fluid/framework/operator.h @@ -30,7 +30,7 @@ limitations under the License. */ #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_info.h" #include "paddle/fluid/framework/op_kernel_type.h" -#include "paddle/fluid/framework/pten_utils.h" +#include "paddle/fluid/framework/phi_utils.h" #include "paddle/fluid/framework/scope.h" #include "paddle/fluid/framework/selected_rows_utils.h" #include "paddle/fluid/framework/tensor.h" @@ -423,7 +423,7 @@ class ExecutionContext { "size(%d).", allocation_ptr->size(), phi::product(dim) * sizeof(T))); - paddle::framework::Tensor temp_tensor(framework::TransToPtenDataType( + paddle::framework::Tensor temp_tensor(framework::TransToPhiDataType( framework::ToDataType(std::type_index(typeid(T))))); temp_tensor.Resize(dim); temp_tensor.ResetHolder(std::move(shared_allocation)); @@ -538,14 +538,14 @@ class OperatorWithKernel : public OperatorBase { } bool SupportGPU() const override { - auto pten_kernels = phi::KernelFactory::Instance().SelectKernelMap( - phi::TransToPtenKernelName(type_)); - auto has_pten_kernel = - std::any_of(pten_kernels.begin(), pten_kernels.end(), + auto phi_kernels = phi::KernelFactory::Instance().SelectKernelMap( + phi::TransToPhiKernelName(type_)); + auto has_phi_kernel = + std::any_of(phi_kernels.begin(), phi_kernels.end(), [](phi::KernelKeyMap::const_reference kern_pair) { return kern_pair.first.backend() == phi::Backend::GPU; }); - if (has_pten_kernel) { + if (has_phi_kernel) { return true; } else { auto& op_kernels = OperatorWithKernel::AllOpKernels().at(type_); @@ -558,7 +558,7 @@ class 
OperatorWithKernel : public OperatorBase { } bool SupportNPU() const override { - // TODO(zhiqiu): support pten if needed? + // TODO(zhiqiu): support phi if needed? auto& op_kernels = OperatorWithKernel::AllOpKernels().at(type_); return std::any_of(op_kernels.begin(), op_kernels.end(), [](OpKernelMap::const_reference kern_pair) { @@ -566,7 +566,7 @@ class OperatorWithKernel : public OperatorBase { }); } bool SupportMLU() const override { - // TODO(zhiqiu): support pten if needed? + // TODO(zhiqiu): support phi if needed? auto& op_kernels = OperatorWithKernel::AllOpKernels().at(type_); return std::any_of(op_kernels.begin(), op_kernels.end(), [](OpKernelMap::const_reference kern_pair) { @@ -603,39 +603,39 @@ class OperatorWithKernel : public OperatorBase { return kernel_type_->place_; } - /* member functions for adapting to pten lib */ + /* member functions for adapting to phi lib */ /** In the Tensor calculation library, the new Kernel adopts a clearer and * more streamlined design. The arguments of the Kernel and the input and * output arguments registered in the original OpMaker do not match in some * cases, so we use map to record the arguments required by the kernel. * When selecting Kernel during Op execution, select the arguments of the - * original Op according to the GetExpectedPtenKernelArgs returned arguments. + * original Op according to the GetExpectedPhiKernelArgs returned arguments. */ - phi::KernelSignature GetExpectedPtenKernelArgs( + phi::KernelSignature GetExpectedPhiKernelArgs( const ExecutionContext& ctx) const; - /* member functions for adapting to pten lib */ - phi::KernelKey ChoosePtenKernel(const ExecutionContext& ctx) const; + /* member functions for adapting to phi lib */ + phi::KernelKey ChoosePhiKernel(const ExecutionContext& ctx) const; /** - * Transfer data place for pten kernel + * Transfer data place for phi kernel * Is this really needed? 
*/ - Scope* PreparePtenData(const Scope& scope, const phi::Kernel& pt_kernel, - const phi::KernelSignature& pt_kernel_signature, - RuntimeContext* ctx) const; + Scope* PreparePhiData(const Scope& scope, const phi::Kernel& pt_kernel, + const phi::KernelSignature& pt_kernel_signature, + RuntimeContext* ctx) const; - void BuildPtenKernelContext(const RuntimeContext& ctx, - platform::DeviceContext* dev_ctx, - phi::KernelContext* pt_kernel_context) const; + void BuildPhiKernelContext(const RuntimeContext& ctx, + platform::DeviceContext* dev_ctx, + phi::KernelContext* pt_kernel_context) const; - phi::KernelSignature* PtenKernelSignature() const { + phi::KernelSignature* PhiKernelSignature() const { return pt_kernel_signature_.get(); } - phi::Kernel* PtenKernel() const { return pt_kernel_.get(); } + phi::Kernel* PhiKernel() const { return pt_kernel_.get(); } - void ResetPtenKernel(phi::Kernel* kernel) const { + void ResetPhiKernel(phi::Kernel* kernel) const { return pt_kernel_.reset(kernel); } @@ -692,9 +692,9 @@ class OperatorWithKernel : public OperatorBase { mutable std::mutex cache_update_mutex_; mutable bool enable_cache_transfer_scope_ = false; // NOTE(chenweihang): Similar op members are used to adapt to - // new pten kernel, if there is a better design in the future, + // new phi kernel, if there is a better design in the future, // we may polish the implementation here - mutable bool run_pten_kernel_ = false; + mutable bool run_phi_kernel_ = false; mutable bool run_kp_kernel = false; mutable std::unique_ptr pt_kernel_signature_; mutable std::unique_ptr pt_kernel_; diff --git a/paddle/fluid/framework/paddle2cinn/cinn_graph_symbolization_test.cc b/paddle/fluid/framework/paddle2cinn/cinn_graph_symbolization_test.cc index 09bca4a735461914e203cd479f45d000985a37b4..c0e1ca8f0d123379f3363afc45dd083b4a5dc951 100644 --- a/paddle/fluid/framework/paddle2cinn/cinn_graph_symbolization_test.cc +++ b/paddle/fluid/framework/paddle2cinn/cinn_graph_symbolization_test.cc @@ -209,7 
+209,7 @@ class CinnGraphSymbolizationTest : public ::testing::Test { tensor.Resize(dims); tensor.mutable_data( platform::CPUPlace(), - framework::TransToPtenDataType(framework::proto::VarType::FP32)); + framework::TransToPhiDataType(framework::proto::VarType::FP32)); return tensor; }; #define FillFeedList(Name) feed_targets[#Name] = create_tensor(); diff --git a/paddle/fluid/framework/pten_utils.cc b/paddle/fluid/framework/phi_utils.cc similarity index 85% rename from paddle/fluid/framework/pten_utils.cc rename to paddle/fluid/framework/phi_utils.cc index af9d62ff7a845a394bf360b18cef528942cf739e..355291beb60f949b52b681592d42b7da4e80186b 100644 --- a/paddle/fluid/framework/pten_utils.cc +++ b/paddle/fluid/framework/phi_utils.cc @@ -15,7 +15,7 @@ limitations under the License. */ #include #include "paddle/fluid/framework/convert_utils.h" -#include "paddle/fluid/framework/pten_utils.h" +#include "paddle/fluid/framework/phi_utils.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_info.h" @@ -57,12 +57,11 @@ class KernelArgsNameMakerByOpProto : public KernelArgsNameMaker { paddle::SmallVector attr_names_; }; -OpKernelType TransPtenKernelKeyToOpKernelType( - const phi::KernelKey& kernel_key) { +OpKernelType TransPhiKernelKeyToOpKernelType(const phi::KernelKey& kernel_key) { proto::VarType::Type data_type = paddle::framework::TransToProtoVarType(kernel_key.dtype()); // no need to set current device id here - platform::Place place = phi::TransToPtenPlace(kernel_key.backend(), false); + platform::Place place = phi::TransToPhiPlace(kernel_key.backend(), false); DataLayout data_layout = kernel_key.layout(); LibraryType library_type = LibraryType::kPlain; if (kernel_key.backend() == phi::Backend::MKLDNN) { @@ -76,9 +75,9 @@ OpKernelType TransPtenKernelKeyToOpKernelType( return OpKernelType(data_type, place, data_layout, library_type); } -phi::KernelKey TransOpKernelTypeToPtenKernelKey( +phi::KernelKey TransOpKernelTypeToPhiKernelKey( const 
OpKernelType& kernel_type) { - phi::Backend backend = phi::TransToPtenBackend(kernel_type.place_); + phi::Backend backend = phi::TransToPhiBackend(kernel_type.place_); if (kernel_type.library_type_ == LibraryType::kMKLDNN) { backend = phi::Backend::MKLDNN; } else if (kernel_type.library_type_ == LibraryType::kCUDNN) { @@ -88,7 +87,7 @@ phi::KernelKey TransOpKernelTypeToPtenKernelKey( } paddle::experimental::DataLayout layout = kernel_type.data_layout_; paddle::experimental::DataType dtype = - paddle::framework::TransToPtenDataType(kernel_type.data_type_); + paddle::framework::TransToPhiDataType(kernel_type.data_type_); return phi::KernelKey(backend, layout, dtype); } @@ -98,8 +97,8 @@ phi::KernelKey FallBackToCpu(const OpKernelType& expected_kernel_key, #ifdef PADDLE_WITH_XPU if (platform::is_xpu_place(expected_kernel_key.place_) || paddle::platform::is_in_xpu_black_list(op.Type())) { - VLOG(3) << "pten missing XPU kernel: " << op.Type() - << ", expected_kernel_key:" << expected_kernel_key + VLOG(3) << "phi missing XPU kernel: " << op.Type() + << ", expected_kernel_key:" << expected_kernel_key << ", fallbacking to CPU one!"; return phi::KernelKey(phi::Backend::CPU, kernel_key.layout(), kernel_key.dtype()); @@ -107,8 +106,8 @@ phi::KernelKey FallBackToCpu(const OpKernelType& expected_kernel_key, #endif #ifdef PADDLE_WITH_ASCEND_CL if (platform::is_npu_place(expected_kernel_key.place_)) { - VLOG(3) << "pten missing NPU kernel: " << op.Type() - << ", expected_kernel_key:" << expected_kernel_key + VLOG(3) << "phi missing NPU kernel: " << op.Type() + << ", expected_kernel_key:" << expected_kernel_key << ", fallbacking to CPU one!"; return phi::KernelKey(phi::Backend::CPU, kernel_key.layout(), kernel_key.dtype()); @@ -116,8 +115,8 @@ phi::KernelKey FallBackToCpu(const OpKernelType& expected_kernel_key, #ifdef PADDLE_WITH_MLU if (platform::is_mlu_place(expected_kernel_key.place_)) { - VLOG(3) << "pten missing MLU kernel: " << op.Type() - << ", expected_kernel_key:" 
<< expected_kernel_key + VLOG(3) << "phi missing MLU kernel: " << op.Type() + << ", expected_kernel_key:" << expected_kernel_key << ", fallbacking to CPU one!"; return phi::KernelKey(phi::Backend::CPU, kernel_key.layout(), kernel_key.dtype()); @@ -132,17 +131,17 @@ KernelArgsNameMakerByOpProto::GetInputArgsNames() { auto& in = op_proto_->inputs()[i]; auto& in_name = in.name(); if ((in.has_extra() && in.extra()) || (in.has_quant() && in.quant())) { - VLOG(6) << "Parse PtenKernel input: skip extra & quant input - " + VLOG(6) << "Parse PhiKernel input: skip extra & quant input - " << in_name; continue; } // If contains dispensable input, we should override the // OpArgumentMapping method self in phi/ops/compat dir if (in.has_dispensable() && in.dispensable()) { - VLOG(6) << "Parse PtenKernel input: skip dispensable input - " << in_name; + VLOG(6) << "Parse PhiKernel input: skip dispensable input - " << in_name; continue; } - VLOG(6) << "Parse PtenKernel input: " << in_name; + VLOG(6) << "Parse PhiKernel input: " << in_name; input_names_.emplace_back(in_name); } return input_names_; @@ -154,11 +153,11 @@ KernelArgsNameMakerByOpProto::GetOutputArgsNames() { auto& out = op_proto_->outputs()[i]; auto& out_name = out.name(); if ((out.has_extra() && out.extra()) || (out.has_quant() && out.quant())) { - VLOG(6) << "Parse PtenKernel output: skip extra & quant output - " + VLOG(6) << "Parse PhiKernel output: skip extra & quant output - " << out_name; continue; } - VLOG(6) << "Parse PtenKernel output: " << out_name; + VLOG(6) << "Parse PhiKernel output: " << out_name; output_names_.emplace_back(out_name); } return output_names_; @@ -173,17 +172,17 @@ KernelArgsNameMakerByOpProto::GetAttrsArgsNames() { attr_name == "op_role" || attr_name == "op_role_var" || attr_name == "op_namescope" || attr_name == "op_callstack" || attr_name == "op_device") { - VLOG(6) << "Parse PtenKernel attribute: skip needless attr - " + VLOG(6) << "Parse PhiKernel attribute: skip needless attr - " << 
attr_name; continue; } if ((attr.has_extra() && attr.extra()) || (attr.has_quant() && attr.quant())) { - VLOG(6) << "Parse PtenKernel attribute: skip extra & quant attr - " + VLOG(6) << "Parse PhiKernel attribute: skip extra & quant attr - " << attr_name; continue; } - VLOG(6) << "Parse PtenKernel attribute: " << attr_name; + VLOG(6) << "Parse PhiKernel attribute: " << attr_name; attr_names_.emplace_back(attr_name); } @@ -191,7 +190,7 @@ KernelArgsNameMakerByOpProto::GetAttrsArgsNames() { } KernelSignature KernelArgsNameMakerByOpProto::GetKernelSignature() { - return KernelSignature(phi::TransToPtenKernelName(op_proto_->type()), + return KernelSignature(phi::TransToPhiKernelName(op_proto_->type()), GetInputArgsNames(), GetAttrsArgsNames(), GetOutputArgsNames()); } @@ -203,7 +202,7 @@ void InitDefaultKernelSignatureMap() { for (const auto& pair : paddle::framework::OpInfoMap::Instance().map()) { const auto& op_type = pair.first; const auto* op_proto = pair.second.proto_; - if (phi::KernelFactory::Instance().HasCompatiblePtenKernel(op_type) && + if (phi::KernelFactory::Instance().HasCompatiblePhiKernel(op_type) && op_proto) { paddle::framework::KernelArgsNameMakerByOpProto maker(op_proto); VLOG(10) << "Register kernel signature for " << op_type; diff --git a/paddle/fluid/framework/pten_utils.h b/paddle/fluid/framework/phi_utils.h similarity index 87% rename from paddle/fluid/framework/pten_utils.h rename to paddle/fluid/framework/phi_utils.h index 1bcffbcc3143547eb1df0975c9e2163bfebed02e..1a1f79d82770058ae4010b7a3a3162280ceb1537 100644 --- a/paddle/fluid/framework/pten_utils.h +++ b/paddle/fluid/framework/phi_utils.h @@ -44,9 +44,8 @@ using KernelSignature = phi::KernelSignature; /* Kernel Key translate */ -OpKernelType TransPtenKernelKeyToOpKernelType(const phi::KernelKey& kernel_key); -phi::KernelKey TransOpKernelTypeToPtenKernelKey( - const OpKernelType& kernel_type); +OpKernelType TransPhiKernelKeyToOpKernelType(const phi::KernelKey& kernel_key); +phi::KernelKey 
TransOpKernelTypeToPhiKernelKey(const OpKernelType& kernel_type); phi::KernelKey FallBackToCpu(const OpKernelType& expected_kernel_key, const phi::KernelKey& kernel_key, const framework::OperatorBase& op); @@ -68,25 +67,25 @@ void SetAllocationForOutputTenosr(phi::TensorBase* tensor, // TODO(Wilber): support others device context. template -struct ConvertToPtenContext { +struct ConvertToPhiContext { using TYPE = T; }; template <> -struct ConvertToPtenContext { +struct ConvertToPhiContext { using TYPE = phi::CPUContext; }; #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) template <> -struct ConvertToPtenContext { +struct ConvertToPhiContext { using TYPE = phi::GPUContext; }; #endif #ifdef PADDLE_WITH_XPU template <> -struct ConvertToPtenContext { +struct ConvertToPhiContext { using TYPE = phi::XPUContext; }; #endif diff --git a/paddle/fluid/framework/pten_utils_test.cc b/paddle/fluid/framework/phi_utils_test.cc similarity index 84% rename from paddle/fluid/framework/pten_utils_test.cc rename to paddle/fluid/framework/phi_utils_test.cc index da1431c0efafe3a2253f3dd5721f947a75dfbb71..cbcdf24c9f32b47f3337b4f176753328497d8c85 100644 --- a/paddle/fluid/framework/pten_utils_test.cc +++ b/paddle/fluid/framework/phi_utils_test.cc @@ -12,17 +12,17 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "paddle/fluid/framework/pten_utils.h" +#include "paddle/fluid/framework/phi_utils.h" #include "gtest/gtest.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/selected_rows_utils.h" #include "paddle/fluid/framework/variable.h" -TEST(PtenUtils, TransPtenKernelKeyToOpKernelType) { +TEST(PhiUtils, TransPhiKernelKeyToOpKernelType) { phi::KernelKey kernel_key(phi::Backend::CPU, phi::DataLayout::NCHW, phi::DataType::FLOAT32); auto op_kernel_type = - paddle::framework::TransPtenKernelKeyToOpKernelType(kernel_key); + paddle::framework::TransPhiKernelKeyToOpKernelType(kernel_key); ASSERT_EQ(op_kernel_type.data_type_, paddle::framework::proto::VarType::FP32); ASSERT_EQ(op_kernel_type.data_layout_, paddle::framework::DataLayout::kNCHW); ASSERT_TRUE(paddle::platform::is_cpu_place(op_kernel_type.place_)); @@ -33,7 +33,7 @@ TEST(PtenUtils, TransPtenKernelKeyToOpKernelType) { phi::KernelKey kernel_key_mkldnn(phi::Backend::MKLDNN, phi::DataLayout::NCHW, phi::DataType::FLOAT32); op_kernel_type = - paddle::framework::TransPtenKernelKeyToOpKernelType(kernel_key_mkldnn); + paddle::framework::TransPhiKernelKeyToOpKernelType(kernel_key_mkldnn); ASSERT_EQ(op_kernel_type.data_type_, paddle::framework::proto::VarType::FP32); ASSERT_EQ(op_kernel_type.data_layout_, paddle::framework::DataLayout::kNCHW); ASSERT_TRUE(paddle::platform::is_cpu_place(op_kernel_type.place_)); @@ -45,7 +45,7 @@ TEST(PtenUtils, TransPtenKernelKeyToOpKernelType) { phi::KernelKey kernel_key_cudnn(phi::Backend::GPUDNN, phi::DataLayout::NCHW, phi::DataType::FLOAT32); op_kernel_type = - paddle::framework::TransPtenKernelKeyToOpKernelType(kernel_key_cudnn); + paddle::framework::TransPhiKernelKeyToOpKernelType(kernel_key_cudnn); ASSERT_EQ(op_kernel_type.data_type_, paddle::framework::proto::VarType::FP32); ASSERT_EQ(op_kernel_type.data_layout_, paddle::framework::DataLayout::kNCHW); ASSERT_TRUE(paddle::platform::is_gpu_place(op_kernel_type.place_)); @@ -54,12 +54,12 @@ 
TEST(PtenUtils, TransPtenKernelKeyToOpKernelType) { #endif } -TEST(PtenUtils, TransOpKernelTypeToPtenKernelKey) { +TEST(PhiUtils, TransOpKernelTypeToPhiKernelKey) { paddle::framework::OpKernelType op_kernel_type( paddle::framework::proto::VarType::FP32, paddle::platform::CPUPlace(), paddle::framework::DataLayout::kNCHW); auto kernel_key = - paddle::framework::TransOpKernelTypeToPtenKernelKey(op_kernel_type); + paddle::framework::TransOpKernelTypeToPhiKernelKey(op_kernel_type); ASSERT_EQ(kernel_key.dtype(), phi::DataType::FLOAT32); ASSERT_EQ(kernel_key.layout(), phi::DataLayout::NCHW); ASSERT_EQ(kernel_key.backend(), phi::Backend::CPU); @@ -69,8 +69,8 @@ TEST(PtenUtils, TransOpKernelTypeToPtenKernelKey) { paddle::framework::proto::VarType::FP32, paddle::platform::CPUPlace(), paddle::framework::DataLayout::kMKLDNN, paddle::framework::LibraryType::kMKLDNN); - auto kernel_key_mkldnn = paddle::framework::TransOpKernelTypeToPtenKernelKey( - op_kernel_type_mkldnn); + auto kernel_key_mkldnn = + paddle::framework::TransOpKernelTypeToPhiKernelKey(op_kernel_type_mkldnn); ASSERT_EQ(kernel_key_mkldnn.dtype(), phi::DataType::FLOAT32); ASSERT_EQ(kernel_key_mkldnn.layout(), phi::DataLayout::MKLDNN); ASSERT_EQ(kernel_key_mkldnn.backend(), phi::Backend::MKLDNN); @@ -82,7 +82,7 @@ TEST(PtenUtils, TransOpKernelTypeToPtenKernelKey) { paddle::framework::DataLayout::kNCHW, paddle::framework::LibraryType::kCUDNN); auto kernel_key_cudnn = - paddle::framework::TransOpKernelTypeToPtenKernelKey(op_kernel_type_cudnn); + paddle::framework::TransOpKernelTypeToPhiKernelKey(op_kernel_type_cudnn); ASSERT_EQ(kernel_key_cudnn.dtype(), phi::DataType::FLOAT32); ASSERT_EQ(kernel_key_cudnn.layout(), phi::DataLayout::NCHW); ASSERT_EQ(kernel_key_cudnn.backend(), phi::Backend::GPUDNN); diff --git a/paddle/fluid/framework/tensor_util.cc b/paddle/fluid/framework/tensor_util.cc index 10eefff093b0e867131c91fb0a8132175a28c6be..10ceae62dccbbab9329b73e0f581b51508511194 100644 --- 
a/paddle/fluid/framework/tensor_util.cc +++ b/paddle/fluid/framework/tensor_util.cc @@ -1457,7 +1457,7 @@ std::ostream& print_tensor>( std::ostream& operator<<(std::ostream& os, const LoD& lod) { // NOTE(xiongkun): // https://stackoverflow.com/questions/5195512/namespaces-and-operator-resolution - // if we don't redefine, the operator << of pten / framework LoD is not found. + // if we don't redefine, the operator << of phi / framework LoD is not found. paddle::string::operator<<(os, lod); return os; } diff --git a/paddle/fluid/imperative/amp_auto_cast.cc b/paddle/fluid/imperative/amp_auto_cast.cc index 6e8bfbb4a7761009f031cd4da74310cbb6294114..149202468be6c6bec833f100adfd4100c520f8f3 100644 --- a/paddle/fluid/imperative/amp_auto_cast.cc +++ b/paddle/fluid/imperative/amp_auto_cast.cc @@ -70,12 +70,12 @@ OpSupportedInfos(const std::string& place, } } - auto pten_kernels = phi::KernelFactory::Instance().kernels(); - for (auto& kernel_pair : pten_kernels) { + auto phi_kernels = phi::KernelFactory::Instance().kernels(); + for (auto& kernel_pair : phi_kernels) { auto op_type = phi::TransToFluidOpName(kernel_pair.first); for (auto& info_pair : kernel_pair.second) { framework::OpKernelType kernel_type = - framework::TransPtenKernelKeyToOpKernelType(info_pair.first); + framework::TransPhiKernelKeyToOpKernelType(info_pair.first); if (is_target_place[query_place](kernel_type.place_) && kernel_type.data_type_ == dtype && all_ops.count(op_type)) { VLOG(4) << op_type << " " << supported_ops.size(); diff --git a/paddle/fluid/imperative/basic_engine.cc b/paddle/fluid/imperative/basic_engine.cc index 97a188e5c9c2712c2c6d819b7e8f0c5ca0b2a47a..8373c7fe50d0222d6b38a400e82239dc8c3590ad 100644 --- a/paddle/fluid/imperative/basic_engine.cc +++ b/paddle/fluid/imperative/basic_engine.cc @@ -154,7 +154,7 @@ void BasicEngine::CheckBackwardInputs(const OpBase& op) { // Here, we use the type of the corresponding forward datatype. 
tensor->mutable_data( - op.place(), framework::TransToPtenDataType(var->ForwardDataType())); + op.place(), framework::TransToPhiDataType(var->ForwardDataType())); VLOG(6) << "Set ungenerated Grad: " << var->Name() << " as zero with dtype " << framework::DataTypeToString(var->ForwardDataType()); diff --git a/paddle/fluid/imperative/gradient_accumulator.cc b/paddle/fluid/imperative/gradient_accumulator.cc index 3587736a851da57cab6892593a5087dcdd338622..0abc5ad90e2697eb78ff1e21ceb2bc0e97e14a44 100644 --- a/paddle/fluid/imperative/gradient_accumulator.cc +++ b/paddle/fluid/imperative/gradient_accumulator.cc @@ -791,13 +791,13 @@ void EagerGradientAccumulator::SumGrad(std::shared_ptr var, << var->Var().Get().dims(); tensor->Resize(var->Var().Get().dims()); tensor->mutable_data(place, - framework::TransToPtenDataType(var->DataType())); + framework::TransToPhiDataType(var->DataType())); phi::funcs::set_constant(*dev_ctx, tensor, 0.0); } else { auto* tensor = dst_var->MutableVar()->GetMutable(); tensor->mutable_data(place, - framework::TransToPtenDataType(var->DataType())); + framework::TransToPhiDataType(var->DataType())); phi::funcs::set_constant(*dev_ctx, tensor, 0.0); } } @@ -925,13 +925,13 @@ void SortedGradientAccumulator::SumGrad(std::shared_ptr var, << var->Var().Get().dims(); tensor->Resize(var->Var().Get().dims()); tensor->mutable_data(place, - framework::TransToPtenDataType(var->DataType())); + framework::TransToPhiDataType(var->DataType())); phi::funcs::set_constant(*dev_ctx, tensor, 0.0); } else { auto* tensor = dst_var->MutableVar()->GetMutable(); tensor->mutable_data(place, - framework::TransToPtenDataType(var->DataType())); + framework::TransToPhiDataType(var->DataType())); phi::funcs::set_constant(*dev_ctx, tensor, 0.0); } } diff --git a/paddle/fluid/imperative/partial_grad_engine.cc b/paddle/fluid/imperative/partial_grad_engine.cc index f1d0c8afdd50e3868423a9906d9955d7aea66983..56ddbf338619890f8a88bdf09a0bb770ec31bb2f 100644 --- 
a/paddle/fluid/imperative/partial_grad_engine.cc +++ b/paddle/fluid/imperative/partial_grad_engine.cc @@ -314,10 +314,10 @@ static void FillConstantLike(const VariableWrapper &ref_var, // default data_type for now. if (ref_var.ForwardDataType() != -1) { dst_tensor->mutable_data( - place, framework::TransToPtenDataType(ref_var.ForwardDataType())); + place, framework::TransToPhiDataType(ref_var.ForwardDataType())); } else { - dst_tensor->mutable_data( - place, framework::TransToPtenDataType(ref_var.DataType())); + dst_tensor->mutable_data(place, + framework::TransToPhiDataType(ref_var.DataType())); } phi::funcs::set_constant(*dev_ctx, dst_tensor, value); } diff --git a/paddle/fluid/imperative/prepared_operator.cc b/paddle/fluid/imperative/prepared_operator.cc index 6d18b0a86f0911f38e1c51d61467bf9a01a6de21..9dd1dacc02c25474803ef3177d9cd967ee681714 100644 --- a/paddle/fluid/imperative/prepared_operator.cc +++ b/paddle/fluid/imperative/prepared_operator.cc @@ -121,7 +121,7 @@ PreparedOp::PreparedOp(const framework::OperatorBase& op, kernel_type_(kernel_type), func_(nullptr), dev_ctx_(dev_ctx), - run_pten_kernel_(true), + run_phi_kernel_(true), pt_kernel_signature_(kernel_signature), pt_kernel_(pt_kernel) {} @@ -151,7 +151,7 @@ PreparedOp PrepareImpl(const NameVarMap& ins, #endif // NOTE(zhiqiu): for kernels on given device, for example NPU, the order to // choose is: - // pten npu kernel > fluid npu kernel > pten cpu kernel > fluid cpu kernel + // phi npu kernel > fluid npu kernel > phi cpu kernel > fluid cpu kernel // 1. 
get expected kernel key auto dygraph_exe_ctx = DygraphExecutionContext( @@ -168,12 +168,12 @@ PreparedOp PrepareImpl(const NameVarMap& ins, expected_kernel_key) || paddle::platform::is_in_xpu_black_list(op.Type()); #endif - if (phi::KernelFactory::Instance().HasCompatiblePtenKernel(op.Type())) { - pt_kernel_signature = op.GetExpectedPtenKernelArgs(dygraph_exe_ctx); + if (phi::KernelFactory::Instance().HasCompatiblePhiKernel(op.Type())) { + pt_kernel_signature = op.GetExpectedPhiKernelArgs(dygraph_exe_ctx); VLOG(6) << pt_kernel_signature; pt_kernel_name = pt_kernel_signature.name; - pt_kernel_key = TransOpKernelTypeToPtenKernelKey(expected_kernel_key); + pt_kernel_key = TransOpKernelTypeToPhiKernelKey(expected_kernel_key); auto pt_kernel = phi::KernelFactory::Instance().SelectKernel(pt_kernel_name, pt_kernel_key); @@ -195,7 +195,7 @@ PreparedOp PrepareImpl(const NameVarMap& ins, return PreparedOp(op, ctx, expected_kernel_key, pt_kernel_signature, pt_kernel, dev_ctx); } else { - VLOG(6) << "Dynamic mode ChoosePtenKernel - kernel `" << pt_kernel_name + VLOG(6) << "Dynamic mode ChoosePhiKernel - kernel `" << pt_kernel_name << "` not found."; } } @@ -211,7 +211,7 @@ PreparedOp PrepareImpl(const NameVarMap& ins, || is_xpu_unsupport #endif ) { - if (phi::KernelFactory::Instance().HasCompatiblePtenKernel(op.Type())) { + if (phi::KernelFactory::Instance().HasCompatiblePhiKernel(op.Type())) { auto pt_cpu_kernel_key = FallBackToCpu(expected_kernel_key, pt_kernel_key, op); auto pt_cpu_kernel = phi::KernelFactory::Instance().SelectKernel( @@ -423,12 +423,12 @@ static void PreparedOpRunPtImpl( platform::TracerEventType::OperatorInner, 1, platform::EventRole::kInnerOp); - PreparePtenData(pt_kernel, pt_kernel_signature, ins); + PreparePhiData(pt_kernel, pt_kernel_signature, ins); phi::KernelContext pt_kernel_context; - BuildDygraphPtenKernelContext(pt_kernel_signature, pt_kernel, ins, - outs, attrs, default_attrs, dev_ctx, - &pt_kernel_context); + 
BuildDygraphPhiKernelContext(pt_kernel_signature, pt_kernel, ins, + outs, attrs, default_attrs, dev_ctx, + &pt_kernel_context); pt_kernel(&pt_kernel_context); } @@ -451,7 +451,7 @@ void PreparedOp::Run(const NameVarMap& ins, const NameVarMap& outs, const framework::AttributeMap& attrs, const framework::AttributeMap& default_attrs) { - if (run_pten_kernel_) { + if (run_phi_kernel_) { PreparedOpRunPtImpl(op_, kernel_type_, pt_kernel_signature_, pt_kernel_, dev_ctx_, ins, outs, attrs, default_attrs); @@ -465,7 +465,7 @@ void PreparedOp::Run(const NameVarMap& ins, const NameVarMap& outs, const framework::AttributeMap& attrs, const framework::AttributeMap& default_attrs) { - if (run_pten_kernel_) { + if (run_phi_kernel_) { PreparedOpRunPtImpl( op_, kernel_type_, pt_kernel_signature_, pt_kernel_, dev_ctx_, ins, outs, attrs, default_attrs); @@ -479,7 +479,7 @@ void PreparedOp::Run(const NameVarMap& ins, const NameVarMap& outs, const framework::AttributeMap& attrs, const framework::AttributeMap& default_attrs) { - if (run_pten_kernel_) { + if (run_phi_kernel_) { PreparedOpRunPtImpl( op_, kernel_type_, pt_kernel_signature_, pt_kernel_, dev_ctx_, ins, outs, attrs, default_attrs); diff --git a/paddle/fluid/imperative/prepared_operator.h b/paddle/fluid/imperative/prepared_operator.h index 879b3ec3e68a25141c239d00e25fab92914ef068..8e1e2fbe9a12da672a633075ed4c41d3d62cd7e1 100644 --- a/paddle/fluid/imperative/prepared_operator.h +++ b/paddle/fluid/imperative/prepared_operator.h @@ -22,7 +22,7 @@ #include "paddle/fluid/framework/data_transform.h" #include "paddle/fluid/framework/op_kernel_type.h" #include "paddle/fluid/framework/operator.h" -#include "paddle/fluid/framework/pten_utils.h" +#include "paddle/fluid/framework/phi_utils.h" #include "paddle/fluid/framework/type_defs.h" #include "paddle/fluid/imperative/execution_context.h" #include "paddle/fluid/imperative/layer.h" @@ -201,9 +201,9 @@ class PreparedOp { framework::OperatorWithKernel::OpKernelFunc func_; 
platform::DeviceContext* dev_ctx_; // NOTE(chenweihang): Similar op members are used to adapt to - // new pten kernel, if there is a better design in the future, + // new phi kernel, if there is a better design in the future, // we may polish the implementation here - bool run_pten_kernel_{false}; + bool run_phi_kernel_{false}; bool run_kp_kernel_{false}; framework::KernelSignature pt_kernel_signature_; phi::Kernel pt_kernel_; @@ -225,7 +225,7 @@ const inline framework::Attribute& GetAttr( } template -void BuildDygraphPtenKernelContext( +void BuildDygraphPhiKernelContext( const framework::KernelSignature& pt_kernel_signature, const phi::Kernel& pt_kernel, const NameVarMap& ins, const NameVarMap& outs, const framework::AttributeMap& attrs, @@ -327,7 +327,7 @@ void BuildDygraphPtenKernelContext( experimental::ResetTensorDtypeAndLayoutByArgDef(tensor_out, output_defs.at(i)); framework::SetAllocationForOutputTenosr( - tensor_out, phi::TransToPtenPlace(output_defs.at(i).backend)); + tensor_out, phi::TransToPhiPlace(output_defs.at(i).backend)); kernel_ctx->EmplaceBackOutputWithoutSetRange(tensor_out); } @@ -369,7 +369,7 @@ void BuildDygraphPtenKernelContext( auto& ins_vector = ins.at(attr_names[i]); if (ins_vector.size() == 1) { // ShapeTensor kernel_ctx->EmplaceBackAttr(std::move( - experimental::MakePtenScalarArrayFromVar(ins_vector[0]->Var()))); + experimental::MakePhiScalarArrayFromVar(ins_vector[0]->Var()))); } else { // ShapeTensorList std::vector variables; variables.reserve(ins_vector.size()); @@ -377,7 +377,7 @@ void BuildDygraphPtenKernelContext( variables.push_back(var_base->MutableVar()); } kernel_ctx->EmplaceBackAttr(std::move( - experimental::MakePtenScalarArrayFromVarList(variables))); + experimental::MakePhiScalarArrayFromVarList(variables))); } } } else if (attr_defs[i].type_index == @@ -409,7 +409,7 @@ void BuildDygraphPtenKernelContext( } else { // scalar is in the input auto& ins_vector = ins.at(attr_names[i]); kernel_ctx->EmplaceBackAttr(std::move( - 
experimental::MakePtenScalarFromVar(ins_vector[0]->Var()))); + experimental::MakePhiScalarFromVar(ins_vector[0]->Var()))); } } else { @@ -428,7 +428,7 @@ void BuildDygraphPtenKernelContext( kernel_ctx->EmplaceBackAttr(BOOST_GET_CONST(std::string, attr)); } else if (attr_defs[i].type_index == std::type_index(typeid(phi::DataType))) { - auto data_type = framework::TransToPtenDataType( + auto data_type = framework::TransToPhiDataType( static_cast( BOOST_GET_CONST(int, attr))); kernel_ctx->EmplaceBackAttr(data_type); @@ -436,7 +436,7 @@ void BuildDygraphPtenKernelContext( std::type_index(typeid(std::vector))) { if (std::type_index(attr.type()) == std::type_index(typeid(std::vector))) { - // Emplace Back Attr according to the type of Pten_Kernel args. + // Emplace Back Attr according to the type of Phi_Kernel args. const auto& vector_int_attr = BOOST_GET_CONST(std::vector, attr); const std::vector vector_int64_attr(vector_int_attr.begin(), vector_int_attr.end()); @@ -456,9 +456,9 @@ void BuildDygraphPtenKernelContext( } template -void PreparePtenData(const phi::Kernel& pt_kernel, - const framework::KernelSignature& pt_kernel_signature, - const NameVarMap& ins) { +void PreparePhiData(const phi::Kernel& pt_kernel, + const framework::KernelSignature& pt_kernel_signature, + const NameVarMap& ins) { auto& input_names = std::get<0>(pt_kernel_signature.args); auto& input_defs = pt_kernel.args_def().input_defs(); @@ -482,12 +482,12 @@ void PreparePtenData(const phi::Kernel& pt_kernel, if (in_def.backend == phi::Backend::ALL_BACKEND) { continue; } - auto expected_place = phi::TransToPtenPlace(in_def.backend); + auto expected_place = phi::TransToPhiPlace(in_def.backend); if (platform::is_same_place(tensor_in->place(), expected_place)) { continue; } - VLOG(3) << "Pten Transform Variable " << input_names[i] << " from " + VLOG(3) << "Phi Transform Variable " << input_names[i] << " from " << tensor_in->place() << " to " << expected_place; framework::Tensor tmp_tensor; diff --git 
a/paddle/fluid/imperative/reducer.cc b/paddle/fluid/imperative/reducer.cc index 8681382394b9eea65ddcd8977c96e8a517516edd..3a6365b2af21ae9012fe37293699caed9bb23855 100644 --- a/paddle/fluid/imperative/reducer.cc +++ b/paddle/fluid/imperative/reducer.cc @@ -446,7 +446,7 @@ void Reducer::InitializeGroups( InitializeDenseGroups(variable_indices_, &group); auto tensor = group.dense_contents_.GetMutable(); tensor->Resize(phi::make_ddim({group.all_length_})) - .mutable_data(place_, framework::TransToPtenDataType(group.dtype_)); + .mutable_data(place_, framework::TransToPhiDataType(group.dtype_)); } // map variables to this group by VariableLocator @@ -738,7 +738,7 @@ void Reducer::MarkVarReady(const size_t var_index, const bool is_used_var) { if (!group_tensor.IsInitialized()) { group_tensor.Resize({static_cast(length)}); group_tensor.mutable_data(place_, - framework::TransToPtenDataType(group.dtype_)); + framework::TransToPhiDataType(group.dtype_)); } #ifdef PADDLE_WITH_XPU_BKCL diff --git a/paddle/fluid/imperative/tests/test_group.cc b/paddle/fluid/imperative/tests/test_group.cc index bca7ecc5d17dc814931e3f81a21d67ec43159355..6c304278d21fde7af093b25cdd8f62a1d4528d31 100644 --- a/paddle/fluid/imperative/tests/test_group.cc +++ b/paddle/fluid/imperative/tests/test_group.cc @@ -96,7 +96,7 @@ void GroupConcatSplit(Place place, size_t size) { { // concat auto* tensor = group.dense_contents_.GetMutable(); tensor->Resize(phi::make_ddim({group.all_length_})) - .mutable_data(place, framework::TransToPtenDataType(group.dtype_)); + .mutable_data(place, framework::TransToPhiDataType(group.dtype_)); group.ConcatTensors(*dev_ctx); group.DivNRanks(*dev_ctx, 1); diff --git a/paddle/fluid/inference/io.cc b/paddle/fluid/inference/io.cc index 8d3e091dbf5abeff5e32571666e76d50bf91941e..e8e9d895b4e8fb982ccb667352fd6c26228782a5 100644 --- a/paddle/fluid/inference/io.cc +++ b/paddle/fluid/inference/io.cc @@ -26,7 +26,7 @@ limitations under the License. 
*/ #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/pybind/pybind.h" -// pten +// phi #include "paddle/phi/kernels/declarations.h" DEFINE_string(devices, "", "The devices to be used which is joined by comma."); diff --git a/paddle/fluid/inference/lite/tensor_utils.cc b/paddle/fluid/inference/lite/tensor_utils.cc index 0e4fb3335f3d76eecea85417ac83c205d63ac9c4..eeaa128290339ce8c2ac6961c575d64abaa3c1db 100644 --- a/paddle/fluid/inference/lite/tensor_utils.cc +++ b/paddle/fluid/inference/lite/tensor_utils.cc @@ -198,7 +198,7 @@ void InitDstTensor(framework::LoDTensor* dst, const paddle::lite_api::Tensor& src) { dst->mutable_data( inference::lite::utils::GetNativePlace(src.target()), - framework::TransToPtenDataType(GetNativePrecisionType(src.precision()))); + framework::TransToPhiDataType(GetNativePrecisionType(src.precision()))); SetLoD(dst->mutable_lod(), src.lod()); } @@ -269,7 +269,7 @@ void TensorDataShare(framework::LoDTensor* dst, paddle::lite_api::Tensor* src) { SetLoD(dst->mutable_lod(), src->lod()); dst->ResetHolderWithType( holder, - framework::TransToPtenDataType(GetNativePrecisionType(src->precision()))); + framework::TransToPhiDataType(GetNativePrecisionType(src->precision()))); } } // namespace utils diff --git a/paddle/fluid/operators/benchmark/op_tester.cc b/paddle/fluid/operators/benchmark/op_tester.cc index 915ad2f41cde33ee9519b06b38bb8a59fd37793b..4b1593b1f8b40c0c4380007f85f9bb74fea9cd44 100644 --- a/paddle/fluid/operators/benchmark/op_tester.cc +++ b/paddle/fluid/operators/benchmark/op_tester.cc @@ -24,7 +24,7 @@ limitations under the License. 
*/ #include "paddle/fluid/platform/timer.h" #include "paddle/fluid/pybind/pybind.h" -// pten +// phi #include "paddle/phi/kernels/declarations.h" namespace paddle { diff --git a/paddle/fluid/operators/cast_op.cc b/paddle/fluid/operators/cast_op.cc index 4ca0dded3e7385234e3dc630e6260c08fb45f3a8..bc6cf9d831ff0faf00d3db7fdc6105f301781f8b 100644 --- a/paddle/fluid/operators/cast_op.cc +++ b/paddle/fluid/operators/cast_op.cc @@ -138,7 +138,7 @@ class CastOp : public framework::OperatorWithKernel { namespace ops = paddle::operators; using CPU = paddle::platform::CPUDeviceContext; -// cast use pten kernel, so no need to REGISTER_OP_CPU_KERNEL here. +// cast use phi kernel, so no need to REGISTER_OP_CPU_KERNEL here. REGISTER_OPERATOR(cast, ops::CastOp, ops::CastOpGradMaker, ops::CastOpGradMaker, diff --git a/paddle/fluid/operators/cast_op.h b/paddle/fluid/operators/cast_op.h index 62d747cb9f4001e4fcee64a49ee8a16a49eb2617..034cb47fab189b3c7a712d4d720887de227d8573 100644 --- a/paddle/fluid/operators/cast_op.h +++ b/paddle/fluid/operators/cast_op.h @@ -63,12 +63,12 @@ class CastOpKernel : public framework::OpKernel { out->mutable_data(dev_ctx.GetPlace(), static_cast(out_dtype)); - auto pt_out_dtype = framework::TransToPtenDataType( + auto pt_out_dtype = framework::TransToPhiDataType( static_cast(out_dtype)); // call new kernel phi::CastKernel( - static_cast::TYPE&>(dev_ctx), *in, pt_out_dtype, out); } diff --git a/paddle/fluid/operators/cast_op_xpu.cc b/paddle/fluid/operators/cast_op_xpu.cc index 25b3a446a0a32e61407d2ffa796c30d9a6625532..64324d9772b47de8dfec256f75f60873ce6aafeb 100644 --- a/paddle/fluid/operators/cast_op_xpu.cc +++ b/paddle/fluid/operators/cast_op_xpu.cc @@ -46,11 +46,11 @@ class CastXPUKernel : public framework::OpKernel { out->mutable_data(dev_ctx.GetPlace(), static_cast(out_dtype)); - auto pt_out_dtype = framework::TransToPtenDataType( + auto pt_out_dtype = framework::TransToPhiDataType( static_cast(out_dtype)); - // call pten kernel + // call phi kernel 
phi::CastKernel( - static_cast::TYPE&>(dev_ctx), *in, pt_out_dtype, out); } diff --git a/paddle/fluid/operators/cholesky_solve_op.h b/paddle/fluid/operators/cholesky_solve_op.h index 86ed7574654959849beb0c1d547a736ad9e1546c..f25fbbb0c698036951c4b9ae8e9ad2778786a1a2 100644 --- a/paddle/fluid/operators/cholesky_solve_op.h +++ b/paddle/fluid/operators/cholesky_solve_op.h @@ -203,7 +203,7 @@ class CholeskySolveGradKernel : public framework::OpKernel { commonterm_conj = helper.Transpose(commonterm_conj); phi::AddRawKernel( - static_cast::TYPE &>(dev_ctx), commonterm, commonterm_conj, -1, &commonterm); diff --git a/paddle/fluid/operators/coalesce_tensor_op.cc b/paddle/fluid/operators/coalesce_tensor_op.cc index 9f27e2238c9c832e62d6de93798b7fab20592a4c..900fd4d8d292e3c4a8884957dceeaa020ee0003e 100644 --- a/paddle/fluid/operators/coalesce_tensor_op.cc +++ b/paddle/fluid/operators/coalesce_tensor_op.cc @@ -54,7 +54,7 @@ struct FillConstantVisitor { * = nullptr) const { #ifdef PADDLE_WITH_ASCEND_CL if (platform::is_npu_place(dev_ctx_.GetPlace())) { - Tensor tensor_tmp(framework::TransToPtenDataType(dtype_)); + Tensor tensor_tmp(framework::TransToPhiDataType(dtype_)); tensor_tmp.mutable_data({1}, context_.GetPlace()); FillNpuTensorWithConstant(&tensor_tmp, static_cast(value_)); @@ -194,7 +194,7 @@ class CoalesceTensorOpKernel : public framework::OpKernel { void *fused_tensor_ptr = fused_tensor->Resize(phi::make_ddim({static_cast(numel)})) .mutable_data(context.GetPlace(), - framework::TransToPtenDataType(dtype)); + framework::TransToPhiDataType(dtype)); VLOG(10) << "Fused tensor addr " << fused_tensor_ptr; // Init the continuous space diff --git a/paddle/fluid/operators/conj_op.h b/paddle/fluid/operators/conj_op.h index 2a815ef01e1f7acbfa7f1a3d6ea6808c9877155e..b2173d1b53104a132e721cd3f72f7c6e7ace4af1 100644 --- a/paddle/fluid/operators/conj_op.h +++ b/paddle/fluid/operators/conj_op.h @@ -37,7 +37,7 @@ class ConjKernel : public framework::OpKernel { // call new kernel 
phi::ConjKernel( - static_cast::TYPE&>(dev_ctx), *x, out); } diff --git a/paddle/fluid/operators/dot_op.h b/paddle/fluid/operators/dot_op.h index e8c28ebfeb00878c69b0e80aef5aa505630f40e8..7fd0a8eb164752f24f0fed4959b0036e1a400f5e 100644 --- a/paddle/fluid/operators/dot_op.h +++ b/paddle/fluid/operators/dot_op.h @@ -41,9 +41,9 @@ class DotKernel : public framework::OpKernel { out->mutable_data(x->place()); // call new kernel - phi::DotKernel::TYPE>( - static_cast::TYPE&>(dev_ctx), *x, *y, out); } @@ -66,7 +66,7 @@ class DotGradKernel : public framework::OpKernel { // call new kernel phi::DotGradKernel( - static_cast::TYPE&>(dev_ctx), *tensor_x, *tensor_y, *tensor_dout, tensor_dx, tensor_dy); } diff --git a/paddle/fluid/operators/elementwise/elementwise_add_op.h b/paddle/fluid/operators/elementwise/elementwise_add_op.h index ae2e5b33b5f436af1d05595940c766c15219c785..a995877778e4770ea8ae64c051a71b31c1fb1e29 100644 --- a/paddle/fluid/operators/elementwise/elementwise_add_op.h +++ b/paddle/fluid/operators/elementwise/elementwise_add_op.h @@ -55,7 +55,7 @@ class ElementwiseAddKernel : public framework::OpKernel { auto &dev_ctx = ctx.device_context(); int axis = ctx.Attr("axis"); phi::AddRawKernel( - static_cast::TYPE &>(dev_ctx), *x, *y, axis, z); #endif diff --git a/paddle/fluid/operators/elementwise/elementwise_div_op.h b/paddle/fluid/operators/elementwise/elementwise_div_op.h index 1df43936920a9b7164c72d21619293301446aff6..c58a7f36548a57a1c8e7770fa282470fba4cc140 100644 --- a/paddle/fluid/operators/elementwise/elementwise_div_op.h +++ b/paddle/fluid/operators/elementwise/elementwise_div_op.h @@ -63,11 +63,11 @@ class ElementwiseDivKernel : public framework::OpKernel { auto& dev_ctx = ctx.device_context(); int axis = ctx.Attr("axis"); - auto pt_x = paddle::experimental::MakePtenDenseTensor(*x); - auto pt_y = paddle::experimental::MakePtenDenseTensor(*y); - auto pt_z = paddle::experimental::MakePtenDenseTensor(*z); + auto pt_x = 
paddle::experimental::MakePhiDenseTensor(*x); + auto pt_y = paddle::experimental::MakePhiDenseTensor(*y); + auto pt_z = paddle::experimental::MakePhiDenseTensor(*z); phi::DivideRawKernel( - static_cast::TYPE&>(dev_ctx), *pt_x.get(), *pt_y.get(), axis, pt_z.get()); } diff --git a/paddle/fluid/operators/elementwise/elementwise_mul_op.cu b/paddle/fluid/operators/elementwise/elementwise_mul_op.cu index a452c43ce2c19b1ee6411ca05d9e370447d54706..45c87a27a180af4798a9f8b31e2edfd0cacb583d 100644 --- a/paddle/fluid/operators/elementwise/elementwise_mul_op.cu +++ b/paddle/fluid/operators/elementwise/elementwise_mul_op.cu @@ -49,9 +49,9 @@ class ElementwiseMulKernel z_lod->mutable_data(ctx.GetPlace()); int axis = ctx.Attr("axis"); - auto pt_x = paddle::experimental::MakePtenDenseTensor(*x_lod); - auto pt_y = paddle::experimental::MakePtenDenseTensor(*y_lod); - auto pt_z = paddle::experimental::MakePtenDenseTensor(*z_lod); + auto pt_x = paddle::experimental::MakePhiDenseTensor(*x_lod); + auto pt_y = paddle::experimental::MakePhiDenseTensor(*y_lod); + auto pt_z = paddle::experimental::MakePhiDenseTensor(*z_lod); phi::MultiplyRawKernel(static_cast(cuda_ctx), *pt_x.get(), *pt_y.get(), axis, pt_z.get()); } else { diff --git a/paddle/fluid/operators/elementwise/elementwise_mul_op.h b/paddle/fluid/operators/elementwise/elementwise_mul_op.h index 93713be051599966d3b7fc5efa7329247096e0ca..c81266d584468f51030026e1423a649252001f58 100644 --- a/paddle/fluid/operators/elementwise/elementwise_mul_op.h +++ b/paddle/fluid/operators/elementwise/elementwise_mul_op.h @@ -122,11 +122,11 @@ class ElementwiseMulKernel : public framework::OpKernel { auto& dev_ctx = ctx.device_context(); int axis = ctx.Attr("axis"); - auto pt_x = paddle::experimental::MakePtenDenseTensor(*x_lod); - auto pt_y = paddle::experimental::MakePtenDenseTensor(*y); - auto pt_z = paddle::experimental::MakePtenDenseTensor(*z_lod); + auto pt_x = paddle::experimental::MakePhiDenseTensor(*x_lod); + auto pt_y = 
paddle::experimental::MakePhiDenseTensor(*y); + auto pt_z = paddle::experimental::MakePhiDenseTensor(*z_lod); phi::MultiplyRawKernel( - static_cast::TYPE&>(dev_ctx), *pt_x.get(), *pt_y.get(), axis, pt_z.get()); } else { diff --git a/paddle/fluid/operators/elementwise/elementwise_op_broadcast.cu.h b/paddle/fluid/operators/elementwise/elementwise_op_broadcast.cu.h index 52de5f77ed325321513d58530ec37ec0e4a23adc..418779c32e8bc216be1532bf714bc21d91c452aa 100644 --- a/paddle/fluid/operators/elementwise/elementwise_op_broadcast.cu.h +++ b/paddle/fluid/operators/elementwise/elementwise_op_broadcast.cu.h @@ -31,18 +31,18 @@ void LaunchElementwiseCudaKernel( std::vector pt_outputs; // TODO(YuanRisheng) *_tmp for cache DenseTensor, because the temporary // DenseTensor obj - // generated by MakePtenDenseTensor can be destroyed when exits loop. *_tmp + // generated by MakePhiDenseTensor can be destroyed when exits loop. *_tmp // can be deleted // when DenseTensor support copy constructor. std::vector> pt_inputs_tmp; std::vector> pt_outputs_tmp; for (auto in : ins) { pt_inputs_tmp.emplace_back( - std::move(paddle::experimental::MakePtenDenseTensor(*in))); + std::move(paddle::experimental::MakePhiDenseTensor(*in))); } for (auto out : *outs) { pt_outputs_tmp.emplace_back( - std::move(paddle::experimental::MakePtenDenseTensor(*out))); + std::move(paddle::experimental::MakePhiDenseTensor(*out))); } for (int i = 0; i < pt_inputs_tmp.size(); i++) { pt_inputs.push_back(pt_inputs_tmp[i].get()); diff --git a/paddle/fluid/operators/elementwise/elementwise_op_impl.cu.h b/paddle/fluid/operators/elementwise/elementwise_op_impl.cu.h index 4a2d92a8c441a9e180c056a19a417be1497c8bae..7d7bb4f26fcf42ec63cd1fab7ec2667a03c8ba4c 100644 --- a/paddle/fluid/operators/elementwise/elementwise_op_impl.cu.h +++ b/paddle/fluid/operators/elementwise/elementwise_op_impl.cu.h @@ -14,7 +14,7 @@ limitations under the License. 
*/ #pragma once -#include "paddle/fluid/framework/pten_utils.h" +#include "paddle/fluid/framework/phi_utils.h" #include "paddle/fluid/framework/tensor.h" // only can include the headers in paddle/top/api dirs @@ -34,18 +34,18 @@ void LaunchSameDimsElementwiseCudaKernel( std::vector pt_outputs; // TODO(YuanRisheng) *_tmp for cache DenseTensor, because the temporary // DenseTensor obj - // generated by MakePtenDenseTensor can be destroyed when exits loop. *_tmp + // generated by MakePhiDenseTensor can be destroyed when exits loop. *_tmp // can be deleted // when DenseTensor support copy constructor. std::vector> pt_inputs_tmp; std::vector> pt_outputs_tmp; for (auto in : ins) { pt_inputs_tmp.emplace_back( - std::move(paddle::experimental::MakePtenDenseTensor(*in))); + std::move(paddle::experimental::MakePhiDenseTensor(*in))); } for (auto out : *outs) { pt_outputs_tmp.emplace_back( - std::move(paddle::experimental::MakePtenDenseTensor(*out))); + std::move(paddle::experimental::MakePhiDenseTensor(*out))); } for (int i = 0; i < pt_inputs_tmp.size(); i++) { pt_inputs.push_back(pt_inputs_tmp[i].get()); diff --git a/paddle/fluid/operators/elementwise/elementwise_sub_op.h b/paddle/fluid/operators/elementwise/elementwise_sub_op.h index 87b647f41352f4fd3cc130597f39c12221c7a903..15c547b493ae045c13ab8d6b14a646cb92716a92 100644 --- a/paddle/fluid/operators/elementwise/elementwise_sub_op.h +++ b/paddle/fluid/operators/elementwise/elementwise_sub_op.h @@ -34,7 +34,7 @@ class ElementwiseSubKernel : public framework::OpKernel { auto& dev_ctx = ctx.device_context(); int axis = ctx.Attr("axis"); phi::SubtractRawKernel( - static_cast::TYPE&>(dev_ctx), *x, *y, axis, z); } @@ -56,7 +56,7 @@ class ElementwiseSubGradKernel : public ElemwiseGradKernel { auto& dev_ctx = ctx.device_context(); phi::SubtractGradKernel( - static_cast::TYPE&>(dev_ctx), *x, *y, *dout, axis, dx, dy); } @@ -86,7 +86,7 @@ class ElementwiseSubDoubleGradKernel : public framework::OpKernel { ddy_optional = *ddy; } 
phi::SubtractDoubleGradKernel( - static_cast::TYPE&>(dev_ctx), *y, ddx_optional, ddy_optional, *dout, axis, ddout); } diff --git a/paddle/fluid/operators/empty_op.h b/paddle/fluid/operators/empty_op.h index 42c951385a438709569be58507a39230ad77a22d..cb466fffcd7c7358b6e84c18b7895a17b2eaa907 100644 --- a/paddle/fluid/operators/empty_op.h +++ b/paddle/fluid/operators/empty_op.h @@ -39,7 +39,7 @@ class EmptyKernel : public framework::OpKernel { out_tensor->Resize(shape); out_tensor->mutable_data(context.GetPlace(), - framework::TransToPtenDataType(dtype)); + framework::TransToPhiDataType(dtype)); } }; diff --git a/paddle/fluid/operators/fill_any_like_op_npu.cc b/paddle/fluid/operators/fill_any_like_op_npu.cc index 2a914ff2ebd33024d80f8d88fde97f70a2f203a7..b02e60210c085bfcedb22fe915de6700575b0a4c 100644 --- a/paddle/fluid/operators/fill_any_like_op_npu.cc +++ b/paddle/fluid/operators/fill_any_like_op_npu.cc @@ -54,7 +54,7 @@ class FillAnyLikeNPUKernel : public framework::OpKernel { std::isnan(value), false, platform::errors::InvalidArgument("The filled value is NaN.")); - Tensor tensor_tmp(framework::TransToPtenDataType(data_type)); + Tensor tensor_tmp(framework::TransToPhiDataType(data_type)); tensor_tmp.mutable_data({1}, context.GetPlace()); FillNpuTensorWithConstant(&tensor_tmp, static_cast(value)); diff --git a/paddle/fluid/operators/fill_any_like_op_xpu.cc b/paddle/fluid/operators/fill_any_like_op_xpu.cc index 896310cd0918b118db003d784daca87c49c5ab32..ec4ba6e926c41bab8d7ceda20486db39f2d4dabe 100644 --- a/paddle/fluid/operators/fill_any_like_op_xpu.cc +++ b/paddle/fluid/operators/fill_any_like_op_xpu.cc @@ -60,9 +60,9 @@ class FillAnyLikeXPUKernel : public framework::OpKernel { auto& dev_ctx = context.template device_context(); - // call pten kernel + // call phi kernel phi::FullLikeKernel( - static_cast::TYPE&>(dev_ctx), *x, value, phi::DataType::UNDEFINED, out); } diff --git a/paddle/fluid/operators/fill_constant_batch_size_like_op.h 
b/paddle/fluid/operators/fill_constant_batch_size_like_op.h index 9d1d1eb7c6af523faa187c6aa4dd58cc8e077a29..31471c6b622684ac2134366bd23b8919ba1f93e5 100644 --- a/paddle/fluid/operators/fill_constant_batch_size_like_op.h +++ b/paddle/fluid/operators/fill_constant_batch_size_like_op.h @@ -63,7 +63,7 @@ class FillConstantBatchSizeLikeOpKernel : public framework::OpKernel { auto &dev_ctx = *pool.Get(platform::CPUPlace()); phi::funcs::SetConstant functor; out->mutable_data(platform::CPUPlace(), - framework::TransToPtenDataType(data_type)); + framework::TransToPhiDataType(data_type)); functor(reinterpret_cast(dev_ctx), out, static_cast(value)); } @@ -72,7 +72,7 @@ class FillConstantBatchSizeLikeOpKernel : public framework::OpKernel { auto &dev_ctx = *pool.Get(ctx.GetPlace()); phi::funcs::SetConstant functor; out->mutable_data(ctx.GetPlace(), - framework::TransToPtenDataType(data_type)); + framework::TransToPhiDataType(data_type)); functor(reinterpret_cast(dev_ctx), out, static_cast(value)); } diff --git a/paddle/fluid/operators/fill_constant_batch_size_like_op_npu.cc b/paddle/fluid/operators/fill_constant_batch_size_like_op_npu.cc index 9ce433a214dd5becfdd979e635eb83e75216bbaf..5bba4da14aba8bf2a6172b7e212dfca642f527fc 100644 --- a/paddle/fluid/operators/fill_constant_batch_size_like_op_npu.cc +++ b/paddle/fluid/operators/fill_constant_batch_size_like_op_npu.cc @@ -72,13 +72,13 @@ class FillConstantBatchSizeLikeOpNPUKernel : public framework::OpKernel { auto &dev_ctx = *pool.Get(platform::CPUPlace()); phi::funcs::SetConstant functor; out->mutable_data(platform::CPUPlace(), - framework::TransToPtenDataType(data_type)); + framework::TransToPhiDataType(data_type)); functor(reinterpret_cast(dev_ctx), out, static_cast(value)); } else { out->mutable_data(ctx.GetPlace(), - framework::TransToPtenDataType(data_type)); - Tensor tensor_tmp(framework::TransToPtenDataType(data_type)); + framework::TransToPhiDataType(data_type)); + Tensor 
tensor_tmp(framework::TransToPhiDataType(data_type)); tensor_tmp.mutable_data({1}, ctx.GetPlace()); FillNpuTensorWithConstant(&tensor_tmp, value); diff --git a/paddle/fluid/operators/fill_constant_op.h b/paddle/fluid/operators/fill_constant_op.h index eccc53d8766e25b6f4445699e09f80581a28cf3e..d401b5b82f2b0defd3f2b17ed199d0bd01510859 100644 --- a/paddle/fluid/operators/fill_constant_op.h +++ b/paddle/fluid/operators/fill_constant_op.h @@ -122,7 +122,7 @@ class FillConstantKernel : public framework::OpKernel { << ((data_type == framework::proto::VarType::BF16) ? "" : ""); tensor->mutable_data(platform::CPUPlace(), - framework::TransToPtenDataType(data_type)); + framework::TransToPhiDataType(data_type)); phi::funcs::SetConstant functor; auto &dev_ctx = *pool.Get(platform::CPUPlace()); functor(reinterpret_cast(dev_ctx), @@ -130,7 +130,7 @@ class FillConstantKernel : public framework::OpKernel { } else if (actual_place == 1) { #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) tensor->mutable_data(ctx.GetPlace(), - framework::TransToPtenDataType(data_type)); + framework::TransToPhiDataType(data_type)); phi::funcs::SetConstant functor; auto &dev_ctx = *pool.Get(ctx.GetPlace()); functor(reinterpret_cast(dev_ctx), @@ -142,7 +142,7 @@ class FillConstantKernel : public framework::OpKernel { } else if (actual_place == 2) { #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) tensor->mutable_data(platform::CUDAPinnedPlace(), - framework::TransToPtenDataType(data_type)); + framework::TransToPhiDataType(data_type)); phi::funcs::SetConstant functor; auto &dev_ctx = *pool.Get(platform::CUDAPinnedPlace()); functor( @@ -155,7 +155,7 @@ class FillConstantKernel : public framework::OpKernel { } else if (actual_place == 3) { #ifdef PADDLE_WITH_XPU tensor->mutable_data(ctx.GetPlace(), - framework::TransToPtenDataType(data_type)); + framework::TransToPhiDataType(data_type)); phi::funcs::SetConstant functor; auto &dev_ctx = *pool.Get(ctx.GetPlace()); 
functor(reinterpret_cast(dev_ctx), diff --git a/paddle/fluid/operators/fill_constant_op_npu.cc b/paddle/fluid/operators/fill_constant_op_npu.cc index eb684f818fb08b7c27dbf137c6dd189168382064..79018f2a97448a8c6265a969dad37bce77d1b7ee 100644 --- a/paddle/fluid/operators/fill_constant_op_npu.cc +++ b/paddle/fluid/operators/fill_constant_op_npu.cc @@ -61,7 +61,7 @@ class FillConstantNPUKernel : public framework::OpKernel { out_var->mutable_data(shape, ctx.GetPlace()); if (data_type != framework::proto::VarType::BOOL) { - Tensor tensor_value(framework::TransToPtenDataType(data_type)); + Tensor tensor_value(framework::TransToPhiDataType(data_type)); tensor_value.mutable_data({1}, ctx.GetPlace()); FillNpuTensorWithConstant(&tensor_value, value); NpuOpRunner runner; diff --git a/paddle/fluid/operators/fill_op.h b/paddle/fluid/operators/fill_op.h index c202fa23ca891d459d658cd3eb1b080593c7801d..c5cbffbf5c695ffe9d16a530b4c84db094a72df2 100644 --- a/paddle/fluid/operators/fill_op.h +++ b/paddle/fluid/operators/fill_op.h @@ -49,10 +49,10 @@ class FillKernel : public framework::OpKernel { out.Resize(phi::make_ddim(ctx.Attr>("shape"))); auto dtype = static_cast(ctx.Attr("dtype")); - auto pten_dtype = framework::TransToPtenDataType(dtype); + auto phi_dtype = framework::TransToPhiDataType(dtype); platform::CPUPlace cpu; auto force_cpu = ctx.Attr("force_cpu"); - out.mutable_data(force_cpu ? cpu : ctx.GetPlace(), pten_dtype); + out.mutable_data(force_cpu ? cpu : ctx.GetPlace(), phi_dtype); framework::LoDTensor tensor; @@ -61,7 +61,7 @@ class FillKernel : public framework::OpKernel { } else { // Always make tensor in CPU memory. 
tensor.Resize(out.dims()); - tensor.mutable_data(cpu, pten_dtype); + tensor.mutable_data(cpu, phi_dtype); } framework::VisitDataType( diff --git a/paddle/fluid/operators/flatten_op.h b/paddle/fluid/operators/flatten_op.h index 3605eabfc1d9bb236b14187c611eed0d149f0acc..5ef13b38c8a86e16cefdc97be6934b313fdb7bc4 100644 --- a/paddle/fluid/operators/flatten_op.h +++ b/paddle/fluid/operators/flatten_op.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include #include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/framework/pten_utils.h" +#include "paddle/fluid/framework/phi_utils.h" #include "paddle/fluid/operators/math/pooling.h" #include "paddle/fluid/platform/device_context.h" #include "paddle/phi/kernels/empty_kernel.h" @@ -132,9 +132,9 @@ class FlattenContiguousRangeKernel : public framework::OpKernel { auto &dev_ctx = context.device_context(); // call new kernel - phi::FlattenKernel::TYPE>( - static_cast::TYPE &>(dev_ctx), *in, start_axis, stop_axis, out); } @@ -153,9 +153,9 @@ class FlattenContiguousRangeGradKernel : public framework::OpKernel { auto &dev_ctx = ctx.device_context(); // call new kernel - phi::FlattenGradKernel::TYPE>( - static_cast::TYPE &>(dev_ctx), *d_out, *xshape, d_x); } diff --git a/paddle/fluid/operators/fused/fused_embedding_eltwise_layernorm_op.cu b/paddle/fluid/operators/fused/fused_embedding_eltwise_layernorm_op.cu index 962af435b2312cf876c27e005d19f366d965b1fc..13f1c6808aef2e0873c5ce6493514c47710dcf16 100644 --- a/paddle/fluid/operators/fused/fused_embedding_eltwise_layernorm_op.cu +++ b/paddle/fluid/operators/fused/fused_embedding_eltwise_layernorm_op.cu @@ -34,9 +34,9 @@ class EmbeddingEltWiseLayerNormKernel : public framework::OpKernel { int input_num = static_cast(ids.size()); framework::Tensor in_ids_( - framework::TransToPtenDataType(framework::proto::VarType::INT64)), + framework::TransToPhiDataType(framework::proto::VarType::INT64)), in_embs_( - 
framework::TransToPtenDataType(framework::proto::VarType::INT64)); + framework::TransToPhiDataType(framework::proto::VarType::INT64)); framework::DDim in_dim{input_num}; int device_id; #ifdef PADDLE_WITH_HIP diff --git a/paddle/fluid/operators/lu_op.h b/paddle/fluid/operators/lu_op.h index 49f4ff3107026000726738a640d635739023bc62..f323e2e041d994eb01c9d4e934984b8a005ffcec 100644 --- a/paddle/fluid/operators/lu_op.h +++ b/paddle/fluid/operators/lu_op.h @@ -88,8 +88,8 @@ void SetValueCompute(const framework::ExecutionContext& ctx, // set_value is what we want. paddle::framework::TensorCopy(*in, place, out); - Tensor slice_tensor(framework::TransToPtenDataType(dtype)), - pad_tensor(framework::TransToPtenDataType(dtype)); + Tensor slice_tensor(framework::TransToPhiDataType(dtype)), + pad_tensor(framework::TransToPhiDataType(dtype)); slice_tensor.mutable_data(slice_dims, place); pad_tensor.mutable_data(in_dims, place); @@ -147,7 +147,7 @@ void SetValueCompute(const framework::ExecutionContext& ctx, ElementwiseComputeEx, DeviceContext, T>( ctx, &slice_tensor, value_tensor, -1, SubFunctor(), &slice_tensor); } else { - Tensor value_t(framework::TransToPtenDataType(dtype)); + Tensor value_t(framework::TransToPhiDataType(dtype)); auto value_dims = phi::make_ddim(shape); CheckIsDimsMatch(slice_dims_for_assign, value_dims); @@ -224,8 +224,8 @@ void Tensor_Add(const DeviceContext& dev_ctx, const framework::Tensor& src1, out->mutable_data(dev_ctx.GetPlace()); phi::AddRawKernel< - T, typename paddle::framework::ConvertToPtenContext::TYPE>( - static_cast::TYPE>( + static_cast::TYPE&>(dev_ctx), src1, src2, -1, out); } @@ -237,8 +237,8 @@ void Tensor_Sub(const DeviceContext& dev_ctx, const framework::Tensor& src1, out->mutable_data(dev_ctx.GetPlace()); phi::SubtractRawKernel< - T, typename paddle::framework::ConvertToPtenContext::TYPE>( - static_cast::TYPE>( + static_cast::TYPE&>(dev_ctx), src1, src2, -1, out); } diff --git a/paddle/fluid/operators/metrics/accuracy_op_mlu.cc 
b/paddle/fluid/operators/metrics/accuracy_op_mlu.cc index 1f87513bb4bea0208bc8de945aa56ffed198ab61..2598d3b0277c94a52e1fa14b04c00b595071f312 100644 --- a/paddle/fluid/operators/metrics/accuracy_op_mlu.cc +++ b/paddle/fluid/operators/metrics/accuracy_op_mlu.cc @@ -35,8 +35,8 @@ class AccuracyMLUKernel : public framework::OpKernel { } // cast `indices` or `label` if their type is not INT32 - Tensor indices_int32(framework::TransToPtenDataType(VT::INT32)); - Tensor label_int32(framework::TransToPtenDataType(VT::INT32)); + Tensor indices_int32(framework::TransToPhiDataType(VT::INT32)); + Tensor label_int32(framework::TransToPhiDataType(VT::INT32)); auto indices_type = framework::TransToProtoVarType(indices->type()); if (indices_type != VT::INT32) { PADDLE_ENFORCE_EQ(MLUSupportsCast(indices_type, VT::INT32), true, @@ -78,7 +78,7 @@ class AccuracyMLUKernel : public framework::OpKernel { // equal MLUCnnlTensorDesc indices_int32_desc(indices_int32); MLUCnnlTensorDesc label_int32_desc(label_int32); - Tensor equal_tensor(framework::TransToPtenDataType(VT::BOOL)); + Tensor equal_tensor(framework::TransToPhiDataType(VT::BOOL)); equal_tensor.Resize(indices->dims()); equal_tensor.mutable_data(ctx.GetPlace()); MLUCnnlTensorDesc equal_tensor_desc(equal_tensor); @@ -88,7 +88,7 @@ class AccuracyMLUKernel : public framework::OpKernel { GetBasePtr(&equal_tensor)); // cast equal - Tensor equal_fp32(framework::TransToPtenDataType(VT::FP32)); + Tensor equal_fp32(framework::TransToPhiDataType(VT::FP32)); equal_fp32.Resize(indices->dims()); equal_fp32.mutable_data(ctx.GetPlace()); MLUCnnlTensorDesc equal_fp32_desc(equal_fp32); @@ -99,7 +99,7 @@ class AccuracyMLUKernel : public framework::OpKernel { // [correct] // reduce_max - Tensor correct_max(framework::TransToPtenDataType(VT::FP32)); + Tensor correct_max(framework::TransToPhiDataType(VT::FP32)); correct_max.Resize(phi::make_ddim({num_samples})); correct_max.mutable_data(ctx.GetPlace()); MLUCnnlTensorDesc correct_max_desc(correct_max); 
@@ -112,7 +112,7 @@ class AccuracyMLUKernel : public framework::OpKernel { correct_max_desc.get(), GetBasePtr(&correct_max)); // reduce_sum - Tensor correct_sum(framework::TransToPtenDataType(VT::FP32)); + Tensor correct_sum(framework::TransToPhiDataType(VT::FP32)); correct_sum.Resize(correct->dims()); correct_sum.mutable_data(ctx.GetPlace()); MLUCnnlTensorDesc correct_sum_desc(correct_sum); @@ -138,7 +138,7 @@ class AccuracyMLUKernel : public framework::OpKernel { MLUCnnl::Fill(ctx, num_samples, total_desc.get(), GetBasePtr(total)); // use `total` of type `float32` for calculating accuracy - Tensor total_fp32(framework::TransToPtenDataType(VT::FP32)); + Tensor total_fp32(framework::TransToPhiDataType(VT::FP32)); total_fp32.Resize(total->dims()); total_fp32.mutable_data(ctx.GetPlace()); MLUCnnlTensorDesc total_fp32_desc(total_fp32); diff --git a/paddle/fluid/operators/mlu/mlu_baseop.h b/paddle/fluid/operators/mlu/mlu_baseop.h index 056e0690c01fdb1e7a9726db6905c05c7dc1eb54..2cbecba9fa081970221242555b6b805ff9acae83 100644 --- a/paddle/fluid/operators/mlu/mlu_baseop.h +++ b/paddle/fluid/operators/mlu/mlu_baseop.h @@ -85,7 +85,7 @@ inline cnnlDataType_t ToCnnlDataType( inline cnnlDataType_t ToCnnlDataType( const paddle::framework::proto::VarType::Type& type) { - return ToCnnlDataType(framework::TransToPtenDataType(type)); + return ToCnnlDataType(framework::TransToPhiDataType(type)); } template diff --git a/paddle/fluid/operators/reduce_ops/reduce_op.h b/paddle/fluid/operators/reduce_ops/reduce_op.h index eb39f069e56b73aa34e3323768a4b72cd6c737f4..65cca94814e88111239aef3559285d6fe321a72d 100644 --- a/paddle/fluid/operators/reduce_ops/reduce_op.h +++ b/paddle/fluid/operators/reduce_ops/reduce_op.h @@ -257,12 +257,12 @@ class ReduceKernel : public framework::OpKernel { std::vector tmp_dims(dims.begin(), dims.end()); // call new kernel - phi::Reduce::TYPE, - T, Functor>( - static_cast::TYPE, T, + Functor>( + static_cast::TYPE&>(dev_ctx), *input, reduce_all, tmp_dims, 
keep_dim, - framework::TransToPtenDataType(cast_out_dtype), output); + framework::TransToPhiDataType(cast_out_dtype), output); } }; template @@ -684,7 +684,7 @@ class ReduceCudaKernel : public framework::OpKernel { const Tensor* input = context.Input("X"); Tensor* output = context.Output("Out"); auto out_dtype = context.Attr("out_dtype"); - auto pt_out_dtype = paddle::framework::TransToPtenDataType( + auto pt_out_dtype = paddle::framework::TransToPhiDataType( static_cast(out_dtype)); std::vector dims = context.Attr>("dim"); @@ -714,7 +714,7 @@ class ReduceCudaGradKernel : public framework::OpKernel { context.Input(framework::GradVarName("Out")); auto* d_x = context.Output(framework::GradVarName("X")); auto out_dtype = context.Attr("in_dtype"); - auto pt_out_dtype = framework::TransToPtenDataType( + auto pt_out_dtype = framework::TransToPhiDataType( static_cast(out_dtype)); // get reduce_dim and reduce_num for reduce_mean_grad int dim_size = in_x->dims().size(); @@ -735,8 +735,8 @@ class ReduceCudaGradKernel : public framework::OpKernel { } else { d_x->mutable_data(dev_ctx.GetPlace(), d_out->dtype()); } - auto pt_d_out = paddle::experimental::MakePtenDenseTensor(new_d_out); - auto pt_d_x = paddle::experimental::MakePtenDenseTensor(*d_x); + auto pt_d_out = paddle::experimental::MakePhiDenseTensor(new_d_out); + auto pt_d_x = paddle::experimental::MakePhiDenseTensor(*d_x); if (out_dtype <= 0) { pt_out_dtype = d_out->dtype(); } diff --git a/paddle/fluid/operators/reshape_op.cc b/paddle/fluid/operators/reshape_op.cc index 0e74a23523b7d5182fabff88d08f6cc3f56a1783..8d99a60b12967a55e0cc208c6ae96c0dabb5f473 100644 --- a/paddle/fluid/operators/reshape_op.cc +++ b/paddle/fluid/operators/reshape_op.cc @@ -16,7 +16,7 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/infershape_utils.h" #include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/framework/pten_utils.h" +#include "paddle/fluid/framework/phi_utils.h" // only can include the headers in paddle/phi/api dirs #include "paddle/phi/api/lib/utils/tensor_utils.h" diff --git a/paddle/fluid/operators/scale_op_xpu.cc b/paddle/fluid/operators/scale_op_xpu.cc index d6e8f3e5aa1086900d0144ea8757a05776b9c9b0..40f5699a29b355864652b5d899d1918ec663cf0b 100644 --- a/paddle/fluid/operators/scale_op_xpu.cc +++ b/paddle/fluid/operators/scale_op_xpu.cc @@ -42,9 +42,9 @@ class ScaleXPUKernel : public framework::OpKernel { framework::GetMutableLoDTensorOrSelectedRowsValueFromVar(out_var); out->mutable_data(in->place()); auto& dev_ctx = ctx.template device_context(); - // call pten kernel + // call phi kernel phi::ScaleKernel( - static_cast::TYPE&>(dev_ctx), *in, scale, bias, bias_after_scale, out); } diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op_mlu.cc b/paddle/fluid/operators/softmax_with_cross_entropy_op_mlu.cc index a51f68530caf88a8f5abe2b4615180266f409a8c..1cd6f8b7698b949a8e198c766fcf193e13481298 100644 --- a/paddle/fluid/operators/softmax_with_cross_entropy_op_mlu.cc +++ b/paddle/fluid/operators/softmax_with_cross_entropy_op_mlu.cc @@ -87,7 +87,7 @@ class SoftmaxWithCrossEntropyMLUKernel : public framework::OpKernel { platform::errors::InvalidArgument( "If soft_label=False, axis must be -1 or" " can be regard as last dimention in mlu kernel.")); - framework::Tensor labels_int32(framework::TransToPtenDataType(VT::INT32)); + framework::Tensor labels_int32(framework::TransToPhiDataType(VT::INT32)); labels_int32.Resize(labels->dims()); labels_int32.mutable_data(ctx.GetPlace()); diff --git a/paddle/fluid/operators/top_k_op_mlu.cc b/paddle/fluid/operators/top_k_op_mlu.cc index a9f835f6fe2c25d6ffdfae93e1c7cd170db6b891..102902bdaaaaf4a6a94699f561a5e91213be8c44 100644 --- a/paddle/fluid/operators/top_k_op_mlu.cc +++ 
b/paddle/fluid/operators/top_k_op_mlu.cc @@ -47,7 +47,7 @@ class TopkMLUKernel : public framework::OpKernel { const bool sorted = true; const int axis = -1; // cnnl only support int32/int16 type of indices - framework::Tensor indices_int32(framework::TransToPtenDataType(VT::INT32)); + framework::Tensor indices_int32(framework::TransToPhiDataType(VT::INT32)); indices_int32.Resize(indices->dims()); indices_int32.mutable_data(place); diff --git a/paddle/fluid/operators/top_k_v2_op_mlu.cc b/paddle/fluid/operators/top_k_v2_op_mlu.cc index 7bada0179a1c5e73669b07fd77171f764db6e21c..5b8a6b3e75449508afa5d316d81f97ab815c9ea9 100644 --- a/paddle/fluid/operators/top_k_v2_op_mlu.cc +++ b/paddle/fluid/operators/top_k_v2_op_mlu.cc @@ -55,7 +55,7 @@ class TopkV2MLUKernel : public framework::OpKernel { indices->mutable_data(place); // cnnl only support int32/int16 type of indices - framework::Tensor indices_int32(framework::TransToPtenDataType(VT::INT32)); + framework::Tensor indices_int32(framework::TransToPhiDataType(VT::INT32)); indices_int32.Resize(indices->dims()); indices_int32.mutable_data(place); diff --git a/paddle/fluid/operators/uniform_random_inplace_op.cu b/paddle/fluid/operators/uniform_random_inplace_op.cu index 1c7b9a27f868821ceb20c720548b4df0ee6bcd40..b8d8467b7eba9f360d8b2043bd4ed3f63e42725a 100644 --- a/paddle/fluid/operators/uniform_random_inplace_op.cu +++ b/paddle/fluid/operators/uniform_random_inplace_op.cu @@ -36,7 +36,7 @@ class GPUUniformRandomInplaceGradKernel : public framework::OpKernel { ctx.template device_context(); float value = static_cast(0.0f); phi::FullKernel( - static_cast::TYPE&>(dev_cxt), dims, value, phi::DataType::UNDEFINED, dx); } diff --git a/paddle/fluid/platform/device/ipu/ipu_executor.cc b/paddle/fluid/platform/device/ipu/ipu_executor.cc index 91ab7f3f4f052707ce7ae57147169889cdc4c259..c124d58957fe642365bd5bbf074bc15bfd74c6ba 100644 --- a/paddle/fluid/platform/device/ipu/ipu_executor.cc +++ 
b/paddle/fluid/platform/device/ipu/ipu_executor.cc @@ -113,7 +113,7 @@ void Executor::Run(const std::vector &inputs, auto fetch_dtype = fetch_info.dataType(); auto paddle_type = PopartType2VarType(fetch_dtype); tensor->mutable_data(ctx.GetPlace(), - framework::TransToPtenDataType(paddle_type)); + framework::TransToPhiDataType(paddle_type)); anchor_wrappers.emplace(tensor_id, PaddleIArray(tensor)); popart_anchors.emplace(tensor_id, anchor_wrappers.at(tensor_id)); } diff --git a/paddle/fluid/platform/device/npu/npu_op_runner.cc b/paddle/fluid/platform/device/npu/npu_op_runner.cc index 90c0851d79d8079d35c4bf035f130c9c86089c7e..d45492391dc88ce0c690e0768e080dd989a0539c 100644 --- a/paddle/fluid/platform/device/npu/npu_op_runner.cc +++ b/paddle/fluid/platform/device/npu/npu_op_runner.cc @@ -467,7 +467,7 @@ void NpuOpRunner::TypeAdapter( } else { tmp_inputs[i].Resize(inputs[i].dims()); tmp_inputs[i].mutable_data(dev_ctx.GetPlace(), - framework::TransToPtenDataType(input_type[i])); + framework::TransToPhiDataType(input_type[i])); const auto &cast_runner = NpuOpRunner( "Cast", {inputs[i]}, {tmp_inputs[i]}, @@ -484,7 +484,7 @@ void NpuOpRunner::TypeAdapter( } else { tmp_outputs[i].Resize(outputs[i].dims()); tmp_outputs[i].mutable_data( - dev_ctx.GetPlace(), framework::TransToPtenDataType(output_type[i])); + dev_ctx.GetPlace(), framework::TransToPhiDataType(output_type[i])); } } diff --git a/paddle/fluid/platform/mkldnn_reuse.h b/paddle/fluid/platform/mkldnn_reuse.h index 285c6a4c130530987f3f63b1eecdf2ed1593ef09..01de7349f4823a66b2d180f3d1493477f361273a 100644 --- a/paddle/fluid/platform/mkldnn_reuse.h +++ b/paddle/fluid/platform/mkldnn_reuse.h @@ -1056,7 +1056,7 @@ class ReorderMKLDNNHandler { platform::Place place) { auto dst_md = platform::MKLDNNMemDesc(dims_, dtype_dst_, fmt); auto dst_data = output->mutable_data( - place, framework::TransToPtenDataType(vtype_dst_), dst_md.get_size()); + place, framework::TransToPhiDataType(vtype_dst_), dst_md.get_size()); return 
std::make_shared(dst_md, engine_, dst_data); } @@ -1065,7 +1065,7 @@ class ReorderMKLDNNHandler { const MKLDNNMemoryFormat& fmt, platform::Place place) { auto dst_md = platform::MKLDNNMemDesc(dims, dtype_dst_, fmt); auto dst_data = output->mutable_data( - place, framework::TransToPtenDataType(vtype_dst_), dst_md.get_size()); + place, framework::TransToPhiDataType(vtype_dst_), dst_md.get_size()); return std::make_shared(dst_md, engine_, dst_data); } diff --git a/paddle/fluid/platform/transform.h b/paddle/fluid/platform/transform.h index 49690d1c66be74090c684d09f50e6c0d7b67d787..6f714a677033bb87d1a221f62baffa1112726571 100644 --- a/paddle/fluid/platform/transform.h +++ b/paddle/fluid/platform/transform.h @@ -59,7 +59,7 @@ struct Transform { BinaryOperation op); }; -// NOTE: After the pten kernel is migrated, it needs to be deleted. +// NOTE: After the phi kernel is migrated, it needs to be deleted. template <> struct Transform { template diff --git a/paddle/fluid/pybind/eager.cc b/paddle/fluid/pybind/eager.cc index d9a2dcb6869096a5f08675bb6dc7994cc8c9889b..1052f93d32ec3cb626577c4b584cc6172c83da2e 100644 --- a/paddle/fluid/pybind/eager.cc +++ b/paddle/fluid/pybind/eager.cc @@ -75,7 +75,7 @@ void EmptyTensorInitializer(TensorObject* self, const std::string& name, std::shared_ptr dense_tensor = std::make_shared( phi::make_intrusive(place), - phi::DenseTensorMeta(paddle::framework::TransToPtenDataType(dtype), + phi::DenseTensorMeta(paddle::framework::TransToPhiDataType(dtype), ddims)); if (phi::product(ddims) > 0) { dense_tensor->mutable_data(place); @@ -133,7 +133,7 @@ void InitTensorWithTensor(TensorObject* self, VLOG(4) << "Same place, do ShareDataWith"; } else { self->tensor.set_impl( - src.copy_to(phi::TransToPtenBackend(place), true).impl()); + src.copy_to(phi::TransToPhiBackend(place), true).impl()); VLOG(4) << "Different place, do TensorCopy"; } if (src.get_autograd_meta()) { @@ -157,7 +157,7 @@ void InitTensorWithFrameworkTensor(TensorObject* self, auto temp = 
paddle::experimental::Tensor(std::make_shared(src)); self->tensor.set_impl( - temp.copy_to(phi::TransToPtenBackend(place), true).impl()); + temp.copy_to(phi::TransToPhiBackend(place), true).impl()); VLOG(4) << "Different place, do TensorCopy"; } egr::EagerUtils::autograd_meta(&(self->tensor))->SetPersistable(false); diff --git a/paddle/fluid/pybind/eager_functions.cc b/paddle/fluid/pybind/eager_functions.cc index b825e9265a8cd8b080df7fd316b33007c2445384..0b04dc7347ce78f87d6f8d81e30eb4135fd965ed 100644 --- a/paddle/fluid/pybind/eager_functions.cc +++ b/paddle/fluid/pybind/eager_functions.cc @@ -135,7 +135,7 @@ static PyObject* eager_api_tensor_copy(PyObject* self, PyObject* args, auto place = CastPyArg2Place(PyTuple_GET_ITEM(args, 2), 2); bool blocking = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3); - dst = src.copy_to(phi::TransToPtenBackend(place), blocking); + dst = src.copy_to(phi::TransToPhiBackend(place), blocking); egr::EagerUtils::autograd_meta(&dst)->SetStopGradient( egr::EagerUtils::autograd_meta(&(src))->StopGradient()); egr::EagerUtils::autograd_meta(&dst)->SetPersistable( diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc index 221d4d53d0663e2ac932728efb0b65682bd44bb7..f11a2ab2517fb481f184c9b68b2558c999d88ec9 100644 --- a/paddle/fluid/pybind/eager_method.cc +++ b/paddle/fluid/pybind/eager_method.cc @@ -191,7 +191,7 @@ static PyObject* tensor_method__copy_to(TensorObject* self, PyObject* args, bool blocking = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 0), 0); auto place = CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1); auto cp_tensor = - self->tensor.copy_to(phi::TransToPtenBackend(place), blocking); + self->tensor.copy_to(phi::TransToPhiBackend(place), blocking); egr::EagerUtils::autograd_meta(&cp_tensor)->SetStopGradient(true); egr::EagerUtils::autograd_meta(&cp_tensor) ->SetPersistable( diff --git a/paddle/fluid/pybind/eager_op_function_generator.cc b/paddle/fluid/pybind/eager_op_function_generator.cc index 
4fe47d5a8427d11f560e73990ea8bad7bae7a929..c15c171799f4421fc3e8b40a84abdbb062709dc7 100644 --- a/paddle/fluid/pybind/eager_op_function_generator.cc +++ b/paddle/fluid/pybind/eager_op_function_generator.cc @@ -32,7 +32,7 @@ #endif #include "paddle/fluid/pybind/op_function_generator.h" -// pten +// phi #include "paddle/phi/kernels/declarations.h" // clang-format off @@ -365,9 +365,9 @@ GenerateOpFunctions() { auto& op_type = op_proto->type(); // Skip ooerator which is not inherit form OperatorWithKernel, like while, // since only OperatorWithKernel can run in dygraph mode. - // if the pten lib contains op kernel, we still generate ops method + // if the phi lib contains op kernel, we still generate ops method if (!all_kernels.count(op_type) && - !phi::KernelFactory::Instance().HasCompatiblePtenKernel(op_type)) { + !phi::KernelFactory::Instance().HasCompatiblePhiKernel(op_type)) { continue; } std::string func_name = "eager_api_" + op_type; diff --git a/paddle/fluid/pybind/kernel_signature_generator.cc b/paddle/fluid/pybind/kernel_signature_generator.cc index 14e4fac7cdd95ac3b33d64741c4b2f461a7225be..8283a249ded4c0c790add73573621252bc8954d8 100644 --- a/paddle/fluid/pybind/kernel_signature_generator.cc +++ b/paddle/fluid/pybind/kernel_signature_generator.cc @@ -15,7 +15,7 @@ #include #include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/framework/pten_utils.h" +#include "paddle/fluid/framework/phi_utils.h" #include "paddle/fluid/pybind/pybind.h" // NOLINT #include "paddle/phi/core/compat/op_utils.h" #include "paddle/phi/core/kernel_factory.h" diff --git a/paddle/fluid/pybind/op_function_generator.cc b/paddle/fluid/pybind/op_function_generator.cc index cbbe56985b2adaab0a4a33214132066332cdcd79..9d5bcfac494cba0c550cf7f2751f485b689473b9 100644 --- a/paddle/fluid/pybind/op_function_generator.cc +++ b/paddle/fluid/pybind/op_function_generator.cc @@ -32,7 +32,7 @@ #include "paddle/fluid/framework/fleet/ascend_wrapper.h" #endif -// pten +// phi #include 
"paddle/phi/kernels/declarations.h" // NOTE(pangyoki): Inplace OP with duplicable input. @@ -400,9 +400,9 @@ GenerateOpFunctions() { auto& op_type = op_proto->type(); // Skip operator which is not inherit form OperatorWithKernel, like while, // since only OperatorWithKernel can run in dygraph mode. - // if the pten lib contains op kernel, we still generate ops method + // if the phi lib contains op kernel, we still generate ops method if (!all_kernels.count(op_type) && - !phi::KernelFactory::Instance().HasCompatiblePtenKernel(op_type)) { + !phi::KernelFactory::Instance().HasCompatiblePhiKernel(op_type)) { continue; } diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index 1ea9c7c65d5f5c9e4a18091d9461e5cbd1c2a779..6e553ad2e60e292881fa8bb0294ea2a247656b67 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -50,8 +50,8 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_version_registry.h" #include "paddle/fluid/framework/parallel_executor.h" +#include "paddle/fluid/framework/phi_utils.h" #include "paddle/fluid/framework/prune.h" -#include "paddle/fluid/framework/pten_utils.h" #include "paddle/fluid/framework/reader.h" #include "paddle/fluid/framework/save_load_util.h" #include "paddle/fluid/framework/scope_pool.h" @@ -464,7 +464,7 @@ static void inline CreateVariableIfNotExit( tensor_temp->Resize(phi::make_ddim(var_desc.GetShape())); tensor_temp->mutable_data( exe->GetPlace(), - framework::TransToPtenDataType(var_desc.GetDataType())); + framework::TransToPhiDataType(var_desc.GetDataType())); } } } else { @@ -671,60 +671,60 @@ PYBIND11_MODULE(core_noavx, m) { m.def("_get_use_default_grad_op_desc_maker_ops", [] { return OpInfoMap::Instance().GetUseDefaultGradOpDescMakerOps(); }); - m.def( - "_get_all_register_op_kernels", - [](const std::string &lib) { - std::unordered_map> - all_kernels_info; - if (lib == "fluid" || lib == "all") { - auto 
&all_kernels = - paddle::framework::OperatorWithKernel::AllOpKernels(); - - for (auto &kernel_pair : all_kernels) { - auto op_type = kernel_pair.first; - std::vector kernel_types; - for (auto &info_pair : kernel_pair.second) { - paddle::framework::OpKernelType kernel_type = info_pair.first; - kernel_types.emplace_back( - paddle::framework::KernelTypeToString(kernel_type)); + m.def("_get_all_register_op_kernels", + [](const std::string &lib) { + std::unordered_map> + all_kernels_info; + if (lib == "fluid" || lib == "all") { + auto &all_kernels = + paddle::framework::OperatorWithKernel::AllOpKernels(); + + for (auto &kernel_pair : all_kernels) { + auto op_type = kernel_pair.first; + std::vector kernel_types; + for (auto &info_pair : kernel_pair.second) { + paddle::framework::OpKernelType kernel_type = info_pair.first; + kernel_types.emplace_back( + paddle::framework::KernelTypeToString(kernel_type)); + } + all_kernels_info.emplace(op_type, kernel_types); } - all_kernels_info.emplace(op_type, kernel_types); } - } - if (lib == "pten" || lib == "all") { - auto pten_kernels = phi::KernelFactory::Instance().kernels(); - for (auto &kernel_pair : pten_kernels) { - auto op_type = phi::TransToFluidOpName(kernel_pair.first); - std::vector kernel_types; - for (auto &info_pair : kernel_pair.second) { - framework::OpKernelType kernel_type = - framework::TransPtenKernelKeyToOpKernelType(info_pair.first); - auto kernel_type_str = framework::KernelTypeToString(kernel_type); - if (all_kernels_info.count(op_type)) { - if (std::find(all_kernels_info[op_type].begin(), - all_kernels_info[op_type].end(), - kernel_type_str) == - all_kernels_info[op_type].end()) { - all_kernels_info[op_type].emplace_back(kernel_type_str); + if (lib == "phi" || lib == "all") { + auto phi_kernels = phi::KernelFactory::Instance().kernels(); + for (auto &kernel_pair : phi_kernels) { + auto op_type = phi::TransToFluidOpName(kernel_pair.first); + std::vector kernel_types; + for (auto &info_pair : 
kernel_pair.second) { + framework::OpKernelType kernel_type = + framework::TransPhiKernelKeyToOpKernelType(info_pair.first); + auto kernel_type_str = + framework::KernelTypeToString(kernel_type); + if (all_kernels_info.count(op_type)) { + if (std::find(all_kernels_info[op_type].begin(), + all_kernels_info[op_type].end(), + kernel_type_str) == + all_kernels_info[op_type].end()) { + all_kernels_info[op_type].emplace_back(kernel_type_str); + } + } else { + kernel_types.emplace_back(kernel_type_str); } - } else { - kernel_types.emplace_back(kernel_type_str); } - } - if (!kernel_types.empty()) { - all_kernels_info.emplace(op_type, kernel_types); + if (!kernel_types.empty()) { + all_kernels_info.emplace(op_type, kernel_types); + } } } - } - return all_kernels_info; - }, - py::arg("lib") = "all", - R"DOC( + return all_kernels_info; + }, + py::arg("lib") = "all", + R"DOC( Return the registered kernels in paddle. Args: - lib[string]: the libarary, could be 'pten', 'fluid' and 'all'. + lib[string]: the libarary, could be 'phi', 'fluid' and 'all'. 
)DOC"); // NOTE(zjl): ctest would load environment variables at the beginning even @@ -823,39 +823,39 @@ PYBIND11_MODULE(core_noavx, m) { .def("_mutable_data", [](framework::Tensor &self, paddle::platform::CPUPlace &place, paddle::framework::proto::VarType::Type type) { - return reinterpret_cast(self.mutable_data( - place, framework::TransToPtenDataType(type))); + return reinterpret_cast( + self.mutable_data(place, framework::TransToPhiDataType(type))); }) .def("_mutable_data", [](framework::Tensor &self, paddle::platform::XPUPlace &place, paddle::framework::proto::VarType::Type type) { - return reinterpret_cast(self.mutable_data( - place, framework::TransToPtenDataType(type))); + return reinterpret_cast( + self.mutable_data(place, framework::TransToPhiDataType(type))); }) .def("_mutable_data", [](framework::Tensor &self, paddle::platform::CUDAPlace &place, paddle::framework::proto::VarType::Type type) { - return reinterpret_cast(self.mutable_data( - place, framework::TransToPtenDataType(type))); + return reinterpret_cast( + self.mutable_data(place, framework::TransToPhiDataType(type))); }) .def("_mutable_data", [](framework::Tensor &self, paddle::platform::CUDAPinnedPlace &place, paddle::framework::proto::VarType::Type type) { - return reinterpret_cast(self.mutable_data( - place, framework::TransToPtenDataType(type))); + return reinterpret_cast( + self.mutable_data(place, framework::TransToPhiDataType(type))); }) .def("_mutable_data", [](framework::Tensor &self, paddle::platform::MLUPlace &place, paddle::framework::proto::VarType::Type type) { - return reinterpret_cast(self.mutable_data( - place, framework::TransToPtenDataType(type))); + return reinterpret_cast( + self.mutable_data(place, framework::TransToPhiDataType(type))); }) .def("_clear", &framework::Tensor::clear) .def("_mutable_data", [](framework::Tensor &self, paddle::platform::NPUPlace &place, paddle::framework::proto::VarType::Type type) { - return reinterpret_cast(self.mutable_data( - place, 
framework::TransToPtenDataType(type))); + return reinterpret_cast( + self.mutable_data(place, framework::TransToPhiDataType(type))); }) .def("_copy_from", &TensorCopyFrom, py::arg("tensor"), py::arg("place"), py::arg("batch_size") = -1) diff --git a/paddle/fluid/pybind/tensor_py.h b/paddle/fluid/pybind/tensor_py.h index 49bacc1cd6d852a33d1d14e50d857039821d5303..e7abd64ec4439611c307440597c7278cabb03ab9 100644 --- a/paddle/fluid/pybind/tensor_py.h +++ b/paddle/fluid/pybind/tensor_py.h @@ -324,7 +324,7 @@ void SetTensorFromPyArrayT( if (zero_copy) { auto holder = std::make_shared>(array); auto type = framework::ToDataType(std::type_index(typeid(T))); - self->ResetHolderWithType(holder, framework::TransToPtenDataType(type)); + self->ResetHolderWithType(holder, framework::TransToPhiDataType(type)); } else { auto dst = self->mutable_data(place); std::memcpy(dst, array.data(), array.nbytes()); @@ -348,7 +348,7 @@ void SetTensorFromPyArrayT( if (zero_copy) { auto holder = std::make_shared>(array); auto type = framework::ToDataType(std::type_index(typeid(T))); - self->ResetHolderWithType(holder, framework::TransToPtenDataType(type)); + self->ResetHolderWithType(holder, framework::TransToPhiDataType(type)); } else { // IPU does not store Tensor data, Tensor will be created on CPU if (!self->initialized()) { @@ -518,7 +518,7 @@ void SetUVATensorFromPyArray( cuda_device_pointer, need_allocate_size, platform::CUDAPlace(device_id)); self_tensor->ResetHolderWithType(holder, - framework::TransToPtenDataType(data_type)); + framework::TransToPhiDataType(data_type)); #endif } diff --git a/paddle/phi/api/all.h b/paddle/phi/api/all.h index 748ed11058af6d864a4b64c1e947787da5f2a234..154b84670aaf992833fccf9297d8b16a081e173f 100644 --- a/paddle/phi/api/all.h +++ b/paddle/phi/api/all.h @@ -24,12 +24,12 @@ limitations under the License. 
*/ #endif #endif -// new pten apis +// new phi apis #include "paddle/phi/api/include/api.h" #include "paddle/phi/api/include/sparse_api.h" #include "paddle/phi/api/include/tensor.h" -// pten common headers +// phi common headers #include "paddle/phi/common/backend.h" #include "paddle/phi/common/data_type.h" #include "paddle/phi/common/layout.h" diff --git a/paddle/phi/api/include/tensor.h b/paddle/phi/api/include/tensor.h index db0c28198e80a863030b740d192ef662be43fba6..c268742fa567bffecb2fd17a773ab56aee019853 100644 --- a/paddle/phi/api/include/tensor.h +++ b/paddle/phi/api/include/tensor.h @@ -49,8 +49,6 @@ namespace paddle { namespace experimental { -class CompatiblePTenTensorUtils; - class AbstractAutogradMeta { public: // No AbstractAutogradMeta should be created @@ -59,7 +57,7 @@ class AbstractAutogradMeta { /** * Tensor is the API description of the basic data structure in the - * [ "Paddle Tensor Operation (pten)" Library ]. + * [ "Paddle Tensor Operation (phi)" Library ]. * * It is not limited to a simple n-dimensional array. * It contains a smart pointer to `TensorImpl`. The data description contained @@ -366,7 +364,7 @@ class PADDLE_API Tensor final { /* Part 5: Data Transform methods */ /* Alert!!!!: All copy method can only deep copy impl, autograd info only be * copied */ - /* out of pten */ + /* out of phi */ /** * @brief Copy the current Tensor data to the specified device * and return the new Tensor. It's usually used to set the input tensor data. @@ -476,9 +474,6 @@ class PADDLE_API Tensor final { /* Part 9: Auto generated Tensor methods */ - private: - friend class CompatiblePTenTensorUtils; - private: /** * [ Why use abstract TensorImpl interface here? 
] diff --git a/paddle/phi/api/lib/api_custom_impl.cc b/paddle/phi/api/lib/api_custom_impl.cc index 89a51dde463129b567154171bb17da5e12100ae5..c7400b93fcdc18314318fae9482e1e5e5bfb8aef 100644 --- a/paddle/phi/api/lib/api_custom_impl.cc +++ b/paddle/phi/api/lib/api_custom_impl.cc @@ -58,7 +58,7 @@ Tensor copy_to_impl(const Tensor& x, Backend backend, bool blocking) { auto* kernel_fn = kernel.GetVariadicKernelFn(); (*kernel_fn)( - *dev_ctx, *dense_x, phi::TransToPtenPlace(backend), blocking, kernel_out); + *dev_ctx, *dense_x, phi::TransToPhiPlace(backend), blocking, kernel_out); return out; } diff --git a/paddle/phi/api/lib/api_registry.h b/paddle/phi/api/lib/api_registry.h index 3783620ea449b46ab17ae1ac7d9f7e80ef08cae9..212a2f96452f69496d9ca60fdc3c8cdb643b9679 100644 --- a/paddle/phi/api/lib/api_registry.h +++ b/paddle/phi/api/lib/api_registry.h @@ -27,7 +27,7 @@ namespace experimental { #endif /** - * Now there is no module to call pten's API. When compiling, the function + * Now there is no module to call phi's API. When compiling, the function * implementation will be optimized. Therefore, the symbol will be exposed * manually for the time being. 
* @@ -41,7 +41,7 @@ namespace experimental { #define PD_DECLARE_API(name) \ extern PADDLE_API int RegisterSymbolsFor##name(); \ - UNUSED static int use_pten_api_##name = RegisterSymbolsFor##name() + UNUSED static int use_phi_api_##name = RegisterSymbolsFor##name() } // namespace experimental } // namespace paddle diff --git a/paddle/phi/api/lib/api_utils.h b/paddle/phi/api/lib/api_utils.h index d44dde3b74dd27ee4271504e3837957365db3c27..6c1fa97c0f52a697383a3526220cc758d778823d 100644 --- a/paddle/phi/api/lib/api_utils.h +++ b/paddle/phi/api/lib/api_utils.h @@ -106,7 +106,7 @@ inline paddle::optional MakeMetaTensor( inline phi::DenseTensor* SetKernelOutput(Backend backend, Tensor* out) { if (!out->initialized()) { auto dense_tensor = std::make_shared( - phi::make_intrusive(phi::TransToPtenPlace(backend)), + phi::make_intrusive(phi::TransToPhiPlace(backend)), phi::DenseTensorMeta()); out->set_impl(dense_tensor); return dense_tensor.get(); @@ -120,7 +120,7 @@ inline std::vector SetKernelOutput( std::vector results(out_size); for (size_t i = 0; i < out_size; ++i) { auto tensor_ptr = std::make_shared( - phi::make_intrusive(phi::TransToPtenPlace(backend)), + phi::make_intrusive(phi::TransToPhiPlace(backend)), phi::DenseTensorMeta()); results[i] = tensor_ptr.get(); out->emplace_back(); diff --git a/paddle/phi/api/lib/data_transform.cc b/paddle/phi/api/lib/data_transform.cc index 2074ddd8a9127a652970daf44eedc1ee737d83ef..ae67e2ebb35ccef7fe07ee8c76db33a459b1dfce 100644 --- a/paddle/phi/api/lib/data_transform.cc +++ b/paddle/phi/api/lib/data_transform.cc @@ -38,7 +38,7 @@ inline bool NeedTransformPlace(const paddle::platform::Place& input, const TransformFlag& transform_flag) { bool ret = transform_flag.need_trans_backend() && target != Backend::ALL_BACKEND && - !platform::is_same_place(input, phi::TransToPtenPlace(target)); + !platform::is_same_place(input, phi::TransToPhiPlace(target)); return ret; } @@ -168,10 +168,10 @@ phi::DenseTensor TransformData(const 
phi::DenseTensor& tensor, out.place(), target_args_def.backend, transform_flag)) { phi::DenseTensor result( phi::make_intrusive( - phi::TransToPtenPlace(target_args_def.backend)), + phi::TransToPhiPlace(target_args_def.backend)), {out.dtype(), out.dims(), out.layout()}); framework::TransDataDevice( - out, phi::TransToPtenPlace(target_args_def.backend), &result); + out, phi::TransToPhiPlace(target_args_def.backend), &result); out = result; } return out; diff --git a/paddle/phi/api/lib/kernel_dispatch.cc b/paddle/phi/api/lib/kernel_dispatch.cc index 5251473f3b5c9ab272499436c8a2091725449644..0e3ca1af4967c2bf2ae302ea656a31198d187f01 100644 --- a/paddle/phi/api/lib/kernel_dispatch.cc +++ b/paddle/phi/api/lib/kernel_dispatch.cc @@ -21,7 +21,7 @@ namespace experimental { namespace detail { BackendSet GetTensorBackendSet(const Tensor& t) { - BackendSet backend_set(phi::TransToPtenBackend(t.inner_place())); + BackendSet backend_set(phi::TransToPhiBackend(t.inner_place())); switch (t.layout()) { case DataLayout::MKLDNN: backend_set = backend_set | BackendSet(Backend::MKLDNN); @@ -53,7 +53,7 @@ std::size_t CountLeadingZeros(uint64_t val) { phi::DeviceContext* GetDeviceContextByBackend(phi::Backend backend) { auto& pool = paddle::platform::DeviceContextPool::Instance(); - return pool.Get(phi::TransToPtenPlace(backend)); + return pool.Get(phi::TransToPhiPlace(backend)); } DataType ParseDataType(DataType dtype) { return dtype; } @@ -83,7 +83,7 @@ DataType ParseDataTypeWithInputOrder(DataType dtype, const Tensor& tensor) { Backend ParseBackend(Backend backend) { return backend; } Backend ParseBackend(const Tensor& tensor) { - return phi::TransToPtenBackend(tensor.inner_place()); + return phi::TransToPhiBackend(tensor.inner_place()); } Backend ParseBackendWithInputOrder(Backend backend, const Tensor& tensor) { diff --git a/paddle/phi/api/lib/sparse_api.cc b/paddle/phi/api/lib/sparse_api.cc index c0c10e0ac6a48be054b1c8292410fe184ddf2694..9e1f59c0aa74329b15efcbff123b137fbf0b1360 
100644 --- a/paddle/phi/api/lib/sparse_api.cc +++ b/paddle/phi/api/lib/sparse_api.cc @@ -86,11 +86,11 @@ PADDLE_API Tensor to_sparse_coo(const Tensor& x, // create empty SparseCooTensor phi::DenseTensor non_zero_indices( phi::make_intrusive( - phi::TransToPtenPlace(backend)), + phi::TransToPhiPlace(backend)), std::move(indices_meta)); phi::DenseTensor non_zero_elements( phi::make_intrusive( - phi::TransToPtenPlace(backend)), + phi::TransToPhiPlace(backend)), std::move(elements_meta)); auto coo = std::make_shared( non_zero_indices, non_zero_elements, x.dims()); @@ -148,15 +148,15 @@ PADDLE_API Tensor to_sparse_csr(const Tensor& x, Backend backend) { // create empty SparseCooTensor phi::DenseTensor non_zero_crows( phi::make_intrusive( - phi::TransToPtenPlace(backend)), + phi::TransToPhiPlace(backend)), std::move(crows_meta)); phi::DenseTensor non_zero_cols( phi::make_intrusive( - phi::TransToPtenPlace(backend)), + phi::TransToPhiPlace(backend)), std::move(cols_meta)); phi::DenseTensor non_zero_elements( phi::make_intrusive( - phi::TransToPtenPlace(backend)), + phi::TransToPhiPlace(backend)), std::move(elements_meta)); auto csr = std::make_shared( non_zero_crows, non_zero_cols, non_zero_elements, x.dims()); @@ -211,7 +211,7 @@ PADDLE_API Tensor to_dense(const Tensor& x, Backend backend) { // create empty SparseCooTensor auto dense_out = std::make_shared( phi::make_intrusive( - phi::TransToPtenPlace(backend)), + phi::TransToPhiPlace(backend)), std::move(dense_meta)); kernel_context.EmplaceBackOutput(dense_out.get()); diff --git a/paddle/phi/api/lib/tensor.cc b/paddle/phi/api/lib/tensor.cc index ada08019f678aba59ee9cb27afc20382be62fd8b..311dd0fc30941d2afb9f1bc1e7ae57f3a449a254 100644 --- a/paddle/phi/api/lib/tensor.cc +++ b/paddle/phi/api/lib/tensor.cc @@ -33,7 +33,7 @@ limitations under the License. 
*/ * * We hope to organize the basic implementation of Tensor and the logic related * to Tensor computation into an independent library, which we call - * [Tensor Operation Library, pten], so we extract or rewrite the original + * [Tensor Operation Library, phi], so we extract or rewrite the original * Kernels. * * In the future, the training library, inference library and custom operators diff --git a/paddle/phi/api/lib/tensor_method.cc b/paddle/phi/api/lib/tensor_method.cc index 7308a9d752c7a7a4a5970c0040e82ce2d65f613e..aefa26952d1e5f224112576bfbd74be80cca72cc 100644 --- a/paddle/phi/api/lib/tensor_method.cc +++ b/paddle/phi/api/lib/tensor_method.cc @@ -88,7 +88,7 @@ void Tensor::copy_(const Tensor &src, bool blocking) { src.name())); } auto copy_tensor = - src.copy_to(phi::TransToPtenBackend(src.inner_place()), blocking); + src.copy_to(phi::TransToPhiBackend(src.inner_place()), blocking); set_impl(copy_tensor.impl()); } diff --git a/paddle/phi/api/lib/utils/tensor_utils.cc b/paddle/phi/api/lib/utils/tensor_utils.cc index fc56d201fe3ccc736fdef834e69426e5f0384bf9..31325e22afae31e55a3a2d939739d6745ccd3d36 100644 --- a/paddle/phi/api/lib/utils/tensor_utils.cc +++ b/paddle/phi/api/lib/utils/tensor_utils.cc @@ -31,13 +31,13 @@ void SetLoD(DstLoD* dst, const SrcLoD& src) { } } -std::unique_ptr MakePtenDenseTensor( +std::unique_ptr MakePhiDenseTensor( const paddle::framework::Tensor& src) { return std::make_unique(src); } -phi::Scalar MakePtenScalarFromVar(const framework::Variable& variable) { - auto expected_place = phi::TransToPtenPlace(phi::Backend::CPU); +phi::Scalar MakePhiScalarFromVar(const framework::Variable& variable) { + auto expected_place = phi::TransToPhiPlace(phi::Backend::CPU); if (variable.IsType()) { const auto& tensor = variable.Get(); if (!platform::is_same_place(tensor.place(), expected_place)) { @@ -55,21 +55,21 @@ phi::Scalar MakePtenScalarFromVar(const framework::Variable& variable) { } } -phi::ScalarArray MakePtenScalarArray(const 
paddle::framework::Tensor& src) { +phi::ScalarArray MakePhiScalarArray(const paddle::framework::Tensor& src) { return {src}; } -phi::ScalarArray MakePtenScalarArrayFromVar( +phi::ScalarArray MakePhiScalarArrayFromVar( const framework::Variable& variable) { - auto expected_place = phi::TransToPtenPlace(phi::Backend::CPU); + auto expected_place = phi::TransToPhiPlace(phi::Backend::CPU); if (variable.IsType()) { const auto& tensor = variable.Get(); if (!platform::is_same_place(tensor.place(), expected_place)) { framework::LoDTensor tmp_tensor; framework::TensorCopySync(tensor, expected_place, &tmp_tensor); - return MakePtenScalarArray(tmp_tensor); + return MakePhiScalarArray(tmp_tensor); } else { - return MakePtenScalarArray(tensor); + return MakePhiScalarArray(tensor); } } else { PADDLE_THROW(platform::errors::Unimplemented( @@ -80,12 +80,12 @@ phi::ScalarArray MakePtenScalarArrayFromVar( } // TODO(chentianyu03): Inplace with ScalarArray constructor -phi::ScalarArray MakePtenScalarArrayFromVarList( +phi::ScalarArray MakePhiScalarArrayFromVarList( const std::vector& variable_list) { if (variable_list.size() == 0) { return phi::ScalarArray(); } - auto expected_place = phi::TransToPtenPlace(phi::Backend::CPU); + auto expected_place = phi::TransToPhiPlace(phi::Backend::CPU); std::vector vector_data; vector_data.reserve(variable_list.size()); diff --git a/paddle/phi/api/lib/utils/tensor_utils.h b/paddle/phi/api/lib/utils/tensor_utils.h index 51aca6a52b41cd59858f3c138423c3debdb40eaf..8b30d5421ab943d568a046ca0fe4698849780ffd 100644 --- a/paddle/phi/api/lib/utils/tensor_utils.h +++ b/paddle/phi/api/lib/utils/tensor_utils.h @@ -30,17 +30,16 @@ limitations under the License. 
*/ namespace paddle { namespace experimental { -std::unique_ptr MakePtenDenseTensor( +std::unique_ptr MakePhiDenseTensor( const paddle::framework::Tensor& src); -phi::ScalarArray MakePtenScalarArray(const paddle::framework::Tensor& src); +phi::ScalarArray MakePhiScalarArray(const paddle::framework::Tensor& src); -phi::Scalar MakePtenScalarFromVar(const framework::Variable& variable); +phi::Scalar MakePhiScalarFromVar(const framework::Variable& variable); -phi::ScalarArray MakePtenScalarArrayFromVar( - const framework::Variable& variable); +phi::ScalarArray MakePhiScalarArrayFromVar(const framework::Variable& variable); -phi::ScalarArray MakePtenScalarArrayFromVarList( +phi::ScalarArray MakePhiScalarArrayFromVarList( const std::vector& variable_list); void ResetTensorDtypeAndLayoutByArgDef(phi::TensorBase* dst, diff --git a/paddle/phi/backends/all_context.h b/paddle/phi/backends/all_context.h index 3fe03905e42dd33afeedb3a04c2deae6fb0ca1ee..57e6f084fd4c9a643822ddeb46418b0587cb982e 100644 --- a/paddle/phi/backends/all_context.h +++ b/paddle/phi/backends/all_context.h @@ -18,7 +18,7 @@ limitations under the License. */ // In order to avoid including the header files of each backend in turn, // add this header file // Note: Limit the entry of DeviceContext to backends to avoid multiple include -// path replacement after implementing pten DeviceContext +// path replacement after implementing phi DeviceContext #include "paddle/phi/backends/cpu/cpu_context.h" #include "paddle/phi/backends/custom/custom_context.h" diff --git a/paddle/phi/backends/cpu/cpu_context.h b/paddle/phi/backends/cpu/cpu_context.h index e67df65850f15545d7da7a21c5edf30c53661b4d..aa14c2a8e3862139b3149bbcdcfa169d7c292377 100644 --- a/paddle/phi/backends/cpu/cpu_context.h +++ b/paddle/phi/backends/cpu/cpu_context.h @@ -19,7 +19,7 @@ limitations under the License. 
*/ #include "paddle/phi/backends/cpu/forwards.h" #include "paddle/phi/core/device_context.h" -// TODO(wilber): Do we need to use place in pten kernel? +// TODO(wilber): Do we need to use place in phi kernel? #include "paddle/phi/common/place.h" namespace phi { diff --git a/paddle/phi/backends/gpu/gpu_context.cc b/paddle/phi/backends/gpu/gpu_context.cc index 28057abed542abd2c120d1199dab7ba776929812..dbcc1660c6472cdddaaa3bea72854f61370c19a0 100644 --- a/paddle/phi/backends/gpu/gpu_context.cc +++ b/paddle/phi/backends/gpu/gpu_context.cc @@ -49,7 +49,7 @@ limitations under the License. */ // without eigen. #include "unsupported/Eigen/CXX11/Tensor" -// TODO(pten): remove fluid header. +// TODO(phi): remove fluid header. #include "paddle/fluid/platform/enforce.h" namespace phi { diff --git a/paddle/phi/backends/gpu/rocm/rocm_info.cc b/paddle/phi/backends/gpu/rocm/rocm_info.cc index 11dd4f724878266d52fdcbeee031b6ac6a9a9438..23e58d34b25725c048a39244d27f0afd0a917e0f 100644 --- a/paddle/phi/backends/gpu/rocm/rocm_info.cc +++ b/paddle/phi/backends/gpu/rocm/rocm_info.cc @@ -15,7 +15,7 @@ #include #include "paddle/phi/backends/gpu/gpu_info.h" -// TODO(pten): remove fluid headers. +// TODO(phi): remove fluid headers. #include "paddle/fluid/platform/enforce.h" static std::once_flag g_device_props_size_init_flag; diff --git a/paddle/phi/backends/xpu/xpu_info.cc b/paddle/phi/backends/xpu/xpu_info.cc index 96e95df7a9886f2bb1b5485c822a98d4f42b5f12..d454fc0734c66aca37a55c53ec5a2d9206cfcc5b 100644 --- a/paddle/phi/backends/xpu/xpu_info.cc +++ b/paddle/phi/backends/xpu/xpu_info.cc @@ -19,7 +19,7 @@ limitations under the License. */ #include "paddle/phi/backends/xpu/xpu_header.h" #include "paddle/phi/common/place.h" -// TODO(wilber): The pten computing library requires a component to manage +// TODO(wilber): The phi computing library requires a component to manage // flags. 
#include "paddle/fluid/platform/flags.h" diff --git a/paddle/phi/common/layout.h b/paddle/phi/common/layout.h index 30832bd60bc0ea167b37de08240aad06c0fe7d1b..648fc02d054cbfd89991e66801c1dac5dffbfe69 100644 --- a/paddle/phi/common/layout.h +++ b/paddle/phi/common/layout.h @@ -32,7 +32,7 @@ enum class DataLayout { NUM_DATA_LAYOUTS, // See Note [ Why we need ALL in basic kernel key member? ] ALL_LAYOUT = UNDEFINED, - // Note: Unify pten DataLayout and fluid::framework::DataLayout, + // Note: Unify phi DataLayout and fluid::framework::DataLayout, // for compatible with fluid DataLayout, here need prefix `k` // Note: The original `kAnyLayout (enum value 2)` is a strange design. // `kAnyLayout` originally cannot represent any kind of Layout, diff --git a/paddle/phi/common/place.cc b/paddle/phi/common/place.cc index bc179e8fed74e22fd85d7ff9372d816edfdce575..644bf3679af2a3ebf05f739a6e8d42011c7e664c 100644 --- a/paddle/phi/common/place.cc +++ b/paddle/phi/common/place.cc @@ -43,7 +43,7 @@ const char *AllocationTypeStr(AllocationType type) { case AllocationType::MLU: return "mlu"; default: - PD_THROW("Invalid pten device type."); + PD_THROW("Invalid phi device type."); return {}; } } diff --git a/paddle/phi/core/compat/arg_map_context.h b/paddle/phi/core/compat/arg_map_context.h index 39cb3fb5692679ccd624fd2d79bec2bbeb04d257..af29b3bab5c3cc4b2e1caeb4eee9689179464d01 100644 --- a/paddle/phi/core/compat/arg_map_context.h +++ b/paddle/phi/core/compat/arg_map_context.h @@ -79,7 +79,7 @@ class ArgumentMappingContext { virtual bool HasOutput(const std::string& name) const = 0; virtual bool HasAttr(const std::string& name) const = 0; - // now we can't use Attribute here, it will cause pten relay on + // now we can't use Attribute here, it will cause phi relay on // boost::variant and BlockDesc virtual paddle::any Attr(const std::string& name) const = 0; diff --git a/paddle/phi/core/compat/convert_utils.cc b/paddle/phi/core/compat/convert_utils.cc index 
f7dab1d34c98015bb5b8922fa0baa54890f43735..3b7a733ede90464328600ebd3c7d371314b99cc3 100644 --- a/paddle/phi/core/compat/convert_utils.cc +++ b/paddle/phi/core/compat/convert_utils.cc @@ -25,7 +25,7 @@ limitations under the License. */ namespace phi { -Backend TransToPtenBackend(const phi::Place& place) { +Backend TransToPhiBackend(const phi::Place& place) { if (place.GetType() == phi::AllocationType::CPU) { return Backend::CPU; } else if (place.GetType() == phi::AllocationType::GPU) { @@ -41,7 +41,7 @@ Backend TransToPtenBackend(const phi::Place& place) { } } -phi::Place TransToPtenPlace(const Backend& backend, bool set_device_id) { +phi::Place TransToPhiPlace(const Backend& backend, bool set_device_id) { // NOTE(zhiqiu): GetCurrentDeviceId not always success, and device id is not // always needed. // So, add set_device_id parameter here. @@ -87,21 +87,21 @@ phi::Place TransToPtenPlace(const Backend& backend, bool set_device_id) { } } -std::string TransToPtenKernelName(const std::string& fluid_op_name) { +std::string TransToPhiKernelName(const std::string& fluid_op_name) { return OpUtilsMap::Instance().GetBaseKernelName(fluid_op_name); } -const std::string& TransToFluidOpName(const std::string& pten_kernel_name) { +const std::string& TransToFluidOpName(const std::string& phi_kernel_name) { auto& base_kernel_name_map = OpUtilsMap::Instance().base_kernel_name_map(); auto it = std::find_if(base_kernel_name_map.begin(), base_kernel_name_map.end(), - [&pten_kernel_name](const auto& pair) { - return pair.second == pten_kernel_name; + [&phi_kernel_name](const auto& pair) { + return pair.second == phi_kernel_name; }); if (it != base_kernel_name_map.end()) { return it->first; } - return pten_kernel_name; + return phi_kernel_name; } } // namespace phi diff --git a/paddle/phi/core/compat/convert_utils.h b/paddle/phi/core/compat/convert_utils.h index 058f0ecdf7bc2b5c81a55eb1a6e94cb5ddc30296..621459764873e6681d57813b227076db0b44dd04 100644 --- 
a/paddle/phi/core/compat/convert_utils.h +++ b/paddle/phi/core/compat/convert_utils.h @@ -22,10 +22,10 @@ limitations under the License. */ namespace phi { -std::string TransToPtenKernelName(const std::string& fluid_op_name); -const std::string& TransToFluidOpName(const std::string& pten_kernel_name); +std::string TransToPhiKernelName(const std::string& fluid_op_name); +const std::string& TransToFluidOpName(const std::string& phi_kernel_name); -Backend TransToPtenBackend(const phi::Place& place); -phi::Place TransToPtenPlace(const Backend& backend, bool set_device_id = true); +Backend TransToPhiBackend(const phi::Place& place); +phi::Place TransToPhiPlace(const Backend& backend, bool set_device_id = true); } // namespace phi diff --git a/paddle/phi/core/custom_kernel.cc b/paddle/phi/core/custom_kernel.cc index 75ff9cc28600373eb1f074c0ed91b774ec9ab85a..f84a2bd8d9c5d0634f29485fc07f649ea9fb1b9e 100644 --- a/paddle/phi/core/custom_kernel.cc +++ b/paddle/phi/core/custom_kernel.cc @@ -22,7 +22,7 @@ void RegisterCustomKernels(const CustomKernelMap& custom_kernel_map) { for (auto& pair : kernel_info_map) { PADDLE_ENFORCE_EQ( - KernelFactory::Instance().HasCompatiblePtenKernel(pair.first), + KernelFactory::Instance().HasCompatiblePhiKernel(pair.first), true, phi::errors::InvalidArgument( "The kernel %s is not ready for custom kernel registering.", diff --git a/paddle/phi/core/dense_tensor_impl.cc b/paddle/phi/core/dense_tensor_impl.cc index 6ce8bea35d9dd68353a6677b6e59d3e004c68185..29e7dc01f32db20e3756677fe8a48fcb138b3883 100644 --- a/paddle/phi/core/dense_tensor_impl.cc +++ b/paddle/phi/core/dense_tensor_impl.cc @@ -161,7 +161,7 @@ void* DenseTensor::mutable_data(const Place& place, /* @jim19930609: The following "mutable_data" only supports specific dtypes defined in OpProto. This part need another clean up once the data type across Fluid - and Pten get unified. + and Phi get unified. 
*/ template inline T* DenseTensor::mutable_data(const DDim& dims, diff --git a/paddle/phi/core/kernel_factory.h b/paddle/phi/core/kernel_factory.h index b31bedd958b4b5bfdf32e80ab81e44dd3307e520..be91409762635e8aabdd6953aa5527d94959e4b2 100644 --- a/paddle/phi/core/kernel_factory.h +++ b/paddle/phi/core/kernel_factory.h @@ -225,8 +225,8 @@ class KernelFactory { KernelNameMap& kernels() { return kernels_; } - bool HasCompatiblePtenKernel(const std::string& op_type) const { - return kernels_.find(TransToPtenKernelName(op_type)) != kernels_.end(); + bool HasCompatiblePhiKernel(const std::string& op_type) const { + return kernels_.find(TransToPhiKernelName(op_type)) != kernels_.end(); } const Kernel& SelectKernelOrThrowError(const std::string& kernel_name, diff --git a/paddle/phi/core/utils/data_type.h b/paddle/phi/core/utils/data_type.h index efb01d6664238f2dacf6a7860c41fd6ce58757f6..a190b222f86ac4145f7ad02eab043a03038c1096 100644 --- a/paddle/phi/core/utils/data_type.h +++ b/paddle/phi/core/utils/data_type.h @@ -23,39 +23,39 @@ limitations under the License. 
*/ namespace phi { -#define _PtenForEachDataTypeHelper_(callback, cpp_type, data_type) \ +#define _PhiForEachDataTypeHelper_(callback, cpp_type, data_type) \ callback(cpp_type, data_type); -#define _PtenForEachDataType_(callback) \ - _PtenForEachDataTypeHelper_(callback, float, DataType::FLOAT32); \ - _PtenForEachDataTypeHelper_( \ +#define _PhiForEachDataType_(callback) \ + _PhiForEachDataTypeHelper_(callback, float, DataType::FLOAT32); \ + _PhiForEachDataTypeHelper_( \ callback, ::phi::dtype::float16, DataType::FLOAT16); \ - _PtenForEachDataTypeHelper_( \ + _PhiForEachDataTypeHelper_( \ callback, ::phi::dtype::bfloat16, DataType::BFLOAT16); \ - _PtenForEachDataTypeHelper_(callback, double, DataType::FLOAT64); \ - _PtenForEachDataTypeHelper_(callback, int, DataType::INT32); \ - _PtenForEachDataTypeHelper_(callback, int64_t, DataType::INT64); \ - _PtenForEachDataTypeHelper_(callback, bool, DataType::BOOL); \ - _PtenForEachDataTypeHelper_(callback, uint8_t, DataType::UINT8); \ - _PtenForEachDataTypeHelper_(callback, int16_t, DataType::INT16); \ - _PtenForEachDataTypeHelper_(callback, int8_t, DataType::INT8); \ - _PtenForEachDataTypeHelper_( \ + _PhiForEachDataTypeHelper_(callback, double, DataType::FLOAT64); \ + _PhiForEachDataTypeHelper_(callback, int, DataType::INT32); \ + _PhiForEachDataTypeHelper_(callback, int64_t, DataType::INT64); \ + _PhiForEachDataTypeHelper_(callback, bool, DataType::BOOL); \ + _PhiForEachDataTypeHelper_(callback, uint8_t, DataType::UINT8); \ + _PhiForEachDataTypeHelper_(callback, int16_t, DataType::INT16); \ + _PhiForEachDataTypeHelper_(callback, int8_t, DataType::INT8); \ + _PhiForEachDataTypeHelper_( \ callback, ::phi::dtype::complex, DataType::COMPLEX64); \ - _PtenForEachDataTypeHelper_( \ + _PhiForEachDataTypeHelper_( \ callback, ::phi::dtype::complex, DataType::COMPLEX128); template inline void VisitDataType(phi::DataType type, Visitor visitor) { -#define PtenVisitDataTypeCallback(cpp_type, data_type) \ - do { \ - if (type == 
data_type) { \ - visitor.template apply(); \ - return; \ - } \ +#define PhiVisitDataTypeCallback(cpp_type, data_type) \ + do { \ + if (type == data_type) { \ + visitor.template apply(); \ + return; \ + } \ } while (0) - _PtenForEachDataType_(PtenVisitDataTypeCallback); -#undef PtenVisitDataTypeCallback + _PhiForEachDataType_(PhiVisitDataTypeCallback); +#undef PhiVisitDataTypeCallback PADDLE_THROW(phi::errors::Unimplemented( "Not supported phi::DataType(%d) as data type.", static_cast(type))); } diff --git a/paddle/phi/kernels/diagonal_kernel.h b/paddle/phi/kernels/diagonal_kernel.h index f233ba2a956276ad22819d49c30fbcbaf8a671c3..7cf7282307a4b91a771441d3218121b606afdf81 100644 --- a/paddle/phi/kernels/diagonal_kernel.h +++ b/paddle/phi/kernels/diagonal_kernel.h @@ -25,4 +25,4 @@ void DiagonalKernel(const Context& dev_ctx, int axis1, int axis2, DenseTensor* out); -} // pten +} // namespace phi diff --git a/paddle/phi/kernels/digamma_grad_kernel.h b/paddle/phi/kernels/digamma_grad_kernel.h index b95d98895aa8edda497a730281603028b98bc4f0..38912a5ccc442b6ea5fb484b708754dd706ae706 100644 --- a/paddle/phi/kernels/digamma_grad_kernel.h +++ b/paddle/phi/kernels/digamma_grad_kernel.h @@ -24,4 +24,4 @@ void DigammaGradKernel(const Context& ctx, const DenseTensor& x, DenseTensor* x_grad); -} // namepsace pten +} // namespace phi diff --git a/paddle/phi/kernels/digamma_kernel.h b/paddle/phi/kernels/digamma_kernel.h index 1772a33e4ee4cd88d80705971462b632c1015c3c..ce25f2e148e963054fcfa2a51321954b45a4297b 100644 --- a/paddle/phi/kernels/digamma_kernel.h +++ b/paddle/phi/kernels/digamma_kernel.h @@ -21,4 +21,4 @@ namespace phi { template void DigammaKernel(const Context& ctx, const DenseTensor& x, DenseTensor* out); -} // namepsace pten +} // namespace phi diff --git a/paddle/phi/kernels/expand_kernel.h b/paddle/phi/kernels/expand_kernel.h index eb32ed24568599b2966f1f7772b8e9f6e710063b..fb5a0112ffcf7120314471db3c30b0e72a2b9c81 100644 --- a/paddle/phi/kernels/expand_kernel.h +++ 
b/paddle/phi/kernels/expand_kernel.h @@ -26,4 +26,4 @@ void ExpandKernel(const Context& ctx, const ScalarArray& shape, DenseTensor* out); -} // namepsace pten +} // namespace phi diff --git a/paddle/phi/kernels/masked_select_grad_kernel.h b/paddle/phi/kernels/masked_select_grad_kernel.h index fd16091a665ca983cd5185eb7e12a2928052794a..f9db1fcd2acc7a7924d9b9e393550a74d0d0ac81 100644 --- a/paddle/phi/kernels/masked_select_grad_kernel.h +++ b/paddle/phi/kernels/masked_select_grad_kernel.h @@ -24,4 +24,4 @@ void MaskedSelectGradKernel(const Context& dev_ctx, const DenseTensor& mask, DenseTensor* x_grad); -} // namspace pten +} // namespace phi diff --git a/paddle/phi/kernels/masked_select_kernel.h b/paddle/phi/kernels/masked_select_kernel.h index abd3c318986d81cb14c0f8ecdd449faf1b48cf3a..471f650690d367da132e0ad2e8da441394b7aff2 100644 --- a/paddle/phi/kernels/masked_select_kernel.h +++ b/paddle/phi/kernels/masked_select_kernel.h @@ -23,4 +23,4 @@ void MaskedSelectKernel(const Context& dev_ctx, const DenseTensor& mask, DenseTensor* out); -} // namspace pten +} // namespace phi diff --git a/paddle/phi/kernels/transfer_layout_kernel.cc b/paddle/phi/kernels/transfer_layout_kernel.cc index c981ca115850707857ed1f25a9e546138d9d950c..60df877355b8268efafddfdc2b452617cdadf9df 100644 --- a/paddle/phi/kernels/transfer_layout_kernel.cc +++ b/paddle/phi/kernels/transfer_layout_kernel.cc @@ -69,7 +69,7 @@ void TransferLayoutKernel(const Context& dev_ctx, } // namespace phi -PD_REGISTER_GENERAL_KERNEL(pten_transfer_layout, +PD_REGISTER_GENERAL_KERNEL(phi_transfer_layout, CPU, ALL_LAYOUT, phi::TransferLayoutKernel, diff --git a/paddle/phi/ops/compat/scale_sig.cc b/paddle/phi/ops/compat/scale_sig.cc index 915ea4ce302aea6a4a11f1c0745229fb09c1d8c8..95deb007d99d9c42bbc2cc22faed2a44fa58b0f5 100644 --- a/paddle/phi/ops/compat/scale_sig.cc +++ b/paddle/phi/ops/compat/scale_sig.cc @@ -20,7 +20,7 @@ namespace phi { * Note [ Why does the ArgumentMapping function need to be so complicated? 
] * * In order to meet the requirements of infrt, the function used to match Op - * and Kernel parameters, need to be placed in pten as a compatible component, + * and Kernel parameters, need to be placed in phi as a compatible component, * and does not depend on fluid. * * Because infrt not only needs to dynamically call this argument mapping diff --git a/paddle/phi/tests/api/scale_api.h b/paddle/phi/tests/api/scale_api.h index 829b93b88b4f9d9f8bc0ffc325a5937b002b2633..d93f00129b9a14170b979dfd23eb6e292e996ce8 100644 --- a/paddle/phi/tests/api/scale_api.h +++ b/paddle/phi/tests/api/scale_api.h @@ -71,7 +71,7 @@ PADDLE_API Tensor scale_kernel_context(const Tensor& x, auto dense_out = std::make_shared( phi::make_intrusive( - phi::TransToPtenPlace(kernel_backend)), + phi::TransToPhiPlace(kernel_backend)), phi::DenseTensorMeta()); phi::MetaTensor meta_out(dense_out.get()); phi::UnchangedInferMeta(*dense_x, &meta_out); @@ -238,7 +238,7 @@ Tensor scale_switch_case(const Tensor& x, auto dense_out = std::make_shared( phi::make_intrusive( - phi::TransToPtenPlace(kernel_backend)), + phi::TransToPhiPlace(kernel_backend)), phi::DenseTensorMeta()); phi::MetaTensor meta_out(dense_out.get()); phi::UnchangedInferMeta(*dense_x, &meta_out); diff --git a/paddle/phi/tests/api/test_data_transform.cc b/paddle/phi/tests/api/test_data_transform.cc index 2e38a1593461e26849e05935ac42e58a3831162a..a3c497bd427ae040b33dce241a70ecaafee5fbcc 100644 --- a/paddle/phi/tests/api/test_data_transform.cc +++ b/paddle/phi/tests/api/test_data_transform.cc @@ -83,7 +83,7 @@ TEST(Tensor, data_transform_diff_place) { ASSERT_EQ(out.layout(), phi::DataLayout::NCHW); ASSERT_EQ(out.initialized(), true); ASSERT_EQ(out.impl()->place(), - phi::TransToPtenPlace(experimental::Backend::GPU)); + phi::TransToPhiPlace(experimental::Backend::GPU)); auto ref_out = experimental::copy_to(out, experimental::Backend::CPU, true); diff --git a/paddle/phi/tests/api/test_pten_tensor.cc b/paddle/phi/tests/api/test_pten_tensor.cc 
index de88561c4d675c137afb3fab664342f15de72c86..dc2883c1794e2c986ed5446981b749f5f4dd0bc2 100644 --- a/paddle/phi/tests/api/test_pten_tensor.cc +++ b/paddle/phi/tests/api/test_pten_tensor.cc @@ -211,7 +211,7 @@ void TestJudgeTensorType() { CHECK(test_tensor.is_dense_tensor() == true); } -TEST(PtenTensor, All) { +TEST(PhiTensor, All) { VLOG(2) << "TestCopy"; GroupTestCopy(); VLOG(2) << "TestDtype"; diff --git a/paddle/phi/tests/common/test_place.cc b/paddle/phi/tests/common/test_place.cc index c311a6733b04df645c0ee4c70e04b9f635377b04..ed2eb7126ed289c0eb31f4ac14be8492515afa60 100644 --- a/paddle/phi/tests/common/test_place.cc +++ b/paddle/phi/tests/common/test_place.cc @@ -20,7 +20,7 @@ limitations under the License. */ namespace phi { namespace tests { -TEST(PtenPlace, place) { +TEST(PhiPlace, place) { phi::Place place; EXPECT_EQ(place.GetType(), phi::AllocationType::UNDEFINED); diff --git a/paddle/phi/tests/core/test_custom_kernel.cc b/paddle/phi/tests/core/test_custom_kernel.cc index bc75e6ec45245eceb7f919cd96fd4e76f0af9409..d8e42c9d0d8b11d393dbb71776671d9cb50a7715 100644 --- a/paddle/phi/tests/core/test_custom_kernel.cc +++ b/paddle/phi/tests/core/test_custom_kernel.cc @@ -148,9 +148,9 @@ TEST(CustomKernel, custom_kernel_dot) { // 3.before register auto& kernel_factory_instance = phi::KernelFactory::Instance(); auto& kernels = phi::KernelFactory::Instance().kernels(); - EXPECT_TRUE(!kernel_factory_instance.HasCompatiblePtenKernel(op_name)); + EXPECT_TRUE(!kernel_factory_instance.HasCompatiblePhiKernel(op_name)); - // mock fake_dot is supported by phi for HasCompatiblePtenKernel check while + // mock fake_dot is supported by phi for HasCompatiblePhiKernel check while // registering auto& fake_dot_kernels = kernels[op_name]; @@ -251,7 +251,7 @@ TEST(CustomKernel, custom_kernel_dot) { phi::dtype::float16 fake_attr_f16 = phi::dtype::float16(5); phi::DataType fake_attr_dtype = phi::DataType::UINT32; paddle::framework::LoDTensor tmp_tensor; - 
tmp_tensor.mutable_data({1}, phi::TransToPtenPlace(backend)); + tmp_tensor.mutable_data({1}, phi::TransToPhiPlace(backend)); phi::Scalar fake_attr_scalar{tmp_tensor}; phi::ScalarArray fake_attr_scalar_array; std::vector fake_attr_int64_vec; @@ -271,7 +271,7 @@ TEST(CustomKernel, custom_kernel_dot) { auto dense_out = std::make_shared( phi::make_intrusive( - phi::TransToPtenPlace(backend)), + phi::TransToPhiPlace(backend)), phi::DenseTensorMeta()); phi::MetaTensor meta_out(dense_out.get()); diff --git a/python/paddle/fluid/tests/custom_op/custom_linear_op.cc b/python/paddle/fluid/tests/custom_op/custom_linear_op.cc index 6e0b44b71f7f87447bd66a052f0a394ab38b2874..76158596cb815022bc1a92cde75c9bd51be92857 100644 --- a/python/paddle/fluid/tests/custom_op/custom_linear_op.cc +++ b/python/paddle/fluid/tests/custom_op/custom_linear_op.cc @@ -17,9 +17,9 @@ limitations under the License. */ #include "paddle/extension.h" // The linear implemented here must be passed in bias -std::vector PtenLinearForward(const paddle::Tensor& x, - const paddle::Tensor& weight, - const paddle::Tensor& bias) { +std::vector PhiLinearForward(const paddle::Tensor& x, + const paddle::Tensor& weight, + const paddle::Tensor& bias) { return { paddle::experimental::add(paddle::experimental::matmul(x, weight), bias)}; } @@ -90,6 +90,6 @@ std::vector LinearInferDtype( PD_BUILD_OP(pten_linear) .Inputs({"X", "Weight", "Bias"}) .Outputs({"Out"}) - .SetKernelFn(PD_KERNEL(PtenLinearForward)) + .SetKernelFn(PD_KERNEL(PhiLinearForward)) .SetInferShapeFn(PD_INFER_SHAPE(LinearInferShape)) .SetInferDtypeFn(PD_INFER_DTYPE(LinearInferDtype)); diff --git a/python/paddle/fluid/tests/unittests/test_get_all_registered_op_kernels.py b/python/paddle/fluid/tests/unittests/test_get_all_registered_op_kernels.py index 815598d9017665291878d43c6f1195d7681214f8..a429717bdaf37b3724820d3e074c38a216634cdf 100644 --- a/python/paddle/fluid/tests/unittests/test_get_all_registered_op_kernels.py +++ 
b/python/paddle/fluid/tests/unittests/test_get_all_registered_op_kernels.py @@ -19,13 +19,13 @@ from paddle import compat as cpt class TestGetAllRegisteredOpKernels(unittest.TestCase): - # reshape kernel is in fluid while not in pten - def test_pten_kernels(self): - self.assertTrue(core._get_all_register_op_kernels('pten')['sign']) + # reshape kernel is in fluid while not in phi + def test_phi_kernels(self): + self.assertTrue(core._get_all_register_op_kernels('phi')['sign']) with self.assertRaises(KeyError): - core._get_all_register_op_kernels('pten')['reshape'] + core._get_all_register_op_kernels('phi')['reshape'] - # sign kernel is removed from fluid and added into pten + # sign kernel is removed from fluid and added into phi def test_fluid_kernels(self): self.assertTrue(core._get_all_register_op_kernels('fluid')['reshape']) with self.assertRaises(KeyError): diff --git a/python/setup.py.in b/python/setup.py.in index f39429387dbc3e309edd1ebc60767071811c5ee4..ec1b1cbcb9510c80a42dff49fa1a5121a9cb487f 100755 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -571,13 +571,13 @@ def find_files(pattern, root, recursive=False): headers = ( # paddle level api headers list(find_files('*.h', '@PADDLE_SOURCE_DIR@/paddle')) + - list(find_files('*.h', '@PADDLE_SOURCE_DIR@/paddle/phi/api')) + # pten unify api header + list(find_files('*.h', '@PADDLE_SOURCE_DIR@/paddle/phi/api')) + # phi unify api header list(find_files('*.h', '@PADDLE_SOURCE_DIR@/paddle/phi/api/ext')) + # custom op api - list(find_files('*.h', '@PADDLE_SOURCE_DIR@/paddle/phi/api/include')) + # pten api - list(find_files('*.h', '@PADDLE_SOURCE_DIR@/paddle/phi/common')) + # pten common headers - # pten level api headers (low level api) - list(find_files('*.h', '@PADDLE_SOURCE_DIR@/paddle/phi/core', recursive=True)) + # pten core headers - list(find_files('*.h', '@PADDLE_SOURCE_DIR@/paddle/phi/backends', recursive=True)) + # pten backends headers + list(find_files('*.h', 
'@PADDLE_SOURCE_DIR@/paddle/phi/api/include')) + # phi api + list(find_files('*.h', '@PADDLE_SOURCE_DIR@/paddle/phi/common')) + # phi common headers + # phi level api headers (low level api) + list(find_files('*.h', '@PADDLE_SOURCE_DIR@/paddle/phi/core', recursive=True)) + # phi core headers + list(find_files('*.h', '@PADDLE_SOURCE_DIR@/paddle/phi/backends', recursive=True)) + # phi backends headers # utila api headers list(find_files('*.h', '@PADDLE_SOURCE_DIR@/paddle/utils', recursive=True)) + # paddle utils headers ['@PADDLE_SOURCE_DIR@/paddle/fluid/platform/device/device_ext.h'])