From 905cefd4f6a61fd29a2128829c666f1a77e5d471 Mon Sep 17 00:00:00 2001 From: Huang Jiyi <43315610+huangjiyi@users.noreply.github.com> Date: Thu, 16 Feb 2023 10:29:18 +0800 Subject: [PATCH] [phi decoupling] remove variable.h in phi (#50407) * move variable_utils from phi_api_utils to fluid * fix coment * update include * fix bugs * fix bugs * fix bugs * fix bugs * fix bugs * update * update * fix CI-Windows-OpenBLAS * fix bugs * fix bugs * fix bugs * update include * move variable_utils to phi_utils * fix namespace --- paddle/fluid/eager/eager_tensor.h | 1 - paddle/fluid/framework/CMakeLists.txt | 14 ++- paddle/fluid/framework/custom_operator.cc | 1 - paddle/fluid/framework/infershape_utils.cc | 6 +- paddle/fluid/framework/operator.cc | 10 +- paddle/fluid/framework/phi_utils.cc | 97 ++++++++++++++++++- paddle/fluid/framework/phi_utils.h | 11 +++ paddle/fluid/imperative/prepared_operator.h | 8 +- paddle/fluid/imperative/tests/CMakeLists.txt | 2 +- paddle/fluid/operators/CMakeLists.txt | 2 +- paddle/fluid/operators/cast_op.h | 2 +- .../elementwise/elementwise_op_function.h | 2 +- .../elementwise/elementwise_op_impl.cu.h | 1 - paddle/fluid/operators/math/CMakeLists.txt | 2 +- .../fluid/operators/math/sequence_padding.h | 8 +- paddle/fluid/operators/matmul_v2_op.h | 2 +- paddle/fluid/operators/reduce_ops/reduce_op.h | 2 +- paddle/fluid/operators/reshape_op.cc | 1 - paddle/fluid/operators/split_op.cc | 3 +- paddle/fluid/pybind/eager.cc | 2 +- paddle/fluid/pybind/eager_functions.cc | 2 +- paddle/phi/api/lib/utils/CMakeLists.txt | 11 +-- paddle/phi/api/lib/utils/tensor_utils.cc | 97 ------------------- paddle/phi/api/lib/utils/tensor_utils.h | 12 --- paddle/phi/core/lod_utils.cc | 13 +++ paddle/phi/core/lod_utils.h | 5 + paddle/phi/kernels/CMakeLists.txt | 2 +- paddle/phi/kernels/funcs/sequence_scale.cc | 2 +- paddle/phi/kernels/funcs/sequence_scale.cu | 3 +- paddle/phi/kernels/funcs/sequence_scale.h | 3 +- .../phi/kernels/gpu/multiplex_grad_kernel.cu | 5 +- paddle/phi/kernels/gpu/multiplex_kernel.cu | 5 +- paddle/phi/kernels/impl/warpctc_kernel_impl.h | 5 +- 33 files changed, 177 insertions(+), 165 deletions(-) diff --git a/paddle/fluid/eager/eager_tensor.h b/paddle/fluid/eager/eager_tensor.h index 22b6d705538..15fbb192a84 100644 --- a/paddle/fluid/eager/eager_tensor.h +++ b/paddle/fluid/eager/eager_tensor.h @@ -19,7 +19,6 @@ #include "paddle/fluid/framework/variable.h" // Phi deps #include "paddle/phi/api/include/tensor.h" -#include "paddle/phi/api/lib/utils/tensor_utils.h" #include "paddle/phi/core/compat/convert_utils.h" namespace egr { diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt index 47ab96871e0..db67739905c 100755 --- a/paddle/fluid/framework/CMakeLists.txt +++ b/paddle/fluid/framework/CMakeLists.txt @@ -435,24 +435,28 @@ if(WITH_XPU) phi_utils SRCS phi_utils.cc DEPS lod_tensor + dense_tensor selected_rows_utils place phi var_type_traits - phi_api_utils op_info - xpu_op_list) + xpu_op_list + convert_utils + phi_api_utils) else() cc_library( phi_utils SRCS phi_utils.cc DEPS lod_tensor + dense_tensor selected_rows_utils place phi var_type_traits - phi_api_utils - op_info) + op_info + convert_utils + phi_api_utils) endif() if(WITH_XPU) @@ -1158,7 +1162,7 @@ cc_library( place var_type_traits phi - phi_api_utils + phi_utils op_info shape_inference sparse_coo_tensor) diff --git a/paddle/fluid/framework/custom_operator.cc b/paddle/fluid/framework/custom_operator.cc index 83d274789c5..fb409cffb1d 100644 --- a/paddle/fluid/framework/custom_operator.cc 
+++ b/paddle/fluid/framework/custom_operator.cc @@ -37,7 +37,6 @@ limitations under the License. */ #include "paddle/fluid/platform/dynload/dynamic_loader.h" #include "paddle/fluid/string/string_helper.h" #include "paddle/phi/api/all.h" -#include "paddle/phi/api/lib/utils/tensor_utils.h" #include "paddle/phi/core/compat/convert_utils.h" #include "paddle/phi/core/tensor_utils.h" #include "paddle/utils/any.h" diff --git a/paddle/fluid/framework/infershape_utils.cc b/paddle/fluid/framework/infershape_utils.cc index 1709334d569..0fd6a477817 100644 --- a/paddle/fluid/framework/infershape_utils.cc +++ b/paddle/fluid/framework/infershape_utils.cc @@ -618,7 +618,7 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx, if (ctx->IsRuntime()) { Variable* var = PADDLE_GET_CONST(Variable*, infershape_input[0]); infer_meta_context.EmplaceBackAttr( - std::move(experimental::MakePhiScalarFromVar(*var))); + std::move(framework::MakePhiScalarFromVar(*var))); } else { phi::Scalar tensor_scalar(-1); tensor_scalar.SetFromTensor(true); @@ -670,10 +670,10 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx, } if (infershape_inputs.size() != 1) { infer_meta_context.EmplaceBackAttr( - std::move(experimental::MakePhiIntArrayFromVarList(vars))); + std::move(framework::MakePhiIntArrayFromVarList(vars))); } else { infer_meta_context.EmplaceBackAttr( - std::move(experimental::MakePhiIntArrayFromVar(*vars[0]))); + std::move(framework::MakePhiIntArrayFromVar(*vars[0]))); } } else { // If is not in runtime, we will set default value(-1) for IntArray diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index 76fe54dc627..fa1ee4e4be2 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -3227,8 +3227,8 @@ void OperatorWithKernel::BuildPhiKernelContext( } else { // scalar is in the input need_prepare_phi_data_ = true; auto& ins_vector = ctx.inputs.at(attr_names[i]); - phi_kernel_context->EmplaceBackAttr(std::move( - experimental::MakePhiScalarFromVar(*ins_vector.front()))); + phi_kernel_context->EmplaceBackAttr( + std::move(framework::MakePhiScalarFromVar(*ins_vector.front()))); } break; case phi::AttributeType::INT_ARRAY: @@ -3261,10 +3261,10 @@ void OperatorWithKernel::BuildPhiKernelContext( auto& ins_vector = ctx.inputs.at(attr_names[i]); if (ins_vector.size() == 1) { // ShapeTensor phi_kernel_context->EmplaceBackAttr(std::move( - experimental::MakePhiIntArrayFromVar(*ins_vector.front()))); + framework::MakePhiIntArrayFromVar(*ins_vector.front()))); } else { // ShapeTensorList - phi_kernel_context->EmplaceBackAttr(std::move( - experimental::MakePhiIntArrayFromVarList(ins_vector))); + phi_kernel_context->EmplaceBackAttr( + std::move(framework::MakePhiIntArrayFromVarList(ins_vector))); } } break; diff --git a/paddle/fluid/framework/phi_utils.cc b/paddle/fluid/framework/phi_utils.cc index 8da7b23a6a0..dde944a3d77 100644 --- a/paddle/fluid/framework/phi_utils.cc +++ b/paddle/fluid/framework/phi_utils.cc @@ -20,11 +20,11 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_info.h" #include "paddle/fluid/framework/selected_rows_utils.h" -#include "paddle/fluid/framework/variable.h" #include "paddle/fluid/string/string_helper.h" #include "paddle/phi/core/compat/convert_utils.h" #include "paddle/phi/core/compat/op_utils.h" #include "paddle/phi/core/kernel_factory.h" +#include "paddle/phi/core/tensor_utils.h" #include "paddle/phi/core/type_defs.h" namespace paddle { @@ -280,5 +280,100 @@ static void SetAllocationForUninitializedDenseTensor( dense_tensor->ResetHolder(shared_allocation); } +phi::Scalar MakePhiScalarFromVar(const framework::Variable& variable) { + auto expected_place = phi::TransToPhiPlace(phi::Backend::CPU); + if (variable.IsType()) { + const auto& tensor = variable.Get(); + PADDLE_ENFORCE_EQ( + tensor.numel(), + 1UL, + platform::errors::InvalidArgument("The DenseTensor used to construct " + "the Scalar contains more than 1 " + "value, it contains `%d` values.", + tensor.numel())); + if (!platform::is_same_place(tensor.place(), expected_place)) { + phi::DenseTensor tmp_tensor; + framework::TensorCopySync(tensor, expected_place, &tmp_tensor); + return {tmp_tensor}; + } else { + return {tensor}; + } + } else { + PADDLE_THROW(platform::errors::Unimplemented( + "Unsupport casting input `%s` type to Scalar when call pt " + "kernel.", + framework::ToTypeName(variable.Type()))); + } +} + +phi::IntArray MakePhiIntArrayFromVar(const framework::Variable& variable) { + if (variable.IsType()) { + const auto& tensor = variable.Get(); + return paddle::experimental::MakePhiIntArray(tensor); + } else { + PADDLE_THROW(platform::errors::Unimplemented( + "Unsupport casting input `%s` type to IntArray when call pt " + "kernel.", + framework::ToTypeName(variable.Type()))); + } +} + +// TODO(chentianyu03): Inplace with IntArray constructor +phi::IntArray MakePhiIntArrayFromVarList( + const std::vector& variable_list) { + if (variable_list.size() == 0) { + return phi::IntArray(); + } + auto expected_place = phi::TransToPhiPlace(phi::Backend::CPU); + + std::vector vector_data; + vector_data.reserve(variable_list.size()); + + for (auto* var : variable_list) { + paddle::experimental::DataType data_type; + if (var->IsType()) { + const auto& tensor = var->Get(); + data_type = tensor.dtype(); + if (data_type == paddle::experimental::DataType::INT64) { + const auto& tensor = var->Get(); + if (tensor.IsInitialized() && + !platform::is_same_place(tensor.place(), expected_place)) { + phi::DenseTensor tmp_tensor; + framework::TensorCopySync(tensor, expected_place, &tmp_tensor); + vector_data.push_back(*tmp_tensor.data()); + } else { + vector_data.push_back(*tensor.data()); + } + } else if (data_type == paddle::experimental::DataType::INT32) { + const auto& tensor = var->Get(); + if (tensor.IsInitialized() && + !platform::is_same_place(tensor.place(), expected_place)) { + phi::DenseTensor tmp_tensor; + framework::TensorCopySync(tensor, expected_place, &tmp_tensor); + vector_data.push_back(*tmp_tensor.data()); + } else { + vector_data.push_back(*tensor.data()); + } + } else { + PADDLE_THROW(phi::errors::InvalidArgument( + "Data type error. 
When cast a LoDTensor to VectorTensor, " + "the data type of LoDTensor must be int32 or int64, " + "but now data type is %s.", + data_type)); + } + } else { + PADDLE_THROW(phi::errors::Unimplemented( + "Unsupport casting input `%s` type to VectorTensor when call pt " + "kernel.", + framework::ToTypeName(var->Type()))); + } + } + + phi::IntArray result{vector_data}; + result.SetFromTensor(true); + + return result; +} + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/phi_utils.h b/paddle/fluid/framework/phi_utils.h index 0c214176f27..9eef7caea22 100644 --- a/paddle/fluid/framework/phi_utils.h +++ b/paddle/fluid/framework/phi_utils.h @@ -23,6 +23,7 @@ limitations under the License. */ #include "paddle/fluid/framework/op_kernel_type.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/framework/variable.h" #include "paddle/fluid/platform/macros.h" #include "paddle/fluid/platform/place.h" #include "paddle/phi/api/lib/utils/tensor_utils.h" @@ -86,5 +87,15 @@ struct ConvertToPhiContext { }; #endif +/* Make Phi Tensor from framework::Variable */ + +phi::Scalar MakePhiScalarFromVar(const framework::Variable& variable); + +phi::IntArray MakePhiIntArrayFromVar(const framework::Variable& variable); + +// TODO(chentianyu03): Inplace with IntArray constructor +phi::IntArray MakePhiIntArrayFromVarList( + const std::vector& variable_list); + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/imperative/prepared_operator.h b/paddle/fluid/imperative/prepared_operator.h index 00e059572d2..9473a3e3c50 100644 --- a/paddle/fluid/imperative/prepared_operator.h +++ b/paddle/fluid/imperative/prepared_operator.h @@ -434,8 +434,8 @@ void BuildDygraphPhiKernelContext(const phi::KernelSignature& kernel_signature, } } else { // scalar is in the input auto& ins_vector = ins.at(attr_names[i]); - kernel_ctx->EmplaceBackAttr(std::move( - experimental::MakePhiScalarFromVar(ins_vector[0]->Var()))); + kernel_ctx->EmplaceBackAttr( + std::move(framework::MakePhiScalarFromVar(ins_vector[0]->Var()))); } break; case phi::AttributeType::INT_ARRAY: @@ -468,7 +468,7 @@ void BuildDygraphPhiKernelContext(const phi::KernelSignature& kernel_signature, auto& ins_vector = ins.at(attr_names[i]); if (ins_vector.size() == 1) { // ShapeTensor kernel_ctx->EmplaceBackAttr(std::move( - experimental::MakePhiIntArrayFromVar(ins_vector[0]->Var()))); + framework::MakePhiIntArrayFromVar(ins_vector[0]->Var()))); } else { // ShapeTensorList std::vector variables; variables.reserve(ins_vector.size()); @@ -476,7 +476,7 @@ void BuildDygraphPhiKernelContext(const phi::KernelSignature& kernel_signature, variables.push_back(var_base->MutableVar()); } kernel_ctx->EmplaceBackAttr( - std::move(experimental::MakePhiIntArrayFromVarList(variables))); + std::move(framework::MakePhiIntArrayFromVarList(variables))); } } break; diff --git a/paddle/fluid/imperative/tests/CMakeLists.txt b/paddle/fluid/imperative/tests/CMakeLists.txt index 5bb32674df7..bc69e0afd96 100644 --- a/paddle/fluid/imperative/tests/CMakeLists.txt +++ b/paddle/fluid/imperative/tests/CMakeLists.txt @@ -46,7 +46,7 @@ cc_test( math_function phi_tensor phi_api - phi_api_utils) + phi_utils) cc_test( test_layer SRCS test_layer.cc diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt index 4a64c4411df..0498dae0bf5 100644 --- a/paddle/fluid/operators/CMakeLists.txt +++ b/paddle/fluid/operators/CMakeLists.txt @@ -95,7 +95,7 @@ if(WITH_UNITY_BUILD) 
include(unity_build_rule.cmake) endif() -set(OP_HEADER_DEPS ${OP_HEADER_DEPS} phi phi_api_utils backward_infermeta sparse_backward_infermeta static_prim_api) +set(OP_HEADER_DEPS ${OP_HEADER_DEPS} phi phi_utils backward_infermeta sparse_backward_infermeta static_prim_api) register_operators(EXCLUDES py_func_op warpctc_op dgc_op load_combine_op lstm_op run_program_op eye_op quantize_linear_op recurrent_op save_combine_op sparse_attention_op sync_batch_norm_op ${OP_MKL_DEPS} DEPS ${OP_HEADER_DEPS}) diff --git a/paddle/fluid/operators/cast_op.h b/paddle/fluid/operators/cast_op.h index f4121573577..2dde7f2ce17 100644 --- a/paddle/fluid/operators/cast_op.h +++ b/paddle/fluid/operators/cast_op.h @@ -16,8 +16,8 @@ limitations under the License. */ #include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/phi_utils.h" #include "paddle/fluid/platform/transform.h" -#include "paddle/phi/api/lib/utils/tensor_utils.h" #include "paddle/phi/kernels/cast_kernel.h" namespace paddle { diff --git a/paddle/fluid/operators/elementwise/elementwise_op_function.h b/paddle/fluid/operators/elementwise/elementwise_op_function.h index e1c91d43667..6fd13aad10a 100644 --- a/paddle/fluid/operators/elementwise/elementwise_op_function.h +++ b/paddle/fluid/operators/elementwise/elementwise_op_function.h @@ -24,11 +24,11 @@ limitations under the License. */ #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/framework/phi_utils.h" #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/operators/elementwise/elementwise_functor.h" #include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/transform.h" -#include "paddle/phi/api/lib/utils/tensor_utils.h" #include "paddle/phi/kernels/cpu/elementwise.h" #include "paddle/phi/kernels/cpu/elementwise_grad.h" diff --git a/paddle/fluid/operators/elementwise/elementwise_op_impl.cu.h b/paddle/fluid/operators/elementwise/elementwise_op_impl.cu.h index 1e9b87c9656..5e61a35acf4 100644 --- a/paddle/fluid/operators/elementwise/elementwise_op_impl.cu.h +++ b/paddle/fluid/operators/elementwise/elementwise_op_impl.cu.h @@ -18,7 +18,6 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/tensor.h" // only can include the headers in paddle/top/api dirs -#include "paddle/phi/api/lib/utils/tensor_utils.h" #include "paddle/phi/kernels/funcs/broadcast_function.h" namespace paddle { diff --git a/paddle/fluid/operators/math/CMakeLists.txt b/paddle/fluid/operators/math/CMakeLists.txt index 9751eec11d8..0471caea90c 100644 --- a/paddle/fluid/operators/math/CMakeLists.txt +++ b/paddle/fluid/operators/math/CMakeLists.txt @@ -28,7 +28,7 @@ math_library(sampler DEPS generator) # math_library(math_function DEPS blas dense_tensor tensor) -math_library(sequence_padding) +math_library(sequence_padding DEPS lod_tensor) math_library(sequence_pooling DEPS math_function jit_kernel_helper) if(WITH_ASCEND_CL) math_library(beam_search DEPS math_function beam_search_npu) diff --git a/paddle/fluid/operators/math/sequence_padding.h b/paddle/fluid/operators/math/sequence_padding.h index ec59309f04f..53d34d8a66a 100644 --- a/paddle/fluid/operators/math/sequence_padding.h +++ b/paddle/fluid/operators/math/sequence_padding.h @@ -48,8 +48,8 @@ inline static size_t TotalSequenceLength( return total_seq_len; } -inline static void CheckDims(const framework::DDim& seq_tensor_dims, - const framework::DDim& pad_tensor_dims, +inline static void CheckDims(const phi::DDim& seq_tensor_dims, + const phi::DDim& pad_tensor_dims, const phi::Vector& seq_offset, int64_t padded_seq_len, int64_t step_width, @@ -57,7 +57,7 @@ inline static void CheckDims(const framework::DDim& seq_tensor_dims, PADDLE_ENFORCE_EQ( static_cast(seq_tensor_dims[0]), seq_offset.back(), - platform::errors::InvalidArgument( + phi::errors::InvalidArgument( "Value of 1st dimension of the sequence tensor should be " "equal to sum of lengths of all sequences. Expected %ld == %ld, but " "got %ld != %ld. Please check the input value.", @@ -70,7 +70,7 @@ inline static void CheckDims(const framework::DDim& seq_tensor_dims, seq_tensor_dims.size() + 1 == pad_tensor_dims.size() || seq_tensor_dims.size() == pad_tensor_dims.size(), true, - platform::errors::InvalidArgument( + phi::errors::InvalidArgument( "pad_tensor's rank should be 1 greater than seq_tensor's " "rank, or be equal with it. The pad_tensor's rank is %ld, " "expected the seq_tensor's rank is %ld or %ld, but got %ld. " diff --git a/paddle/fluid/operators/matmul_v2_op.h b/paddle/fluid/operators/matmul_v2_op.h index a27bf5a33e2..dea27dbbbf0 100644 --- a/paddle/fluid/operators/matmul_v2_op.h +++ b/paddle/fluid/operators/matmul_v2_op.h @@ -26,7 +26,7 @@ limitations under the License. */ #include "paddle/phi/kernels/funcs/complex_functors.h" // only can include the headers in paddle/phi/api dirs -#include "paddle/phi/api/lib/utils/tensor_utils.h" +#include "paddle/fluid/framework/phi_utils.h" #include "paddle/phi/kernels/matmul_grad_kernel.h" #include "paddle/phi/kernels/matmul_kernel.h" diff --git a/paddle/fluid/operators/reduce_ops/reduce_op.h b/paddle/fluid/operators/reduce_ops/reduce_op.h index 36e58aca763..2a375685a9c 100644 --- a/paddle/fluid/operators/reduce_ops/reduce_op.h +++ b/paddle/fluid/operators/reduce_ops/reduce_op.h @@ -26,7 +26,7 @@ limitations under the License. 
*/ #include "paddle/phi/kernels/funcs/math_function.h" // only can include the headers in paddle/phi/api dirs #include "paddle/fluid/framework/convert_utils.h" -#include "paddle/phi/api/lib/utils/tensor_utils.h" +#include "paddle/fluid/framework/phi_utils.h" #include "paddle/phi/kernels/cpu/reduce.h" #if defined(__HIPCC__) || defined(__NVCC__) || defined(__xpu__) diff --git a/paddle/fluid/operators/reshape_op.cc b/paddle/fluid/operators/reshape_op.cc index ae4db70fd52..09d361d0e44 100644 --- a/paddle/fluid/operators/reshape_op.cc +++ b/paddle/fluid/operators/reshape_op.cc @@ -19,7 +19,6 @@ limitations under the License. */ #include "paddle/fluid/framework/phi_utils.h" // only can include the headers in paddle/phi/api dirs -#include "paddle/phi/api/lib/utils/tensor_utils.h" #include "paddle/phi/backends/cpu/cpu_context.h" #include "paddle/phi/common/int_array.h" #include "paddle/phi/core/infermeta_utils.h" diff --git a/paddle/fluid/operators/split_op.cc b/paddle/fluid/operators/split_op.cc index 47f6306acbe..14e449c7980 100644 --- a/paddle/fluid/operators/split_op.cc +++ b/paddle/fluid/operators/split_op.cc @@ -17,6 +17,7 @@ limitations under the License. */ #include #include "paddle/fluid/framework/infershape_utils.h" +#include "paddle/fluid/framework/phi_utils.h" #include "paddle/phi/infermeta/unary.h" namespace paddle { @@ -61,7 +62,7 @@ class SplitOp : public framework::OperatorWithKernel { if (ctx->IsRuntime() && ctx->HasInput("AxisTensor")) { Variable *var = PADDLE_GET_CONST(Variable *, ctx->GetInputVarPtrs("AxisTensor")[0]); - axis_final = std::move(experimental::MakePhiScalarFromVar(*var)); + axis_final = std::move(framework::MakePhiScalarFromVar(*var)); } else if (!ctx->IsRuntime() && ctx->HasInput("AxisTensor")) { axis_final = std::move(phi::Scalar(-1)); axis_final.SetFromTensor(true); diff --git a/paddle/fluid/pybind/eager.cc b/paddle/fluid/pybind/eager.cc index 2e7fc1d2e47..313b9be1b95 100644 --- a/paddle/fluid/pybind/eager.cc +++ b/paddle/fluid/pybind/eager.cc @@ -36,10 +36,10 @@ limitations under the License. 
*/ #include "pybind11/numpy.h" #include "pybind11/pybind11.h" #pragma GCC diagnostic ignored "-Wmissing-field-initializers" +#include "paddle/fluid/framework/phi_utils.h" #include "paddle/fluid/framework/python_headers.h" #include "paddle/fluid/pybind/exception.h" #include "paddle/fluid/pybind/tensor_py.h" -#include "paddle/phi/api/lib/utils/tensor_utils.h" #include "paddle/phi/core/string_tensor.h" namespace paddle { namespace pybind { diff --git a/paddle/fluid/pybind/eager_functions.cc b/paddle/fluid/pybind/eager_functions.cc index aeb9f048a80..aaf307b65bc 100644 --- a/paddle/fluid/pybind/eager_functions.cc +++ b/paddle/fluid/pybind/eager_functions.cc @@ -33,6 +33,7 @@ typedef SSIZE_T ssize_t; #include "paddle/fluid/framework/convert_utils.h" #include "paddle/fluid/framework/custom_operator.h" #include "paddle/fluid/framework/op_meta_info_helper.h" +#include "paddle/fluid/framework/phi_utils.h" #include "paddle/fluid/framework/python_headers.h" #include "paddle/fluid/memory/allocation/allocator.h" #include "paddle/fluid/memory/memcpy.h" @@ -47,7 +48,6 @@ typedef SSIZE_T ssize_t; #include "paddle/fluid/pybind/tensor_py.h" #include "paddle/phi/api/ext/op_meta_info.h" #include "paddle/phi/api/lib/utils/allocator.h" -#include "paddle/phi/api/lib/utils/tensor_utils.h" #include "paddle/phi/common/data_type.h" #include "paddle/phi/core/compat/convert_utils.h" #include "paddle/phi/core/dense_tensor.h" diff --git a/paddle/phi/api/lib/utils/CMakeLists.txt b/paddle/phi/api/lib/utils/CMakeLists.txt index ef99a158628..b233a12051e 100644 --- a/paddle/phi/api/lib/utils/CMakeLists.txt +++ b/paddle/phi/api/lib/utils/CMakeLists.txt @@ -1,13 +1,4 @@ cc_library( phi_api_utils SRCS tensor_utils.cc - DEPS tensor_base - convert_utils - dense_tensor - lod_tensor - selected_rows_utils - place - var_type_traits - string_tensor - int_array - scalar) + DEPS dense_tensor int_array scalar) diff --git a/paddle/phi/api/lib/utils/tensor_utils.cc b/paddle/phi/api/lib/utils/tensor_utils.cc index 839391e8a1c..9bb252bd670 100644 --- a/paddle/phi/api/lib/utils/tensor_utils.cc +++ b/paddle/phi/api/lib/utils/tensor_utils.cc @@ -17,8 +17,6 @@ limitations under the License. 
*/ #include #include -#include "paddle/phi/core/tensor_utils.h" - namespace paddle { namespace experimental { @@ -36,102 +34,7 @@ std::unique_ptr MakePhiDenseTensor( return std::make_unique(src); } -phi::Scalar MakePhiScalarFromVar(const framework::Variable& variable) { - auto expected_place = phi::TransToPhiPlace(phi::Backend::CPU); - if (variable.IsType()) { - const auto& tensor = variable.Get(); - PADDLE_ENFORCE_EQ( - tensor.numel(), - 1UL, - platform::errors::InvalidArgument("The DenseTensor used to construct " - "the Scalar contains more than 1 " - "value, it contains `%d` values.", - tensor.numel())); - if (!platform::is_same_place(tensor.place(), expected_place)) { - phi::DenseTensor tmp_tensor; - framework::TensorCopySync(tensor, expected_place, &tmp_tensor); - return {tmp_tensor}; - } else { - return {tensor}; - } - } else { - PADDLE_THROW(platform::errors::Unimplemented( - "Unsupport casting input `%s` type to Scalar when call pt " - "kernel.", - framework::ToTypeName(variable.Type()))); - } -} - phi::IntArray MakePhiIntArray(const phi::DenseTensor& src) { return {src}; } -phi::IntArray MakePhiIntArrayFromVar(const framework::Variable& variable) { - if (variable.IsType()) { - const auto& tensor = variable.Get(); - return MakePhiIntArray(tensor); - } else { - PADDLE_THROW(platform::errors::Unimplemented( - "Unsupport casting input `%s` type to IntArray when call pt " - "kernel.", - framework::ToTypeName(variable.Type()))); - } -} - -// TODO(chentianyu03): Inplace with IntArray constructor -phi::IntArray MakePhiIntArrayFromVarList( - const std::vector& variable_list) { - if (variable_list.size() == 0) { - return phi::IntArray(); - } - auto expected_place = phi::TransToPhiPlace(phi::Backend::CPU); - - std::vector vector_data; - vector_data.reserve(variable_list.size()); - - for (auto* var : variable_list) { - paddle::experimental::DataType data_type; - if (var->IsType()) { - const auto& tensor = var->Get(); - data_type = tensor.dtype(); - if (data_type == paddle::experimental::DataType::INT64) { - const auto& tensor = var->Get(); - if (tensor.IsInitialized() && - !platform::is_same_place(tensor.place(), expected_place)) { - phi::DenseTensor tmp_tensor; - framework::TensorCopySync(tensor, expected_place, &tmp_tensor); - vector_data.push_back(*tmp_tensor.data()); - } else { - vector_data.push_back(*tensor.data()); - } - } else if (data_type == paddle::experimental::DataType::INT32) { - const auto& tensor = var->Get(); - if (tensor.IsInitialized() && - !platform::is_same_place(tensor.place(), expected_place)) { - phi::DenseTensor tmp_tensor; - framework::TensorCopySync(tensor, expected_place, &tmp_tensor); - vector_data.push_back(*tmp_tensor.data()); - } else { - vector_data.push_back(*tensor.data()); - } - } else { - PADDLE_THROW(phi::errors::InvalidArgument( - "Data type error. 
When cast a LoDTensor to VectorTensor, " - "the data type of LoDTensor must be int32 or int64, " - "but now data type is %s.", - data_type)); - } - } else { - PADDLE_THROW(phi::errors::Unimplemented( - "Unsupport casting input `%s` type to VectorTensor when call pt " - "kernel.", - framework::ToTypeName(var->Type()))); - } - } - - phi::IntArray result{vector_data}; - result.SetFromTensor(true); - - return result; -} - } // namespace experimental } // namespace paddle diff --git a/paddle/phi/api/lib/utils/tensor_utils.h b/paddle/phi/api/lib/utils/tensor_utils.h index 4357eacaf3b..fd545154bbb 100644 --- a/paddle/phi/api/lib/utils/tensor_utils.h +++ b/paddle/phi/api/lib/utils/tensor_utils.h @@ -14,15 +14,10 @@ limitations under the License. */ #pragma once -#include - -#include "paddle/fluid/framework/variable.h" #include "paddle/phi/api/lib/utils/allocator.h" #include "paddle/phi/common/int_array.h" #include "paddle/phi/common/scalar.h" -#include "paddle/phi/core/compat/convert_utils.h" #include "paddle/phi/core/dense_tensor.h" -#include "paddle/phi/core/kernel_factory.h" namespace paddle { namespace experimental { @@ -32,12 +27,5 @@ std::unique_ptr MakePhiDenseTensor( phi::IntArray MakePhiIntArray(const phi::DenseTensor& src); -phi::Scalar MakePhiScalarFromVar(const framework::Variable& variable); - -phi::IntArray MakePhiIntArrayFromVar(const framework::Variable& variable); - -phi::IntArray MakePhiIntArrayFromVarList( - const std::vector& variable_list); - } // namespace experimental } // namespace paddle diff --git a/paddle/phi/core/lod_utils.cc b/paddle/phi/core/lod_utils.cc index 15e3f7953a7..d775ad1a18f 100644 --- a/paddle/phi/core/lod_utils.cc +++ b/paddle/phi/core/lod_utils.cc @@ -18,6 +18,19 @@ namespace phi { +LoD ToAbsOffset(const LoD &in) { + // the lowest level stores relative offsets + if (in.empty() || in.size() == 1) return in; + LoD result = in; + for (auto level = static_cast(in.size() - 2); level >= 0; level--) { + for (size_t i = 0; i < in[level].size(); ++i) { + size_t index = in[level][i]; + result[level][i] = result[level + 1][index]; + } + } + return result; +} + void AppendLoD(LoD *lod, const LoD &lod_length) { PADDLE_ENFORCE( lod->empty() || lod->size() == lod_length.size(), diff --git a/paddle/phi/core/lod_utils.h b/paddle/phi/core/lod_utils.h index 147fca4cb57..a366f82c0dd 100644 --- a/paddle/phi/core/lod_utils.h +++ b/paddle/phi/core/lod_utils.h @@ -19,6 +19,11 @@ namespace phi { using LoD = std::vector>; +/* + * Transform an LoD from relative offsets to absolute offsets. 
+ */ +LoD ToAbsOffset(const LoD& in); + void AppendLoD(LoD* lod, const LoD& lod_length); /* diff --git a/paddle/phi/kernels/CMakeLists.txt b/paddle/phi/kernels/CMakeLists.txt index 25bbd17c4fe..e1842510421 100644 --- a/paddle/phi/kernels/CMakeLists.txt +++ b/paddle/phi/kernels/CMakeLists.txt @@ -47,7 +47,7 @@ set(COMMON_KERNEL_DEPS concat_and_split_functor selected_rows_functor) # remove this dep after removing fluid deps on tensor creation -set(COMMON_KERNEL_DEPS ${COMMON_KERNEL_DEPS} phi_api_utils) +set(COMMON_KERNEL_DEPS ${COMMON_KERNEL_DEPS} phi_api_utils lod_utils) set(COMMON_KERNEL_DEPS ${COMMON_KERNEL_DEPS} infermeta infermeta_utils sparse_infermeta) set(COMMON_KERNEL_DEPS ${COMMON_KERNEL_DEPS} switch_autotune) diff --git a/paddle/phi/kernels/funcs/sequence_scale.cc b/paddle/phi/kernels/funcs/sequence_scale.cc index 5a2873d7e32..5feec87ff3e 100644 --- a/paddle/phi/kernels/funcs/sequence_scale.cc +++ b/paddle/phi/kernels/funcs/sequence_scale.cc @@ -32,7 +32,7 @@ class ScaleLoDTensorFunctor { auto lod = seq->lod(); const size_t num_seq = lod[level].size() - 1; size_t seq_width = seq->dims()[1]; - paddle::framework::LoD abs_offset_lod = paddle::framework::ToAbsOffset(lod); + phi::LoD abs_offset_lod = phi::ToAbsOffset(lod); T* seq_data = context.template Alloc(seq); for (size_t i = 0; i < num_seq; ++i) { diff --git a/paddle/phi/kernels/funcs/sequence_scale.cu b/paddle/phi/kernels/funcs/sequence_scale.cu index 8feea62a3d7..434c82bf362 100644 --- a/paddle/phi/kernels/funcs/sequence_scale.cu +++ b/paddle/phi/kernels/funcs/sequence_scale.cu @@ -15,6 +15,7 @@ limitations under the License. */ #include "paddle/phi/kernels/funcs/sequence_scale.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/backends/gpu/gpu_primitives.h" +#include "paddle/phi/core/mixed_vector.h" namespace phi { namespace funcs { @@ -44,7 +45,7 @@ class ScaleLoDTensorFunctor { auto lod = seq->lod(); const size_t num_seq = lod[level].size() - 1; const size_t seq_width = seq->numel() / seq->dims()[0]; - auto abs_offset_lod = paddle::framework::ToAbsOffset(lod); + auto abs_offset_lod = phi::ToAbsOffset(lod); T* seq_data = context.template Alloc(seq); phi::MixVector mix_vector(&(abs_offset_lod[level])); diff --git a/paddle/phi/kernels/funcs/sequence_scale.h b/paddle/phi/kernels/funcs/sequence_scale.h index 02a91d53e58..6e2c1e49c85 100644 --- a/paddle/phi/kernels/funcs/sequence_scale.h +++ b/paddle/phi/kernels/funcs/sequence_scale.h @@ -14,8 +14,9 @@ limitations under the License. 
*/ #pragma once -#include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/platform/device_context.h" +#include "paddle/phi/core/dense_tensor.h" +#include "paddle/phi/core/lod_utils.h" namespace phi { namespace funcs { diff --git a/paddle/phi/kernels/gpu/multiplex_grad_kernel.cu b/paddle/phi/kernels/gpu/multiplex_grad_kernel.cu index 21576ab608d..4f355d6b88a 100644 --- a/paddle/phi/kernels/gpu/multiplex_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/multiplex_grad_kernel.cu @@ -14,9 +14,10 @@ #include "paddle/phi/kernels/multiplex_grad_kernel.h" -#include "paddle/phi/api/lib/utils/tensor_utils.h" +#include "paddle/fluid/memory/memcpy.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" +#include "paddle/phi/core/tensor_utils.h" #include "paddle/phi/kernels/funcs/eigen/common.h" namespace phi { @@ -40,7 +41,7 @@ void MultiplexGradKernel(const Context& ctx, auto rows = ins_grad[idx]->dims()[0]; auto cols = ins_grad[idx]->numel() / rows; DenseTensor index_t_cpu; - paddle::framework::TensorCopySync(ids, phi::CPUPlace(), &index_t_cpu); + phi::Copy(ctx, ids, phi::CPUPlace(), true, &index_t_cpu); auto* index = index_t_cpu.data(); auto stream = ctx.stream(); for (auto i = 0; i < rows; i++) { diff --git a/paddle/phi/kernels/gpu/multiplex_kernel.cu b/paddle/phi/kernels/gpu/multiplex_kernel.cu index 2a86827bcf4..92ac5378d44 100644 --- a/paddle/phi/kernels/gpu/multiplex_kernel.cu +++ b/paddle/phi/kernels/gpu/multiplex_kernel.cu @@ -14,9 +14,10 @@ #include "paddle/phi/kernels/multiplex_kernel.h" -#include "paddle/phi/api/lib/utils/tensor_utils.h" +#include "paddle/fluid/memory/memcpy.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" +#include "paddle/phi/core/tensor_utils.h" namespace phi { @@ -38,7 +39,7 @@ void MultiplexKernel(const Context& ctx, auto rows = ins[0]->dims()[0]; auto cols = ins[0]->numel() / rows; DenseTensor index_t_cpu; - paddle::framework::TensorCopySync(ids, phi::CPUPlace(), &index_t_cpu); + phi::Copy(ctx, ids, phi::CPUPlace(), true, &index_t_cpu); auto* index = index_t_cpu.data(); auto stream = ctx.stream(); for (auto i = 0; i < ids.dims()[0]; i++) { diff --git a/paddle/phi/kernels/impl/warpctc_kernel_impl.h b/paddle/phi/kernels/impl/warpctc_kernel_impl.h index baabf8465b7..c7de98d4f3a 100644 --- a/paddle/phi/kernels/impl/warpctc_kernel_impl.h +++ b/paddle/phi/kernels/impl/warpctc_kernel_impl.h @@ -19,6 +19,7 @@ #include "paddle/fluid/operators/math/sequence_padding.h" #include "paddle/phi/backends/dynload/warpctc.h" #include "paddle/phi/core/dense_tensor.h" +#include "paddle/phi/core/lod_utils.h" #include "paddle/phi/core/tensor_utils.h" #include "paddle/phi/kernels/empty_kernel.h" #include "paddle/phi/kernels/funcs/math_function.h" @@ -293,7 +294,7 @@ void WarpctcKernel(const Context& dev_ctx, phi::errors::InvalidArgument("Input(Label) Tensor of WarpCTC " "does not contain LoD information.")); - logits_lod = paddle::framework::ToAbsOffset(logits.lod())[0]; + logits_lod = phi::ToAbsOffset(logits.lod())[0]; auto logits_dims = logits.dims(); PADDLE_ENFORCE_GT(logits_dims[0], @@ -313,7 +314,7 @@ void WarpctcKernel(const Context& dev_ctx, static_cast(logits_lod.back()), logits_dims[0])); - label_lod = paddle::framework::ToAbsOffset(label.lod())[0]; + label_lod = phi::ToAbsOffset(label.lod())[0]; auto label_dims = label.dims(); PADDLE_ENFORCE_EQ(label_dims[1], 1, -- GitLab
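
Note for reviewers unfamiliar with LoD offsets: the `phi::ToAbsOffset` helper added in paddle/phi/core/lod_utils.cc converts a LoD table from per-level relative offsets into absolute element offsets, which is what the warpctc and sequence_scale call sites above rely on. The following is a minimal standalone sketch of that transformation; it mirrors the logic added by this patch but uses a local `LoD` alias and an illustrative function name (`ToAbsOffsetSketch`), so it builds without the Paddle headers and is not part of the patch itself.

#include <cstddef>
#include <iostream>
#include <vector>

// Same data layout as phi::LoD: one offset vector per level.
using LoD = std::vector<std::vector<std::size_t>>;

// Walk the levels bottom-up and replace each index with the offset it points
// to one level below, so every level ends up in absolute element offsets.
LoD ToAbsOffsetSketch(const LoD& in) {
  if (in.empty() || in.size() == 1) return in;
  LoD result = in;
  for (int level = static_cast<int>(in.size()) - 2; level >= 0; --level) {
    for (std::size_t i = 0; i < in[level].size(); ++i) {
      result[level][i] = result[level + 1][in[level][i]];
    }
  }
  return result;
}

int main() {
  // Two levels: the top level groups sequences {0,1} and {2}; the bottom
  // level stores element offsets {0, 2, 5, 9}.
  LoD lod = {{0, 2, 3}, {0, 2, 5, 9}};
  LoD abs = ToAbsOffsetSketch(lod);
  // Top level becomes absolute element offsets: 0 5 9
  for (std::size_t v : abs[0]) std::cout << v << ' ';
  std::cout << '\n';
  return 0;
}

The bottom-up traversal is what lets a single pass resolve arbitrarily deep LoD nesting: once level N+1 is absolute, level N only needs one indirection through it.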