diff --git a/paddle/infrt/kernel/phi/dense_tensor_kernels.cc b/paddle/infrt/kernel/phi/dense_tensor_kernels.cc
index 7ffc8de15107563f778ea00a1a517d13b02e0938..a9b18c769dca81f44c2f87b0eb6935b3f4da7966 100644
--- a/paddle/infrt/kernel/phi/dense_tensor_kernels.cc
+++ b/paddle/infrt/kernel/phi/dense_tensor_kernels.cc
@@ -13,7 +13,9 @@
 // limitations under the License.
 
 #include "paddle/infrt/kernel/phi/dense_tensor_kernels.h"
+#include <memory>
 #include "llvm/Support/ErrorHandling.h"
+#include "paddle/infrt/backends/host/phi_allocator.h"
 #include "paddle/infrt/common/string.h"
 #include "paddle/infrt/dialect/phi/data_type.h"
 #include "paddle/infrt/kernel/phi/context_kernels.h"
@@ -22,24 +24,13 @@
 #include "paddle/infrt/tensor/tensor_map.h"
 #include "paddle/phi/backends/all_context.h"
 #include "paddle/phi/common/place.h"
+#include "paddle/phi/core/allocator.h"
 #include "paddle/phi/core/dense_tensor.h"
 
 #ifdef INFRT_WITH_GPU
 #include <cuda_runtime.h>
 #endif
 
-namespace paddle {
-namespace platform {
-using DeviceContext = ::phi::DeviceContext;
-}  // namespace platform
-namespace framework {
-using LoDTensor = ::phi::DenseTensor;
-void DeserializeFromStream(std::istream& is,
-                           LoDTensor* tensor,
-                           const platform::DeviceContext& dev_ctx);
-}
-}  // namespace paddle
-
 namespace infrt {
 namespace kernel {
 namespace phi {
@@ -198,6 +189,12 @@ void PrintDenseTensor(::phi::DenseTensor* dense_tensor) {
   auto pb_proto_prog = paddle::LoadProgram(model_path);
   auto main_block = pb_proto_prog->blocks(0);
 
+  ::phi::CPUContext ctx;
+  auto allocator = std::make_unique<backends::CpuPhiAllocator>();
+  const auto* allocator_ptr = allocator.get();
+  ctx.SetAllocator(allocator_ptr);
+  ctx.SetHostAllocator(allocator_ptr);
+  ctx.SetZeroAllocator(allocator_ptr);
   for (auto& var : main_block.vars()) {
     if (var.name() == "feed" || var.name() == "fetch" || !var.persistable())
       continue;
@@ -207,9 +204,7 @@ void PrintDenseTensor(::phi::DenseTensor* dense_tensor) {
       case ::paddle::framework::proto::VarType_Type_LOD_TENSOR: {
         std::unique_ptr<::phi::DenseTensor> tensor{
             std::make_unique<::phi::DenseTensor>()};
-        ::phi::CPUContext ctx;
-        ::paddle::framework::DeserializeFromStream(
-            param_file, tensor.get(), ctx);
+        ::infrt::paddle::DeserializeFromStream(param_file, tensor.get(), ctx);
         map.SetDenseTensor(var.name(), std::move(tensor));
       } break;
       default: {
@@ -249,11 +244,16 @@ void PrintDenseTensor(::phi::DenseTensor* dense_tensor) {
     }
   }
 
+  ::phi::CPUContext ctx;
+  auto allocator = std::make_unique<backends::CpuPhiAllocator>();
+  const auto* allocator_ptr = allocator.get();
+  ctx.SetAllocator(allocator_ptr);
+  ctx.SetHostAllocator(allocator_ptr);
+  ctx.SetZeroAllocator(allocator_ptr);
   for (auto& var : tmp) {
     std::unique_ptr<::phi::DenseTensor> tensor{
         std::make_unique<::phi::DenseTensor>()};
-    ::phi::CPUContext ctx;
-    ::paddle::framework::DeserializeFromStream(param_file, tensor.get(), ctx);
+    ::infrt::paddle::DeserializeFromStream(param_file, tensor.get(), ctx);
     map.SetDenseTensor(var, std::move(tensor));
   }
 
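Note on the allocator wiring added to the two loader functions above: a bare ::phi::CPUContext constructed outside the full framework has no allocators bound, so the HostAlloc call inside the new ::infrt::paddle::DeserializeFromStream would hit a null allocator without it. The header paddle/infrt/backends/host/phi_allocator.h is not shown in this diff; a plausible shape for the CpuPhiAllocator it provides, assuming phi's Allocator/Allocation interface of the time, is:

    // Sketch only: the real class lives in
    // paddle/infrt/backends/host/phi_allocator.h and may differ in detail.
    #include "paddle/phi/core/allocator.h"

    namespace infrt {
    namespace backends {

    class CpuPhiAllocator : public ::phi::Allocator {
     public:
      static void deleter(::phi::Allocation* ptr) { ::operator delete(ptr); }

      AllocationPtr Allocate(size_t bytes_size) override {
        // Each Allocation carries its own deleter (a plain function
        // pointer), so releasing tensor memory does not require this
        // allocator object to still exist.
        return AllocationPtr(
            new ::phi::Allocation(::operator new(bytes_size),
                                  bytes_size,
                                  ::phi::Place(::phi::AllocationType::CPU)),
            deleter);
      }
    };

    }  // namespace backends
    }  // namespace infrt

The loaders keep only raw pointers in the context while the std::unique_ptr owns the allocator, so the allocator must stay alive while tensors are being deserialized through the context.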
diff --git a/paddle/infrt/paddle/model_parser.cc b/paddle/infrt/paddle/model_parser.cc
index f3de1a630451cc387765040191be8715768be510..da4f8b6420b22dc67d5b1b82d39a86377353b118 100644
--- a/paddle/infrt/paddle/model_parser.cc
+++ b/paddle/infrt/paddle/model_parser.cc
@@ -22,6 +22,10 @@
 #include "paddle/infrt/common/target.h"
 #include "paddle/infrt/common/type.h"
 
+#ifdef INFRT_WITH_PHI
+#include "paddle/phi/common/data_type.h"
+#endif
+
 namespace infrt {
 namespace paddle {
 
@@ -170,5 +174,96 @@ void LoadParam(const std::string &path, _Variable *out, const Target &target) {
   LoadLoDTensor(fin, out, target);
 }
 
+#ifdef INFRT_WITH_PHI
+namespace framework_proto = ::paddle::framework::proto;
+
+inline ::phi::DataType PhiDataType(framework_proto::VarType::Type type) {
+  using Type = framework_proto::VarType::Type;
+  switch (static_cast<int>(type)) {
+    case Type::VarType_Type_BOOL:
+      return ::phi::DataType::BOOL;
+    case Type::VarType_Type_INT8:
+      return ::phi::DataType::INT8;
+    case Type::VarType_Type_UINT8:
+      return ::phi::DataType::UINT8;
+    case Type::VarType_Type_INT16:
+      return ::phi::DataType::INT16;
+    case Type::VarType_Type_INT32:
+      return ::phi::DataType::INT32;
+    case Type::VarType_Type_INT64:
+      return ::phi::DataType::INT64;
+    case Type::VarType_Type_SIZE_T:
+      return ::phi::DataType::UINT64;
+    case Type::VarType_Type_FP16:
+      return ::phi::DataType::FLOAT16;
+    case Type::VarType_Type_FP32:
+      return ::phi::DataType::FLOAT32;
+    case Type::VarType_Type_FP64:
+      return ::phi::DataType::FLOAT64;
+    default:
+      LOG(FATAL) << "unknown data type " << type;
+  }
+  return ::phi::DataType::UNDEFINED;
+}
+
+inline void TensorFromStream(std::istream &is,
+                             ::phi::DenseTensor *tensor,
+                             const ::phi::CPUContext &ctx) {
+  uint32_t version;
+  is.read(reinterpret_cast<char *>(&version), sizeof(version));
+  CHECK_EQ(version, 0U);
+  framework_proto::VarType::TensorDesc desc;
+  {  // int32_t size
+     // proto buffer
+    int32_t size = -1;
+    is.read(reinterpret_cast<char *>(&size), sizeof(size));
+    CHECK_EQ(is.good(), true);
+    CHECK_GE(size, 0);
+    std::unique_ptr<char[]> buf(new char[size]);
+    is.read(reinterpret_cast<char *>(buf.get()), size);
+    CHECK_EQ(desc.ParseFromArray(buf.get(), size), true);
+  }
+  {  // read tensor
+    std::vector<int64_t> dims;
+    dims.reserve(static_cast<size_t>(desc.dims().size()));
+    std::copy(desc.dims().begin(), desc.dims().end(), std::back_inserter(dims));
+    tensor->Resize(::phi::make_ddim(dims));
+    void *buf;
+    size_t size = tensor->numel() * SizeOfType(desc.data_type());
+    ctx.HostAlloc(tensor, PhiDataType(desc.data_type()), size);
+    buf = tensor->data();
+    is.read(static_cast<char *>(buf), size);
+  }
+}
+
+void DeserializeFromStream(std::istream &is,
+                           ::phi::DenseTensor *tensor,
+                           const ::phi::CPUContext &dev_ctx) {
+  {
+    // the 1st field, uint32_t version for LoDTensor
+    uint32_t version;
+    is.read(reinterpret_cast<char *>(&version), sizeof(version));
+    CHECK_EQ(version, 0U);
+  }
+  {
+    // the 2nd field, LoD information
+    uint64_t lod_level;
+    is.read(reinterpret_cast<char *>(&lod_level), sizeof(lod_level));
+    auto &lod = *tensor->mutable_lod();
+    lod.resize(lod_level);
+    for (uint64_t i = 0; i < lod_level; ++i) {
+      uint64_t size;
+      is.read(reinterpret_cast<char *>(&size), sizeof(size));
+      std::vector<size_t> tmp(size / sizeof(size_t));
+      is.read(reinterpret_cast<char *>(tmp.data()),
+              static_cast<std::streamsize>(size));
+      lod[i] = tmp;
+    }
+  }
+  // the 3rd field, Tensor
+  TensorFromStream(is, tensor, dev_ctx);
+}
+#endif
+
 }  // namespace paddle
 }  // namespace infrt
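The stream format consumed by the new DeserializeFromStream is easiest to see from the writing side. The real serializer lives in the Paddle framework and is not part of this patch, so the sketch below is hypothetical: it assumes it sits in the same translation unit (so the framework_proto alias and the existing SizeOfType() helper are in scope), and ProtoDataType() is an assumed inverse of the PhiDataType() mapping added above.

    // Hypothetical writer documenting the byte layout parsed above.
    void SerializeToStream(std::ostream &os, const ::phi::DenseTensor &tensor) {
      const uint32_t kVersion = 0;
      // 1st field: uint32_t LoD version.
      os.write(reinterpret_cast<const char *>(&kVersion), sizeof(kVersion));
      // 2nd field: uint64_t level count, then each level as a byte size
      // followed by its raw size_t offsets.
      const uint64_t lod_level = tensor.lod().size();
      os.write(reinterpret_cast<const char *>(&lod_level), sizeof(lod_level));
      for (const auto &level : tensor.lod()) {
        const uint64_t size = level.size() * sizeof(size_t);
        os.write(reinterpret_cast<const char *>(&size), sizeof(size));
        os.write(reinterpret_cast<const char *>(level.data()),
                 static_cast<std::streamsize>(size));
      }
      // 3rd field: uint32_t tensor version, int32_t proto length, the
      // serialized VarType::TensorDesc, then the raw tensor bytes.
      os.write(reinterpret_cast<const char *>(&kVersion), sizeof(kVersion));
      framework_proto::VarType::TensorDesc desc;
      desc.set_data_type(ProtoDataType(tensor.dtype()));  // assumed helper
      for (int64_t d : ::phi::vectorize(tensor.dims())) desc.add_dims(d);
      const std::string desc_bytes = desc.SerializeAsString();
      const int32_t desc_size = static_cast<int32_t>(desc_bytes.size());
      os.write(reinterpret_cast<const char *>(&desc_size), sizeof(desc_size));
      os.write(desc_bytes.data(), desc_size);
      os.write(static_cast<const char *>(tensor.data()),
               tensor.numel() * SizeOfType(desc.data_type()));
    }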
diff --git a/paddle/infrt/paddle/model_parser.h b/paddle/infrt/paddle/model_parser.h
index 373f77033dcefa1a81cd8756da859b6d232337a0..5f039ad5d3ad89e25187f1a316e84f8c03df4b63 100644
--- a/paddle/infrt/paddle/model_parser.h
+++ b/paddle/infrt/paddle/model_parser.h
@@ -25,6 +25,11 @@
 #include "paddle/infrt/paddle/scope.h"
 #include "paddle/infrt/paddle/tensor.h"
 
+#ifdef INFRT_WITH_PHI
+#include "paddle/phi/backends/cpu/cpu_context.h"
+#include "paddle/phi/core/dense_tensor.h"
+#endif
+
 namespace infrt {
 namespace paddle {
 namespace framework_proto = ::paddle::framework::proto;
@@ -53,5 +58,11 @@ void TensorFromStream(
     const common::Target& target = common::DefaultHostTarget());
 void ReadBinaryFile(const std::string& filename, std::string* contents);
 
+#ifdef INFRT_WITH_PHI
+void DeserializeFromStream(std::istream& is,
+                           ::phi::DenseTensor* tensor,
+                           const ::phi::CPUContext& dev_ctx);
+#endif
+
 }  // namespace paddle
 }  // namespace infrt
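Taken together, a minimal caller of the new INFRT_WITH_PHI API might look like the sketch below; the main() wrapper and the parameter-file path are illustrative only:

    #include <fstream>
    #include <memory>

    #include "paddle/infrt/backends/host/phi_allocator.h"
    #include "paddle/infrt/paddle/model_parser.h"
    #include "paddle/phi/backends/cpu/cpu_context.h"
    #include "paddle/phi/core/dense_tensor.h"

    int main() {
      // Wire one CPU allocator into all three allocator slots, as the
      // patched loaders in dense_tensor_kernels.cc do.
      ::phi::CPUContext ctx;
      auto allocator = std::make_unique<::infrt::backends::CpuPhiAllocator>();
      ctx.SetAllocator(allocator.get());
      ctx.SetHostAllocator(allocator.get());
      ctx.SetZeroAllocator(allocator.get());

      std::ifstream param_file("fc_0.w_0", std::ios::binary);  // illustrative
      ::phi::DenseTensor tensor;
      ::infrt::paddle::DeserializeFromStream(param_file, &tensor, ctx);
      // tensor.dims() and tensor.data() now describe the loaded parameter.
      return 0;
    }

This mirrors the patched call sites: build one context, bind the allocators once, then deserialize any number of tensors through it instead of constructing a fresh ::phi::CPUContext per tensor as the old code did.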