Unverified Commit 9f9e591d authored by Wilber, committed by GitHub

remove fluid memory pool (#41862)

Parent 5a103150
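For context: after this change, loading a parameter tensor needs a ::phi::CPUContext with explicitly wired allocators plus infrt's own DeserializeFromStream. Below is a minimal sketch of a hypothetical caller-side helper, assuming INFRT_WITH_PHI is defined and that the new declaration lands in paddle/infrt/paddle/model_parser.h (the header shown in the last hunk):

// Sketch only (hypothetical helper, not part of this commit): load one
// serialized LoDTensor through the new infrt-side path.
#include <fstream>
#include <memory>
#include <string>
#include "paddle/infrt/backends/host/phi_allocator.h"
#include "paddle/infrt/paddle/model_parser.h"  // assumed to declare DeserializeFromStream
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/dense_tensor.h"

void LoadOneParam(const std::string& path, ::phi::DenseTensor* tensor) {
  ::phi::CPUContext ctx;
  // phi contexts no longer fall back to the fluid memory pool, so every
  // allocator role is wired up explicitly before any HostAlloc happens.
  auto allocator = std::make_unique<::infrt::backends::CpuPhiAllocator>();
  ctx.SetAllocator(allocator.get());
  ctx.SetHostAllocator(allocator.get());
  ctx.SetZeroAllocator(allocator.get());
  std::ifstream param_file(path, std::ios::binary);
  ::infrt::paddle::DeserializeFromStream(param_file, tensor, ctx);
}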
......@@ -13,7 +13,9 @@
// limitations under the License.
#include "paddle/infrt/kernel/phi/dense_tensor_kernels.h"
#include <memory>
#include "llvm/Support/ErrorHandling.h"
#include "paddle/infrt/backends/host/phi_allocator.h"
#include "paddle/infrt/common/string.h"
#include "paddle/infrt/dialect/phi/data_type.h"
#include "paddle/infrt/kernel/phi/context_kernels.h"
......@@ -22,24 +24,13 @@
#include "paddle/infrt/tensor/tensor_map.h"
#include "paddle/phi/backends/all_context.h"
#include "paddle/phi/common/place.h"
#include "paddle/phi/core/allocator.h"
#include "paddle/phi/core/dense_tensor.h"
#ifdef INFRT_WITH_GPU
#include <cuda_runtime.h>
#endif
namespace paddle {
namespace platform {
using DeviceContext = ::phi::DeviceContext;
} // namespace platform
namespace framework {
using LoDTensor = ::phi::DenseTensor;
void DeserializeFromStream(std::istream& is,
LoDTensor* tensor,
const platform::DeviceContext& dev_ctx);
}
} // namespace paddle
namespace infrt {
namespace kernel {
namespace phi {
......@@ -198,6 +189,12 @@ void PrintDenseTensor(::phi::DenseTensor* dense_tensor) {
auto pb_proto_prog = paddle::LoadProgram(model_path);
auto main_block = pb_proto_prog->blocks(0);
::phi::CPUContext ctx;
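// With the fluid memory pool removed, a default-constructed CPUContext has
// no allocator; each role below must be set before deserialization allocates.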
auto allocator = std::make_unique<backends::CpuPhiAllocator>();
const auto* allocator_ptr = allocator.get();
ctx.SetAllocator(allocator_ptr);
ctx.SetHostAllocator(allocator_ptr);
ctx.SetZeroAllocator(allocator_ptr);
for (auto& var : main_block.vars()) {
if (var.name() == "feed" || var.name() == "fetch" || !var.persistable())
continue;
......@@ -207,9 +204,7 @@ void PrintDenseTensor(::phi::DenseTensor* dense_tensor) {
case ::paddle::framework::proto::VarType_Type_LOD_TENSOR: {
std::unique_ptr<::phi::DenseTensor> tensor{
std::make_unique<::phi::DenseTensor>()};
::phi::CPUContext ctx;
::paddle::framework::DeserializeFromStream(
param_file, tensor.get(), ctx);
::infrt::paddle::DeserializeFromStream(param_file, tensor.get(), ctx);
map.SetDenseTensor(var.name(), std::move(tensor));
} break;
default: {
......@@ -249,11 +244,16 @@ void PrintDenseTensor(::phi::DenseTensor* dense_tensor) {
}
}
::phi::CPUContext ctx;
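// Same allocator wiring as in the hunk above: the context needs explicit allocators.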
auto allocator = std::make_unique<backends::CpuPhiAllocator>();
const auto* allocator_ptr = allocator.get();
ctx.SetAllocator(allocator_ptr);
ctx.SetHostAllocator(allocator_ptr);
ctx.SetZeroAllocator(allocator_ptr);
for (auto& var : tmp) {
std::unique_ptr<::phi::DenseTensor> tensor{
std::make_unique<::phi::DenseTensor>()};
::phi::CPUContext ctx;
::paddle::framework::DeserializeFromStream(param_file, tensor.get(), ctx);
::infrt::paddle::DeserializeFromStream(param_file, tensor.get(), ctx);
map.SetDenseTensor(var, std::move(tensor));
}
......
......@@ -22,6 +22,10 @@
#include "paddle/infrt/common/target.h"
#include "paddle/infrt/common/type.h"
#ifdef INFRT_WITH_PHI
#include "paddle/phi/common/data_type.h"
#endif
namespace infrt {
namespace paddle {
......@@ -170,5 +174,96 @@ void LoadParam(const std::string &path, _Variable *out, const Target &target) {
LoadLoDTensor(fin, out, target);
}
#ifdef INFRT_WITH_PHI
namespace framework_proto = ::paddle::framework::proto;
inline ::phi::DataType PhiDataType(framework_proto::VarType::Type type) {
using Type = framework_proto::VarType::Type;
switch (static_cast<int>(type)) {
case Type::VarType_Type_BOOL:
return ::phi::DataType::BOOL;
case Type::VarType_Type_INT8:
return ::phi::DataType::INT8;
case Type::VarType_Type_UINT8:
return ::phi::DataType::UINT8;
case Type::VarType_Type_INT16:
return ::phi::DataType::INT16;
case Type::VarType_Type_INT32:
return ::phi::DataType::INT32;
case Type::VarType_Type_INT64:
return ::phi::DataType::INT64;
case Type::VarType_Type_SIZE_T:
return ::phi::DataType::UINT64;
case Type::VarType_Type_FP16:
return ::phi::DataType::FLOAT16;
case Type::VarType_Type_FP32:
return ::phi::DataType::FLOAT32;
case Type::VarType_Type_FP64:
return ::phi::DataType::FLOAT64;
default:
LOG(FATAL) << "unknown data type " << type;
}
return ::phi::DataType::UNDEFINED;
}
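// Example: PhiDataType(framework_proto::VarType_Type_FP32) yields
// ::phi::DataType::FLOAT32; unsupported proto types abort via LOG(FATAL).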
inline void TensorFromStream(std::istream &is,
::phi::DenseTensor *tensor,
const ::phi::CPUContext &ctx) {
uint32_t version;
is.read(reinterpret_cast<char *>(&version), sizeof(version));
CHECK_EQ(version, 0U);
framework_proto::VarType::TensorDesc desc;
{  // read the proto buffer: an int32_t size followed by the TensorDesc bytes
int32_t size = -1;
is.read(reinterpret_cast<char *>(&size), sizeof(size));
CHECK_EQ(is.good(), true);
CHECK_GE(size, 0);
std::unique_ptr<char[]> buf(new char[size]);
is.read(reinterpret_cast<char *>(buf.get()), size);
CHECK_EQ(desc.ParseFromArray(buf.get(), size), true);
}
{ // read tensor
std::vector<int64_t> dims;
dims.reserve(static_cast<size_t>(desc.dims().size()));
std::copy(desc.dims().begin(), desc.dims().end(), std::back_inserter(dims));
tensor->Resize(::phi::make_ddim(dims));
void *buf;
size_t size = tensor->numel() * SizeOfType(desc.data_type());
ctx.HostAlloc(tensor, PhiDataType(desc.data_type()), size);
buf = tensor->data();
is.read(static_cast<char *>(buf), size);
}
}
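// Stream layout consumed above: uint32_t version | int32_t proto size |
// TensorDesc proto bytes | raw tensor data (numel * sizeof(data type)).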
void DeserializeFromStream(std::istream &is,
::phi::DenseTensor *tensor,
const ::phi::CPUContext &dev_ctx) {
{
// the 1st field, uint32_t version for LoDTensor
uint32_t version;
is.read(reinterpret_cast<char *>(&version), sizeof(version));
CHECK_EQ(version, 0U);
}
{
// the 2nd field, LoD information
uint64_t lod_level;
is.read(reinterpret_cast<char *>(&lod_level), sizeof(lod_level));
auto &lod = *tensor->mutable_lod();
lod.resize(lod_level);
for (uint64_t i = 0; i < lod_level; ++i) {
uint64_t size;
is.read(reinterpret_cast<char *>(&size), sizeof(size));
std::vector<size_t> tmp(size / sizeof(size_t));
is.read(reinterpret_cast<char *>(tmp.data()),
static_cast<std::streamsize>(size));
lod[i] = tmp;
}
}
// the 3rd field, Tensor
TensorFromStream(is, tensor, dev_ctx);
}
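// Full LoDTensor record: uint32_t LoD version | uint64_t lod_level |
// per level, a uint64_t byte size plus that many bytes of size_t offsets |
// then the tensor payload handled by TensorFromStream.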
#endif
} // namespace paddle
} // namespace infrt
......@@ -25,6 +25,11 @@
#include "paddle/infrt/paddle/scope.h"
#include "paddle/infrt/paddle/tensor.h"
#ifdef INFRT_WITH_PHI
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/dense_tensor.h"
#endif
namespace infrt {
namespace paddle {
namespace framework_proto = ::paddle::framework::proto;
......@@ -53,5 +58,11 @@ void TensorFromStream(
const common::Target& target = common::DefaultHostTarget());
void ReadBinaryFile(const std::string& filename, std::string* contents);
#ifdef INFRT_WITH_PHI
void DeserializeFromStream(std::istream& is,
::phi::DenseTensor* tensor,
const ::phi::CPUContext& dev_ctx);
#endif
} // namespace paddle
} // namespace infrt