diff --git a/paddle/fluid/lite/core/tensor.h b/paddle/fluid/lite/core/tensor.h
index 62e4301faf586b5b8fd65a1913e85c65e20c0c07..15a2ba3aee1b62cb8d1dd24827f45f5469769e0e 100644
--- a/paddle/fluid/lite/core/tensor.h
+++ b/paddle/fluid/lite/core/tensor.h
@@ -15,6 +15,7 @@
 #pragma once
 #include <algorithm>
 #include <vector>
+#include <numeric>
 #include "memory.h"
 
 namespace paddle {
diff --git a/paddle/fluid/lite/model_parser/model_parser.cc b/paddle/fluid/lite/model_parser/model_parser.cc
index 9b5e676fa12396238b45ad90a5169c681f9eceb2..fb3b1e9ac7f272f0c882d148b6a996a825a462c3 100644
--- a/paddle/fluid/lite/model_parser/model_parser.cc
+++ b/paddle/fluid/lite/model_parser/model_parser.cc
@@ -132,6 +132,12 @@ std::unique_ptr<framework::proto::ProgramDesc> LoadProgram(
 
 void LoadParams(const std::string &path) {}
 
+// Load directly to CPU, and later transfer to other devices.
+void LoadParam(const std::string &path, Variable *out) {
+  std::ifstream fin(path, std::ios::binary);
+  LoadLoDTensor(fin, out);
+}
+
 void LoadModel(const std::string &model_dir, Scope *scope) {
   const std::string prog_path = model_dir + "/__model__";
   auto prog = LoadProgram(prog_path);
diff --git a/paddle/fluid/lite/model_parser/model_parser.h b/paddle/fluid/lite/model_parser/model_parser.h
index f65edabb3f3c569869a35054d0c1526ffa5be43b..358bb9a1abeb6c595c92b62e7079051699ce9843 100644
--- a/paddle/fluid/lite/model_parser/model_parser.h
+++ b/paddle/fluid/lite/model_parser/model_parser.h
@@ -19,6 +19,8 @@
 #include <memory>
 #include <string>
 #include "paddle/fluid/framework/framework.pb.h"
+#include "paddle/fluid/lite/core/tensor.h"
+#include "paddle/fluid/lite/core/variable.h"
 
 namespace paddle {
 namespace lite {
@@ -30,6 +32,9 @@ std::unique_ptr<framework::proto::ProgramDesc> LoadProgram(
 
 // Read a single file containing all the parameters.
 void LoadParams(const std::string& path);
 
+// Load a single parameter to an output tensor.
+void LoadParam(const std::string& path, Variable* out);
+
 // Read a model and files of parameters.
 void LoadModel(const std::string& model_dir);