Commit 55b4e12a authored by yangfei963158659, committed by GitHub

Merge pull request #1257 from yangfei963158659/develop

fix load memory bug
......
@@ -82,6 +82,54 @@ void Loader<GPU_CL, Precision::FP32>::InitMemoryFromProgram(
}
}
}
template <>
const Program<GPU_CL, Precision::FP32>
Loader<GPU_CL, Precision::FP32>::LoadCombinedMemory(
    size_t read_size, const uint8_t *buf, size_t combined_params_len,
    uint8_t *combined_params_buf, bool optimize, bool quantification) {
  bool can_add_split = false;
  PaddleMobile__Framework__Proto__ProgramDesc *c_program;
  PADDLE_MOBILE_ENFORCE(buf != nullptr, "read from __model__ is null");
  c_program = paddle_mobile__framework__proto__program_desc__unpack(
      nullptr, read_size, buf);
  // the unpacked protobuf program must be valid
  PADDLE_MOBILE_ENFORCE(c_program != nullptr, "program is null");
  // log the op count of the first block
  DLOG << "n_ops: " << (*c_program->blocks)->n_ops;
  // build the in-memory ProgramDesc from the unpacked protobuf
  auto originProgramDesc = std::make_shared<ProgramDesc>(c_program);
  Program<GPU_CL, Precision::FP32> program;
  program.combined = true;
  program.originProgram = originProgramDesc;
  program.quantification = quantification;
  program.combined_params_len = combined_params_len;
  program.combined_params_buf = combined_params_buf;
  auto scope = std::make_shared<Scope>();
  program.scope = scope;
  InitMemoryFromProgram(originProgramDesc, scope);
  if (optimize) {
    ProgramOptimize program_optimize;
    program.optimizeProgram =
        program_optimize.FusionOptimize(originProgramDesc, can_add_split);
    if (!program.optimizeProgram) {
      // fall back to the unoptimized program if fusion produced nothing
      program.optimizeProgram = originProgramDesc;
    }
    program.optimizeProgram->Description("optimize: ");
  } else {
    originProgramDesc->Description("program: ");
  }
  // free the unpacked protobuf; the ProgramDesc keeps its own copy
  paddle_mobile__framework__proto__program_desc__free_unpacked(c_program,
                                                               nullptr);
  return program;
}
#endif
/**
......
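For context, a minimal caller sketch for the new entry point, assuming the combined model and params files have already been read into byte buffers; the ReadAll helper, the file paths, and the flag values are hypothetical, not part of this commit:

#include <cstdint>
#include <fstream>
#include <string>
#include <vector>

// Hypothetical helper: read a whole file into a byte vector.
static std::vector<uint8_t> ReadAll(const std::string &path) {
  std::ifstream in(path, std::ios::binary | std::ios::ate);
  std::vector<uint8_t> buf(static_cast<size_t>(in.tellg()));
  in.seekg(0);
  in.read(reinterpret_cast<char *>(buf.data()), buf.size());
  return buf;
}

// Namespace qualifiers omitted; Loader and Precision as in the diff above.
void LoadCombinedSketch() {
  auto model = ReadAll("model");    // hypothetical paths
  auto params = ReadAll("params");
  Loader<GPU_CL, Precision::FP32> loader;
  auto program = loader.LoadCombinedMemory(
      model.size(), model.data(), params.size(), params.data(),
      /*optimize=*/true, /*quantification=*/false);
  // program.scope now holds tensors initialized by InitMemoryFromProgram.
}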
......
@@ -55,11 +55,11 @@ static char *Get_binary_data(std::string filename) {
paddle_mobile::PaddleMobileConfig GetConfig() {
  paddle_mobile::PaddleMobileConfig config;
  config.precision = paddle_mobile::PaddleMobileConfig::FP32;
-  config.device = paddle_mobile::PaddleMobileConfig::kCPU;
+  config.device = paddle_mobile::PaddleMobileConfig::kGPU_CL;
  const std::shared_ptr<paddle_mobile::PaddleModelMemoryPack> &memory_pack =
      std::make_shared<paddle_mobile::PaddleModelMemoryPack>();
-  auto model_path = std::string(g_genet_combine) + "/model";
-  auto params_path = std::string(g_genet_combine) + "/params";
+  auto model_path = std::string(g_mobilenet_combined) + "/model";
+  auto params_path = std::string(g_mobilenet_combined) + "/params";
  memory_pack->model_size =
      ReadBuffer(model_path.c_str(), &memory_pack->model_buf);
  std::cout << "sizeBuf: " << memory_pack->model_size << std::endl;
......
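A hedged sketch of how GetConfig() is typically completed from here; the combined_params_size, from_memory, and memory_pack member names are assumptions about paddle-mobile's memory-pack API, not part of this diff:

// Hypothetical continuation: read the params file the same way, then mark
// the pack as memory-backed so the loader reads from these buffers.
memory_pack->combined_params_size =
    ReadBuffer(params_path.c_str(), &memory_pack->combined_params_buf);
memory_pack->from_memory = true;    // assumed flag on PaddleModelMemoryPack
config.memory_pack = *memory_pack;  // assumed field on PaddleMobileConfig
return config;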