/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "framework/loader.h"

#include "framework/lod_tensor.h"
#include "framework/program/program-optimize/program_optimize.h"
#ifdef PADDLE_MOBILE_CL
#include "framework/cl/cl_image.h"
#endif

namespace paddle_mobile {
namespace framework {

/**
 * Creates and resizes the tensors declared in originProgramDesc inside the
 * given scope (used when loading params).
 *
 * @param originProgramDesc
 * @param scope
 */
template <typename Dtype, Precision P>
void Loader<Dtype, P>::InitMemoryFromProgram(
    const std::shared_ptr<ProgramDesc> &originProgramDesc,
    const std::shared_ptr<Scope> &scope) {
  for (const auto &block : originProgramDesc.get()->Blocks()) {
    for (const auto &var_desc : block->Vars()) {
      auto var = scope.get()->Var(var_desc->Name());
      if (var_desc->Type() == VARTYPE_TYPE_LOD_TENSOR) {
        if (var_desc->Persistable()) {
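          // Persistable variables (e.g. weights/biases) are sized to the
          // full dims recorded in the tensor description.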
          auto dim = var_desc->Tensor_desc().Dims();
          auto tensor = var->GetMutable<LoDTensor>();
          tensor->Resize(make_ddim(dim));
        } else {
          auto dim = var_desc->Tensor_desc().Dims();
          PADDLE_MOBILE_ENFORCE(dim.size() > 0, "dim size is 0");
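          // Non-persistable tensors get a single-batch placeholder shape;
          // the real batch size is presumably set once input is fed.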
          dim[0] = 1;
          auto tensor = var->GetMutable<LoDTensor>();
          tensor->Resize(make_ddim(dim));
        }
      } else {
        // TODO(codeWorm): some.
      }
    }
  }
}

#ifdef PADDLE_MOBILE_CL
template <>
void Loader<GPU_CL, Precision::FP32>::InitMemoryFromProgram(
    const std::shared_ptr<ProgramDesc> &originProgramDesc,
    const std::shared_ptr<Scope> &scope) {
  for (const auto &block : originProgramDesc.get()->Blocks()) {
    for (const auto &var_desc : block->Vars()) {
      auto var = scope.get()->Var(var_desc->Name());
      if (var_desc->Type() == VARTYPE_TYPE_LOD_TENSOR) {
        if (var_desc->Persistable()) {
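          // On GPU_CL, persistable variables are backed by a CLImage
          // (OpenCL image memory) instead of a host LoDTensor.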
          auto dim = var_desc->Tensor_desc().Dims();
          auto cl_image = var->GetMutable<framework::CLImage>();
          cl_image->Resize(make_ddim(dim));
        } else {
          auto dim = var_desc->Tensor_desc().Dims();
          PADDLE_MOBILE_ENFORCE(dim.size() > 0, "dim size is 0");
          dim[0] = 1;
          auto cl_image = var->GetMutable<framework::CLImage>();
          cl_image->Resize(make_ddim(dim));
        }
      } else {
        // TODO(codeWorm): some.
      }
    }
  }
}
template <>
const Program<GPU_CL, Precision::FP32>
Loader<GPU_CL, Precision::FP32>::LoadCombinedMemory(
    size_t read_size, const uint8_t *buf, size_t combined_params_len,
    uint8_t *combined_params_buf, bool optimize, bool quantification) {
  bool can_add_split = false;

  PaddleMobile__Framework__Proto__ProgramDesc *c_program;
  PADDLE_MOBILE_ENFORCE(buf != nullptr, "read from __model__ is null");

  c_program = paddle_mobile__framework__proto__program_desc__unpack(
      nullptr, read_size, buf);
  PADDLE_MOBILE_ENFORCE(c_program != nullptr, "program is null");
  DLOG << "n_ops: " << (*c_program->blocks)->n_ops;

  auto originProgramDesc = std::make_shared<ProgramDesc>(c_program);

  Program<GPU_CL, Precision::FP32> program;
  program.combined = true;
  program.originProgram = originProgramDesc;
  program.quantification = quantification;
  program.combined_params_len = combined_params_len;
  program.combined_params_buf = combined_params_buf;

  auto scope = std::make_shared<Scope>();
  program.scope = scope;
  InitMemoryFromProgram(originProgramDesc, scope);
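  // The block below duplicates the fusion + description logic of
  // FusionAndPrintInfos (defined later in this file).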
  if (optimize) {
    ProgramOptimize program_optimize;
    program.optimizeProgram =
        program_optimize.FusionOptimize(originProgramDesc, can_add_split);
    if (!program.optimizeProgram) {
      program.optimizeProgram = originProgramDesc;
    }
  }
  if (optimize) {
    program.optimizeProgram->Description("optimize: ");
  } else {
    originProgramDesc->Description("program: ");
  }
  paddle_mobile__framework__proto__program_desc__free_unpacked(c_program,
                                                               nullptr);
  return program;
}

#endif

/**
 * Performs operator fusion (when optimize is set) and prints the program
 * description.
 * @tparam Dtype
 * @tparam P
 * @param optimize
 * @param can_add_split
 * @param program
 * @param originProgramDesc
 */
template <typename Dtype, Precision P>
void FusionAndPrintInfos(
    bool optimize, bool can_add_split, Program<Dtype, P> *program,
    const std::shared_ptr<ProgramDesc> &originProgramDesc) {
  if (optimize) {
    ProgramOptimize program_optimize;
    program->optimizeProgram =
        program_optimize.FusionOptimize(originProgramDesc, can_add_split);
    if (!program->optimizeProgram) {
      program->optimizeProgram = originProgramDesc;
    }
  }
  if (optimize) {
    program->optimizeProgram->Description("optimize: ");
  } else {
    originProgramDesc->Description("program: ");
  }
}

static size_t ReadBuffer(const char *file_name, uint8_t **out) {
  FILE *fp = fopen(file_name, "rb");
  PADDLE_MOBILE_ENFORCE(fp != NULL, " %s open failed !", file_name);

  fseek(fp, 0, SEEK_END);
  size_t size = ftell(fp);
  rewind(fp);

  DLOG << "model size: " << size;

  *out = reinterpret_cast<uint8_t *>(malloc(size));
  PADDLE_MOBILE_ENFORCE(*out != NULL, "malloc model buffer failed !");

  // fread may deliver fewer bytes than requested, so keep reading until EOF.
  size_t cur_len = 0;
  size_t nread;
  while ((nread = fread(*out + cur_len, 1, size - cur_len, fp)) != 0) {
    cur_len += nread;
  }
  fclose(fp);
  return cur_len;
}

template <typename Dtype, Precision P>
const Program<Dtype, P> Loader<Dtype, P>::Load(const std::string &dirname,
                                               bool optimize,
                                               bool quantification,
                                               bool can_add_split) {
  auto program = this->LoadProgram(dirname + "/__model__", optimize,
                                   quantification, can_add_split);
  program.model_path = dirname;
  return program;
}
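
// Minimal usage sketch (illustrative; "model_dir" is a hypothetical directory
// holding a __model__ file plus the parameter files):
//
//   Loader<CPU, Precision::FP32> loader;
//   auto program = loader.Load("model_dir", /*optimize=*/true,
//                              /*quantification=*/false,
//                              /*can_add_split=*/false);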

template <typename Dtype, Precision P>
const Program<Dtype, P> Loader<Dtype, P>::Load(const std::string &model_path,
                                               const std::string &para_path,
                                               bool optimize,
                                               bool quantification) {
  auto program = this->LoadProgram(model_path, optimize, quantification);

  program.para_path = para_path;
  program.combined = true;
  program.quantification = quantification;
  return program;
}
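
// Combined-format sketch (illustrative paths): a single model file plus a
// single params file.
//
//   auto program = loader.Load("model", "params", /*optimize=*/true,
//                              /*quantification=*/false);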

template <typename Dtype, Precision P>
const Program<Dtype, P> Loader<Dtype, P>::LoadProgram(
    const std::string &model_path, bool optimize, bool quantification,
    bool can_add_split) {
  PaddleMobile__Framework__Proto__ProgramDesc *c_program;
  uint8_t *buf = NULL;
  size_t read_size = ReadBuffer(model_path.c_str(), &buf);

  PADDLE_MOBILE_ENFORCE(buf != NULL, "read from __model__ is null");

  c_program = paddle_mobile__framework__proto__program_desc__unpack(
      NULL, read_size, buf);
  PADDLE_MOBILE_ENFORCE(c_program != NULL, "program is null");
  DLOG << "n_ops: " << (*c_program->blocks)->n_ops;
  // The unpacked ProgramDesc holds its own copies of the data, so the raw
  // model buffer can be released here.
  free(buf);
  auto originProgramDesc = std::make_shared<ProgramDesc>(c_program);

  Program<Dtype, P> program;
  program.originProgram = originProgramDesc;
  program.quantification = quantification;
  program.combined_params_len = 0;
  program.combined_params_buf = nullptr;
  auto scope = std::make_shared<Scope>();
  program.scope = scope;

  // Use originProgramDesc and scope to initialize the tensors.
  InitMemoryFromProgram(originProgramDesc, scope);
  // Perform fusion and print program info.
  FusionAndPrintInfos(optimize, can_add_split, &program, originProgramDesc);

  paddle_mobile__framework__proto__program_desc__free_unpacked(c_program, NULL);
  return program;
}

template <typename Dtype, Precision P>
const Program<Dtype, P> Loader<Dtype, P>::LoadCombinedMemory(
    size_t read_size, const uint8_t *buf, size_t combined_params_len,
    uint8_t *combined_params_buf, bool optimize, bool quantification) {
  bool can_add_split = false;

  PaddleMobile__Framework__Proto__ProgramDesc *c_program;
  PADDLE_MOBILE_ENFORCE(buf != nullptr, "read from __model__ is null");

  c_program = paddle_mobile__framework__proto__program_desc__unpack(
      nullptr, read_size, buf);
  PADDLE_MOBILE_ENFORCE(c_program != nullptr, "program is null");
  DLOG << "n_ops: " << (*c_program->blocks)->n_ops;

  auto originProgramDesc = std::make_shared<ProgramDesc>(c_program);

  Program<Dtype, P> program;
  program.combined = true;
  program.originProgram = originProgramDesc;
  program.quantification = quantification;
  program.combined_params_len = combined_params_len;
  program.combined_params_buf = combined_params_buf;
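  // Note: the raw params pointer is stored as-is (not copied), so the caller
  // must keep combined_params_buf alive for as long as the Program is used.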

  auto scope = std::make_shared<Scope>();
  program.scope = scope;
  InitMemoryFromProgram(originProgramDesc, scope);
  FusionAndPrintInfos(optimize, can_add_split, &program, originProgramDesc);
  paddle_mobile__framework__proto__program_desc__free_unpacked(c_program,
                                                               nullptr);
  return program;
}
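
// In-memory sketch (illustrative): the caller reads the combined model and
// params files into buffers first.
//
//   uint8_t *model_buf = nullptr;
//   size_t model_len = ReadBuffer("model", &model_buf);
//   uint8_t *params_buf = nullptr;
//   size_t params_len = ReadBuffer("params", &params_buf);
//   auto program = loader.LoadCombinedMemory(model_len, model_buf, params_len,
//                                            params_buf, /*optimize=*/true,
//                                            /*quantification=*/false);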

template class Loader<CPU, Precision::FP32>;

template class Loader<FPGA, Precision::FP32>;

template class Loader<GPU_MALI, Precision::FP32>;

template class Loader<GPU_CL, Precision::FP32>;

}  // namespace framework
}  // namespace paddle_mobile