/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <map>
#include <memory>
#include <string>
#include <vector>

#include "common/types.h"
#include "framework/lod_tensor.h"
#include "framework/operator.h"
#include "framework/program/program.h"
#include "framework/tensor.h"

namespace paddle_mobile {

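// Loader reads a saved program (model description and parameters) from
// `dirname` and returns it as a framework::Program for the requested
// device type and precision; `optimize` selects whether an optimized
// version of the program is produced.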
template <typename Dtype = CPU, Precision P = Precision::FP32>
class Loader {
 public:
  const framework::Program<Dtype, P> Load(const std::string &dirname,
                                          bool optimize = false);

 private:
  void LoadVar(framework::Variable *variable,
               const framework::VarDesc &var_desc,
               const std::string &file_path);
};

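// Executor runs inference over a loaded framework::Program. Predict()
// accepts either an already prepared framework::Tensor or a flat input
// vector together with its dimensions, and returns the network output.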
template <typename Dtype = CPU, Precision P = Precision::FP32>
class Executor {
 public:
  typedef typename PrecisionTrait<P>::ptype Ptype;

  Executor(const framework::Program<Dtype> p, int batch_size = 1,
           bool use_optimize = true);

  std::shared_ptr<framework::Tensor> Predict(const framework::Tensor &t);

  std::vector<Ptype> Predict(const std::vector<Ptype> &input,
                             const std::vector<int64_t> &dims);

 protected:
  Executor() = default;

  void InitMemory();
  void LoadMemory(const framework::VarDesc var_desc,
                  framework::LoDTensor *tensor, const std::string &file_path);
  framework::Program<Dtype> program_;
  int batch_size_ = 1;
  std::shared_ptr<framework::ProgramDesc> to_predict_program_;
  std::shared_ptr<framework::Tensor> Predict(const framework::Tensor &t,
                                             int block_id);
  std::map<framework::BlockDesc,
           std::vector<std::shared_ptr<framework::OperatorBase<Dtype>>>>
      ops_of_block_;
  bool use_optimize_ = false;
};

}  // namespace paddle_mobile
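// Minimal usage sketch (illustrative only, not part of this header). The
// model directory and input shape below are placeholders, and
// framework::Program is assumed to use the same FP32 default precision as
// the classes above:
//
//   paddle_mobile::Loader<> loader;
//   auto program = loader.Load("<model_dir>", /*optimize=*/false);
//   paddle_mobile::Executor<> executor(program, /*batch_size=*/1,
//                                      /*use_optimize=*/false);
//   std::vector<paddle_mobile::Executor<>::Ptype> input(1 * 3 * 224 * 224);
//   std::vector<int64_t> dims = {1, 3, 224, 224};
//   std::vector<paddle_mobile::Executor<>::Ptype> output =
//       executor.Predict(input, dims);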