// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <glog/logging.h>
#include <gtest/gtest.h>

#include "paddle/fluid/inference/lite/engine.h"
#include "paddle/fluid/inference/utils/singleton.h"
#include "paddle/fluid/operators/lite/ut_helper.h"

#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/op_desc.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/scope.h"

namespace paddle {
namespace inference {
namespace lite {

using inference::lite::AddTensorToBlockDesc;
using inference::lite::CreateTensor;
using inference::lite::serialize_params;

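// Builds a fake program (feed x, feed y -> elementwise_add -> fetch) and
// serializes both the program and its parameters to strings, so the engine
// under test can be created entirely from in-memory buffers.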
void make_fake_model(std::string* model, std::string* param) {
  framework::ProgramDesc program;
  LOG(INFO) << "program.block size is " << program.Size();
  auto* block_ = program.Proto()->mutable_blocks(0);
  LOG(INFO) << "create block desc";
  framework::BlockDesc block_desc(&program, block_);
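  // Append two feed ops that read columns 0 and 1 of the "feed" variable
  // into the inputs "x" and "y".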
  auto* feed0 = block_desc.AppendOp();
  feed0->SetType("feed");
  feed0->SetInput("X", {"feed"});
  feed0->SetOutput("Out", {"x"});
  feed0->SetAttr("col", 0);
  auto* feed1 = block_desc.AppendOp();
  feed1->SetType("feed");
  feed1->SetInput("X", {"feed"});
  feed1->SetOutput("Out", {"y"});
  feed1->SetAttr("col", 1);
  LOG(INFO) << "create elementwise_add op";
  auto* elt_add = block_desc.AppendOp();
  elt_add->SetType("elementwise_add");
  elt_add->SetInput("X", std::vector<std::string>({"x"}));
  elt_add->SetInput("Y", std::vector<std::string>({"y"}));
  elt_add->SetOutput("Out", std::vector<std::string>({"z"}));
  elt_add->SetAttr("axis", -1);
  LOG(INFO) << "create fetch op";
  auto* fetch = block_desc.AppendOp();
  fetch->SetType("fetch");
  fetch->SetInput("X", std::vector<std::string>({"z"}));
  fetch->SetOutput("Out", std::vector<std::string>({"out"}));
  fetch->SetAttr("col", 0);
  // Set inputs' variable shape in BlockDesc
  AddTensorToBlockDesc(block_, "x", std::vector<int64_t>({2, 4}), true);
  AddTensorToBlockDesc(block_, "y", std::vector<int64_t>({2, 4}), true);
  AddTensorToBlockDesc(block_, "z", std::vector<int64_t>({2, 4}), false);
  AddTensorToBlockDesc(block_, "out", std::vector<int64_t>({2, 4}), false);

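  // AppendOp only records the ops in the BlockDesc wrapper; copy each op's
  // proto into the raw block so the serialized program actually contains them.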
  *block_->add_ops() = *feed0->Proto();
  *block_->add_ops() = *feed1->Proto();
  *block_->add_ops() = *elt_add->Proto();
  *block_->add_ops() = *fetch->Proto();

  framework::Scope scope;
#ifdef PADDLE_WITH_CUDA
  platform::CUDAPlace place;
  platform::CUDADeviceContext ctx(place);
#else
  platform::CPUPlace place;
  platform::CPUDeviceContext ctx(place);
#endif
  // Create the input tensors "x" and "y" in the scope; these are the
  // variables serialized below as parameters.
  std::vector<std::string> repetitive_params{"x", "y"};
  CreateTensor(&scope, "x", std::vector<int64_t>({2, 4}));
  CreateTensor(&scope, "y", std::vector<int64_t>({2, 4}));
  ASSERT_EQ(block_->ops_size(), 4);
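  // Serialize the program proto and the parameter tensors ("x", "y") into
  // the output strings.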
  *model = program.Proto()->SerializeAsString();
  serialize_params(param, &scope, repetitive_params);
}

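// Exercises the EngineManager singleton end to end: create an engine from an
// in-memory model, look it up by key, then delete it again.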
TEST(EngineManager, engine) {
  ASSERT_EQ(
      inference::Singleton<inference::lite::EngineManager>::Global().Empty(),
      true);

  inference::lite::EngineConfig config;
  make_fake_model(&(config.model), &(config.param));
  LOG(INFO) << "prepare config";

  const std::string unique_key("engine_0");
  config.model_from_memory = true;
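  // Places the engine is allowed to run on, listed in order of preference
  // (CUDA first when it is available).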
  config.valid_places = {
#ifdef PADDLE_WITH_CUDA
      paddle::lite_api::Place({TARGET(kCUDA), PRECISION(kFloat)}),
#endif
      paddle::lite_api::Place({TARGET(kX86), PRECISION(kFloat)}),
      paddle::lite_api::Place({TARGET(kHost), PRECISION(kAny)}),
  };

  LOG(INFO) << "Create EngineManager";
  inference::Singleton<inference::lite::EngineManager>::Global().Create(
      unique_key, config);
  LOG(INFO) << "Create EngineManager done";
  ASSERT_EQ(
      inference::Singleton<inference::lite::EngineManager>::Global().Empty(),
      false);
  ASSERT_EQ(inference::Singleton<inference::lite::EngineManager>::Global().Has(
                unique_key),
            true);
  paddle::lite_api::PaddlePredictor* engine_0 =
      inference::Singleton<inference::lite::EngineManager>::Global().Get(
          unique_key);
  CHECK_NOTNULL(engine_0);
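
  // A minimal sketch of how the predictor could be exercised once created.
  // It assumes the standard lite_api::PaddlePredictor tensor interface; the
  // fake model above only exists to test engine bookkeeping, so the sketch
  // is left commented out rather than executed:
  //
  //   auto x = engine_0->GetInput(0);
  //   x->Resize({2, 4});
  //   std::fill_n(x->mutable_data<float>(), 8, 1.f);
  //   auto y = engine_0->GetInput(1);
  //   y->Resize({2, 4});
  //   std::fill_n(y->mutable_data<float>(), 8, 2.f);
  //   engine_0->Run();
  //   auto z = engine_0->GetOutput(0);  // each element should be 3.f
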
  inference::Singleton<inference::lite::EngineManager>::Global().DeleteAll();
  CHECK(inference::Singleton<inference::lite::EngineManager>::Global().Get(
            unique_key) == nullptr)
      << "the engine_0 should be nullptr";
}

}  // namespace lite
}  // namespace inference
}  // namespace paddle