// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/lite/core/op_lite.h"
#include <list>
#include <set>
#include <utility>
#include <vector>
#include "paddle/fluid/lite/core/op_registry.h"

namespace paddle {
namespace lite {

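// Create every kernel registered for this op (or for `kernel_type` when it is
// non-empty) over the requested places. Each place is additionally widened to
// kAny Precision and kAny DataLayout so that generic kernels are considered.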
std::vector<std::unique_ptr<KernelBase>> OpLite::CreateKernels(
    const std::vector<Place> &places, const std::string &kernel_type) {
  std::vector<std::unique_ptr<KernelBase>> kernels;
  CHECK(!op_type_.empty()) << "op_type_ should be set first";

  auto pick_kernel = [&](const Place &place) {
    auto ks = KernelRegistry::Global().Create(
        (kernel_type.empty() ? op_type_ : kernel_type), place.target,
        place.precision, place.layout);
    for (auto &&it : ks) {
      AttachKernel(it.get());
      kernels.emplace_back(std::move(it));
    }
  };

  std::set<Place> place_set;
  for (auto place : places) {
    place_set.insert(place);
    // Also pick kernels that support any Precision and any DataLayout
    place.precision = PRECISION(kAny);
    place_set.insert(place);
    place.layout = DATALAYOUT(kAny);
    place_set.insert(place);
  }

  std::set<TargetType> targets;
  for (auto place : place_set) {
    pick_kernel(place);
    targets.insert(place.target);
  }

  CHECK(!kernels.empty()) << "No kernel found for Op " << op_type_;
  VLOG(2) << "op " << op_type_ << " get " << kernels.size() << " kernels";
  return kernels;
}

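// Execute the op: synchronize input events, launch the attached kernel, then
// record output events.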
bool OpLite::Run() {
  CHECK(kernel_);
  SyncInputEvents();

  kernel_->Launch();

  RecordOutputEvents();
  return true;
}

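// Bind the op to a scope and rebuild its OpInfo from the op descriptor, then
// delegate op-specific input/output/attribute parsing to AttachImpl().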
bool OpLite::Attach(const cpp::OpDesc &opdesc, lite::Scope *scope) {
  // valid_places_.clear();
  CHECK(scope != nullptr);
  // CHECK(!op_info_.get());
  scope_ = scope;
  op_info_.reset(
      new OpInfo(opdesc));  // Force clean the out-of-date information.
  return AttachImpl(opdesc, scope);
}

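// Fetch a read-only tensor from the scope by variable name; aborts if the
// variable does not exist.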
const Tensor *OpLite::GetTensor(lite::Scope *scope,
                                const std::string &name) const {
  auto *var = scope->FindVar(name);
  CHECK(var) << "no variable called " << name << " found";
  return &var->Get<lite::Tensor>();
}

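// Fetch a mutable tensor from the scope by variable name; aborts if the
// variable does not exist.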
Tensor *OpLite::GetMutableTensor(lite::Scope *scope,
                                 const std::string &name) const {
  auto *var = scope->FindVar(name);
  CHECK(var) << "no variable called " << name << " found";
  return var->GetMutable<lite::Tensor>();
}

}  // namespace lite
}  // namespace paddle