// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/lite/core/op_lite.h"
#include <list>
#include <set>
#include <utility>
#include <vector>
#include "paddle/fluid/lite/core/op_registry.h"

namespace paddle {
namespace lite {

std::vector<std::unique_ptr<KernelBase>> OpLite::CreateKernels(
S
superjomn 已提交
26
    const std::vector<Place> &places, const std::string &kernel_type) {
S
update  
superjomn 已提交
27 28 29
  std::vector<std::unique_ptr<KernelBase>> kernels;
  CHECK(!op_type_.empty()) << "op_type_ should be set first";

30
  auto pick_kernel = [&](const Place &place) {
31 32
    auto ks = KernelRegistry::Global().Create(op_type_, place.target,
                                              place.precision, place.layout);
C
Chunwei 已提交
33 34
    VLOG(5) << "pick kernel for " << op_info()->Type() << " " << place
            << " get " << ks.size() << " kernels";
35
    for (auto &&it : ks) {
36
      AttachKernel(it.get());
37 38
      kernels.emplace_back(std::move(it));
    }
39 40
  };

41 42 43 44 45 46 47 48 49
  if (!kernel_type.empty()) {
    Place place;
    std::string op_type, alias;
    KernelBase::ParseKernelType(kernel_type, &op_type, &alias, &place);
    pick_kernel(place);
    CHECK(!kernels.empty()) << "no kernel for kernel type " << kernel_type;
    return kernels;
  }

50 51 52 53 54 55 56 57 58 59 60 61 62 63
  std::set<Place> place_set;
  for (auto place : places) {
    place_set.insert(place);
    // Pick kernels those support any Precision and any DataLayout
    place.precision = PRECISION(kAny);
    place_set.insert(place);
    place.layout = DATALAYOUT(kAny);
    place_set.insert(place);
  }

  std::set<TargetType> targets;
  for (auto place : place_set) {
    pick_kernel(place);
    targets.insert(place.target);
S
update  
superjomn 已提交
64 65
  }

S
Superjomn 已提交
66
  VLOG(2) << "op " << op_type_ << " get " << kernels.size() << " kernels";
S
update  
superjomn 已提交
67 68 69
  return kernels;
}

bool OpLite::Run() {
  CHECK(kernel_);
  SyncInputEvents();

74
  kernel_->Launch();
S
superjomn 已提交
75 76 77 78 79

  RecordOutputEvents();
  return true;
}

// Binds this op to a scope and to the attributes in an op descriptor.
//
// @param opdesc Descriptor carrying the op's inputs, outputs and attributes;
//               a fresh OpInfo is rebuilt from it on every call.
// @param scope  Variable scope the op will read from and write to; must be
//               non-null (CHECK-enforced).
// @return Whatever the concrete op's AttachImpl() returns.
bool OpLite::Attach(const cpp::OpDesc &opdesc, lite::Scope *scope) {
  // valid_places_.clear();
  CHECK(scope != nullptr);
  // CHECK(!op_info_.get());
  scope_ = scope;
  op_info_.reset(
      new OpInfo(opdesc));  // Force clean the out-of-date information.
  return AttachImpl(*op_info(), scope);
}

// Looks up the variable `name` in `scope` and returns a read-only pointer
// to the lite::Tensor it holds. CHECK-fails if the variable is absent.
const Tensor *OpLite::GetTensor(lite::Scope *scope,
                                const std::string &name) const {
  auto *variable = scope->FindVar(name);
  CHECK(variable) << "no variable called " << name << " found";
  const auto &tensor = variable->Get<lite::Tensor>();
  return &tensor;
}

// Looks up the variable `name` in `scope` and returns a writable pointer
// to the lite::Tensor it holds. CHECK-fails if the variable is absent.
Tensor *OpLite::GetMutableTensor(lite::Scope *scope,
                                 const std::string &name) const {
  auto *variable = scope->FindVar(name);
  CHECK(variable) << "no variable called " << name << " found";
  return variable->GetMutable<lite::Tensor>();
}

}  // namespace lite
}  // namespace paddle