// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/lite/core/op_registry.h"
#include <list>
#include <set>

namespace paddle {
namespace lite {

22
std::list<std::unique_ptr<KernelBase>> KernelRegistry::Create(
S
superjomn 已提交
23 24 25
    const std::string &op_type, TargetType target, PrecisionType precision,
    DataLayoutType layout) {
  Place place{target, precision, layout};
S
Superjomn 已提交
26
  VLOG(5) << "creating " << op_type << " kernel for " << place;
S
superjomn 已提交
27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49
#define CREATE_KERNEL1(target__, precision__)                                \
  switch (layout) {                                                          \
    case DATALAYOUT(kNCHW):                                                  \
      return Create<TARGET(target__), PRECISION(precision__),                \
                    DATALAYOUT(kNCHW)>(op_type);                             \
    case DATALAYOUT(kAny):                                                   \
      return Create<TARGET(target__), PRECISION(precision__),                \
                    DATALAYOUT(kAny)>(op_type);                              \
    default:                                                                 \
      LOG(FATAL) << "unsupported kernel layout " << DataLayoutToStr(layout); \
  }

#define CREATE_KERNEL(target__)                         \
  switch (precision) {                                  \
    case PRECISION(kFloat):                             \
      CREATE_KERNEL1(target__, kFloat);                 \
    case PRECISION(kInt8):                              \
      CREATE_KERNEL1(target__, kInt8);                  \
    case PRECISION(kAny):                               \
      CREATE_KERNEL1(target__, kAny);                   \
    default:                                            \
      CHECK(false) << "not supported kernel precision " \
                   << PrecisionToStr(precision);        \
S
superjomn 已提交
50 51 52 53 54 55 56 57 58 59 60 61
  }

  switch (target) {
    case TARGET(kHost): {
      CREATE_KERNEL(kHost);
    } break;
    case TARGET(kX86): {
      CREATE_KERNEL(kX86);
    } break;
    case TARGET(kCUDA): {
      CREATE_KERNEL(kCUDA);
    } break;
62 63 64
    case TARGET(kARM): {
      CREATE_KERNEL(kARM);
    } break;
S
superjomn 已提交
65
    default:
S
superjomn 已提交
66
      CHECK(false) << "not supported kernel target " << TargetToStr(target);
S
superjomn 已提交
67 68 69
  }

#undef CREATE_KERNEL
70
  return std::list<std::unique_ptr<KernelBase>>();
S
superjomn 已提交
71 72
}

// Pre-sizes `registries_` to one slot per (target, precision, layout)
// combination, then installs the per-combination singleton registries.
KernelRegistry::KernelRegistry()
    : registries_(static_cast<int>(TARGET(NUM)) *
                  static_cast<int>(PRECISION(NUM)) *
                  static_cast<int>(DATALAYOUT(NUM))) {
// Installs the global KernelRegistryForTarget singleton for one
// (target, precision, layout) combination at its flattened offset
// (computed by GetKernelOffset) in `registries_`.
#define INIT_FOR(target__, precision__, layout__)                            \
  registries_[KernelRegistry::GetKernelOffset<TARGET(target__),              \
                                              PRECISION(precision__),        \
                                              DATALAYOUT(layout__)>()]       \
      .set<KernelRegistryForTarget<TARGET(target__), PRECISION(precision__), \
                                   DATALAYOUT(layout__)> *>(                 \
          &KernelRegistryForTarget<TARGET(target__), PRECISION(precision__), \
                                   DATALAYOUT(layout__)>::Global());
  // Currently registers four kernel targets: kCUDA, kHost, kX86 and kARM.
  INIT_FOR(kCUDA, kFloat, kNCHW);
  INIT_FOR(kCUDA, kAny, kNCHW);
  INIT_FOR(kCUDA, kAny, kAny);

  INIT_FOR(kHost, kFloat, kNCHW);
  INIT_FOR(kHost, kAny, kNCHW);
  INIT_FOR(kHost, kAny, kAny);

  INIT_FOR(kX86, kFloat, kNCHW);
  INIT_FOR(kX86, kAny, kNCHW);
  INIT_FOR(kX86, kAny, kAny);

  INIT_FOR(kARM, kFloat, kNCHW);
  INIT_FOR(kARM, kAny, kNCHW);
  INIT_FOR(kARM, kAny, kAny);
#undef INIT_FOR
}

// Returns the process-wide kernel registry.
// The instance is heap-allocated and intentionally never deleted —
// presumably to sidestep static-destruction-order issues at shutdown
// (TODO confirm with the original authors).
KernelRegistry &KernelRegistry::Global() {
  static KernelRegistry *instance = new KernelRegistry;
  return *instance;
}

}  // namespace lite
}  // namespace paddle