/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */

#pragma once

#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>
#include "paddle/fluid/operators/jit/gen_base.h"
#include "paddle/fluid/operators/jit/kernel_base.h"
#include "paddle/fluid/operators/jit/kernel_key.h"
#include "paddle/fluid/operators/jit/kernel_pool.h"
#include "paddle/fluid/platform/place.h"

namespace paddle {
namespace operators {
namespace jit {

template <KernelType KT, typename KernelTuples, typename PlaceType>
T
tensor-tang 已提交
31 32 33 34
inline typename std::enable_if<
    std::is_same<typename KernelTuples::data_type, float>::value &&
        std::is_same<PlaceType, platform::CPUPlace>::value,
    typename KernelTuples::func_type>::type
T
tensor-tang 已提交
35
GetJitCode(const typename KernelTuples::attr_type& attr) {
T
tensor-tang 已提交
36 37
  using Func = typename KernelTuples::func_type;
  using Attr = typename KernelTuples::attr_type;
T
tensor-tang 已提交
38 39 40 41 42 43
  size_t key = JitCodeKey<Attr>(attr);
  auto& codes = JitCodePool<KT>().Instance();
  if (codes.Has(key)) {
    return codes.AllKernels().at(key)->template getCode<Func>();
  }

T
tensor-tang 已提交
44
  // creator is not related with attr, so can use KernelKey as key
T
tensor-tang 已提交
45
  KernelKey kkey(KT, PlaceType());
T
tensor-tang 已提交
46 47 48 49 50 51 52 53 54 55 56 57 58
  // pool: (KernelKey(type, place), vector<GenCreatorPtr>)
  auto& creator_map = JitCodeCreatorPool().Instance().AllCreators();
  auto iter = creator_map.find(kkey);
  if (iter != creator_map.end()) {
    auto& creators = iter->second;
    for (auto& cur : creators) {
      auto i = dynamic_cast<const JitCodeCreator<Attr>*>(cur.get());
      if (i && i->UseMe(attr)) {
        auto p = i->CreateJitCode(attr);
        if (p) {
          auto f = p->template getCode<Func>();
          codes.Insert(key, std::move(p));
          return f;
T
tensor-tang 已提交
59 60 61 62
        }
      }
    }
  }
T
tensor-tang 已提交
63 64 65
  return nullptr;
}

// Fallback overload: JIT code generation is only supported for float
// kernels on CPUPlace, so every other combination yields nullptr.
template <KernelType KT, typename KernelTuples, typename PlaceType>
inline typename std::enable_if<
    !(std::is_same<typename KernelTuples::data_type, float>::value &&
      std::is_same<PlaceType, platform::CPUPlace>::value),
    typename KernelTuples::func_type>::type
GetJitCode(const typename KernelTuples::attr_type& attr) {
  return nullptr;
}

// Refer code do not related with attr, which is just for cast
// Refer is always on CPUPlace
T
tensor-tang 已提交
77 78
template <KernelType KT, typename KernelTuples>
inline typename KernelTuples::func_type GetRefer() {
T
tensor-tang 已提交
79 80 81 82 83 84 85
  auto& ref_pool = ReferKernelPool().Instance().AllKernels();
  KernelKey kkey(KT, platform::CPUPlace());
  auto ref_iter = ref_pool.find(kkey);
  PADDLE_ENFORCE(ref_iter != ref_pool.end(),
                 "Every Kernel should have reference function.");
  auto& ref_impls = ref_iter->second;
  for (auto& impl : ref_impls) {
T
tensor-tang 已提交
86
    auto i = dynamic_cast<const ReferKernel<KernelTuples>*>(impl.get());
T
tensor-tang 已提交
87 88 89 90 91 92 93
    if (i) {
      return i->GetFunc();
    }
  }
  return nullptr;
}

template <KernelType KT, typename KernelTuples,
T
tensor-tang 已提交
95
          typename PlaceType = platform::CPUPlace>
T
tensor-tang 已提交
96 97
typename KernelTuples::func_type Get(
    const typename KernelTuples::attr_type& attr) {
T
tensor-tang 已提交
98
  auto jitfunc = GetJitCode<KT, KernelTuples, PlaceType>(attr);
T
tensor-tang 已提交
99 100 101
  if (jitfunc) {
    return jitfunc;
  }
T
tensor-tang 已提交
102 103

  // pool: (KernelKey(type, place), vector<KernelPtr>)
T
tensor-tang 已提交
104
  KernelKey kkey(KT, PlaceType());
T
tensor-tang 已提交
105 106 107 108 109
  auto& pool = KernelPool().Instance().AllKernels();
  auto iter = pool.find(kkey);
  if (iter != pool.end()) {
    auto& impls = iter->second;
    for (auto& impl : impls) {
T
tensor-tang 已提交
110
      auto i = dynamic_cast<const KernelMore<KernelTuples>*>(impl.get());
T
tensor-tang 已提交
111 112 113 114 115 116 117
      if (i && i->UseMe(attr)) {
        return i->GetFunc();
      }
    }
  }

  // The last implementation should be reference function on CPUPlace.
T
tensor-tang 已提交
118
  return GetRefer<KT, KernelTuples>();
T
tensor-tang 已提交
119 120
}

template <KernelType KT, typename KernelTuples, typename PlaceType>
class KernelFuncs {
T
tensor-tang 已提交
123
 public:
T
tensor-tang 已提交
124 125 126
  KernelFuncs() = default;
  static KernelFuncs& Cache() {
    static thread_local KernelFuncs<KT, KernelTuples, PlaceType> g_func_cache;
T
tensor-tang 已提交
127 128 129 130 131 132 133 134 135
    return g_func_cache;
  }

  bool Has(int key) const { return funcs_.find(key) != funcs_.end(); }

  void Insert(int key, typename KernelTuples::func_type func) {
    funcs_.emplace(key, func);
  }

T
tensor-tang 已提交
136 137 138 139 140 141 142 143 144
  typename KernelTuples::func_type At(int key) {
    if (Has(key)) {
      return funcs_.at(key);
    }
    auto func = Get<KT, KernelTuples, PlaceType>(key);
    Insert(key, func);
    return func;
  }

T
tensor-tang 已提交
145 146
 private:
  std::unordered_map<int, typename KernelTuples::func_type> funcs_;
T
tensor-tang 已提交
147
  DISABLE_COPY_AND_ASSIGN(KernelFuncs);
T
tensor-tang 已提交
148 149
};

const char* to_string(KernelType kt);
const char* to_string(SeqPoolType kt);

KernelType to_kerneltype(const std::string& act);

// Pretty-print an lstm_attr_t, one bracketed field per statement.
inline std::ostream& operator<<(std::ostream& os, const lstm_attr_t& attr) {
  os << "dim_size[" << attr.d << "]";
  os << ",act_gate[" << to_string(attr.act_gate) << "]";
  os << ",act_cand[" << to_string(attr.act_cand) << "]";
  os << ",act_cell[" << to_string(attr.act_cell) << "]";
  os << ",use_peephole[" << (attr.use_peephole ? "True" : "False") << "]";
  return os;
}

// Pretty-print a gru_attr_t, one bracketed field per statement.
inline std::ostream& operator<<(std::ostream& os, const gru_attr_t& attr) {
  os << "dim_size[" << attr.d << "]";
  os << ",act_gate[" << to_string(attr.act_gate) << "]";
  os << ",act_cand[" << to_string(attr.act_cand) << "]";
  return os;
}

// Pretty-print a seq_pool_attr_t, one bracketed field per statement.
inline std::ostream& operator<<(std::ostream& os, const seq_pool_attr_t& attr) {
  os << "height_size[" << attr.h << "]";
  os << ",width_size[" << attr.w << "]";
  os << ",pool_type[" << to_string(attr.type) << "]";
  return os;
}

// Pretty-print an emb_seq_pool_attr_t, one bracketed field per statement.
inline std::ostream& operator<<(std::ostream& os,
                                const emb_seq_pool_attr_t& attr) {
  os << "table_height[" << attr.table_height << "]";
  os << ",table_width[" << attr.table_width << "]";
  os << ",index_height[" << attr.index_height << "]";
  os << ",index_width[" << attr.index_width << "]";
  os << ",output_width[" << attr.out_width << "]";
  os << ",pool_type[" << to_string(attr.pool_type) << "]";
  return os;
}

// Pretty-print a matmul_attr_t as M[...],N[...],K[...].
inline std::ostream& operator<<(std::ostream& os, const matmul_attr_t& attr) {
  os << "M[" << attr.m << "]";
  os << ",N[" << attr.n << "]";
  os << ",K[" << attr.k << "]";
  return os;
}

// expose the method to pack matmul weight
template <typename T>
void pack_weights(const T* src, T* dst, int n, int k);

}  // namespace jit
}  // namespace operators
}  // namespace paddle