/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */

#pragma once

#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>  // for std::move
#include <vector>

#include "paddle/fluid/operators/jit/gen_base.h"
#include "paddle/fluid/operators/jit/kernel_base.h"
#include "paddle/fluid/operators/jit/kernel_key.h"
#include "paddle/fluid/operators/jit/kernel_pool.h"
#include "paddle/fluid/platform/macros.h"  // for DISABLE_COPY_AND_ASSIGN
#include "paddle/fluid/platform/place.h"

namespace paddle {
namespace operators {
namespace jit {

class GenBase;

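// Look up (or generate and cache) jitcode for the given attribute. This
// overload is enabled only for float kernels on CPUPlace, the only
// combination the JIT code generators support.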
template <typename KernelTuple, typename PlaceType>
inline typename std::enable_if<
    std::is_same<typename KernelTuple::data_type, float>::value &&
        std::is_same<PlaceType, platform::CPUPlace>::value,
    const Kernel*>::type
GetJitCode(const typename KernelTuple::attr_type& attr) {
  using Attr = typename KernelTuple::attr_type;
  int64_t key = JitCodeKey<Attr>(attr);
  auto& codes = JitCodePool<KernelTuple::kernel_type>::Instance();
  if (codes.Has(key)) {
    return codes.AllKernels().at(key).get();
  }

  // The creator does not depend on attr, so KernelKey can be used as the key.
  KernelKey kkey(KernelTuple::kernel_type, PlaceType());
  // pool: (KernelKey(type, place), vector<GenCreatorPtr>)
  auto& creator_map = JitCodeCreatorPool::Instance().AllCreators();
  auto iter = creator_map.find(kkey);
  if (iter != creator_map.end()) {
    auto& creators = iter->second;
    for (auto& cur : creators) {
      auto i = dynamic_cast<const JitCodeCreator<Attr>*>(cur.get());
      if (i && i->CanBeUsed(attr)) {
        auto p = i->CreateJitCode(attr);
        if (p) {
          auto res = p.get();
          codes.Insert(key, std::move(p));
          return res;
        }
      }
    }
  }
  return nullptr;
}

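// Fallback overload for every other (data type, place) combination: no
// jitcode is available, so return nullptr and let callers fall back to the
// "more" or refer implementations.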
template <typename KernelTuple, typename PlaceType>
inline typename std::enable_if<
    !std::is_same<typename KernelTuple::data_type, float>::value ||
        !std::is_same<PlaceType, platform::CPUPlace>::value,
    const Kernel*>::type
GetJitCode(const typename KernelTuple::attr_type& attr) {
  return nullptr;
}

// Refer code does not depend on attr; the template parameter is only used for
// the cast. Refer kernels are always on CPUPlace.
template <typename KernelTuple>
inline const Kernel* GetReferKernel() {
  auto& ref_pool = ReferKernelPool::Instance().AllKernels();
  KernelKey kkey(KernelTuple::kernel_type, platform::CPUPlace());
  auto ref_iter = ref_pool.find(kkey);
  PADDLE_ENFORCE_NE(
      ref_iter, ref_pool.end(),
      platform::errors::PreconditionNotMet(
          "Every Refer Kernel of jitcode should have a reference function."));
  auto& ref_impls = ref_iter->second;
  for (auto& impl : ref_impls) {
    auto i = dynamic_cast<const ReferKernel<KernelTuple>*>(impl.get());
    if (i) {
      return i;
    }
  }
  return nullptr;
}

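// Fetch the refer kernel and return its function pointer directly.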
template <typename KernelTuple>
inline typename KernelTuple::func_type GetReferFunc() {
  auto ker = GetReferKernel<KernelTuple>();
  auto p = dynamic_cast<const ReferKernel<KernelTuple>*>(ker);
  PADDLE_ENFORCE_NOT_NULL(p, platform::errors::InvalidArgument(
                                 "Get the reference code of kernel on CPU "
                                 "failed. The Refer kernel should exist."));
  return p->GetFunc();
}

// Return all Kernels that can be used
template <typename KernelTuple, typename PlaceType>
std::vector<const Kernel*> GetAllCandidateKernels(
    const typename KernelTuple::attr_type& attr) {
  // The search order should be: jitcode > more > refer.
  std::vector<const Kernel*> res;
  auto jitker = GetJitCode<KernelTuple, PlaceType>(attr);
  if (jitker) {
    res.emplace_back(jitker);
  }

  // The "more" kernel pool: (KernelKey(type, place), vector<KernelPtr>)
  KernelKey kkey(KernelTuple::kernel_type, PlaceType());
  auto& pool = KernelPool::Instance().AllKernels();
  auto iter = pool.find(kkey);
  if (iter != pool.end()) {
    auto& impls = iter->second;
    for (auto& impl : impls) {
      auto i = dynamic_cast<const KernelMore<KernelTuple>*>(impl.get());
      if (i && i->CanBeUsed(attr)) {
        res.emplace_back(i);
      }
    }
  }

  // The last implementation should be the reference function on CPUPlace.
  auto ref = GetReferKernel<KernelTuple>();
  PADDLE_ENFORCE_NOT_NULL(ref, platform::errors::InvalidArgument(
                                   "Get all candidate kernels on CPU failed. "
                                   "The Refer Kernel cannot be empty."));
  res.emplace_back(ref);
  return res;
}

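// Return (implementation name, function) pairs for every usable candidate, as
// reported by ImplType(). Useful for benchmarking candidates against each
// other.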
template <typename KernelTuple, typename PlaceType = platform::CPUPlace>
std::vector<std::pair<std::string, typename KernelTuple::func_type>>
GetAllCandidateFuncsWithTypes(const typename KernelTuple::attr_type& attr) {
  using Func = typename KernelTuple::func_type;
  auto kers = GetAllCandidateKernels<KernelTuple, PlaceType>(attr);
  std::vector<std::pair<std::string, Func>> res;
  for (auto k : kers) {
    std::string name = k->ImplType();
    if (name == "JitCode") {
      auto i = dynamic_cast<const GenBase*>(k);
      PADDLE_ENFORCE_NOT_NULL(i,
                              platform::errors::InvalidArgument(
                                  "Generate jitcode kernel (GenBase) failed."));
      res.emplace_back(std::make_pair(name, i->template getCode<Func>()));
    } else {
      auto i = dynamic_cast<const KernelMore<KernelTuple>*>(k);
      PADDLE_ENFORCE_NOT_NULL(i, platform::errors::InvalidArgument(
                                     "Kernel cast (KernelMore) failed."));
      res.emplace_back(std::make_pair(name, i->GetFunc()));
    }
  }
  return res;
}

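// Same as above, but return only the functions without their type names.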
template <typename KernelTuple, typename PlaceType = platform::CPUPlace>
std::vector<typename KernelTuple::func_type> GetAllCandidateFuncs(
    const typename KernelTuple::attr_type& attr) {
  auto funcs = GetAllCandidateFuncsWithTypes<KernelTuple, PlaceType>(attr);
  std::vector<typename KernelTuple::func_type> res;
  for (auto& i : funcs) {
    res.emplace_back(i.second);
  }
  return res;
}

template <typename KernelTuple, typename PlaceType = platform::CPUPlace>
typename KernelTuple::func_type GetDefaultBestFunc(
    const typename KernelTuple::attr_type& attr) {
  auto funcs = GetAllCandidateFuncs<KernelTuple, PlaceType>(attr);
  PADDLE_ENFORCE_GE(funcs.size(), 1UL,
                    platform::errors::InvalidArgument(
                        "There must be at least one candidate jit kernel on "
                        "CPU."));
  // A runtime benchmark over this attr could be done here to pick the best.
  // For now, just return the first one, since candidates are searched in
  // priority order and tuned offline.
  return funcs[0];
}

extern std::map<size_t, std::shared_ptr<void>>& GetFuncCacheMap();

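// A per-(KernelTuple, PlaceType) cache that maps an attribute key to the best
// function found for that attribute. Cache() returns a process-wide instance
// kept alive by the map returned from GetFuncCacheMap().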
template <typename KernelTuple, typename PlaceType>
class KernelFuncs {
 public:
  KernelFuncs() = default;
  static KernelFuncs& Cache() {
    auto& func_cache_map = GetFuncCacheMap();
    auto key = typeid(KernelFuncs<KernelTuple, PlaceType>).hash_code();
    auto iter = func_cache_map.find(key);
    if (iter != func_cache_map.end()) {
      return *static_cast<KernelFuncs<KernelTuple, PlaceType>*>(
          iter->second.get());
    } else {
      std::shared_ptr<void> cache =
          std::make_shared<KernelFuncs<KernelTuple, PlaceType>>();
      func_cache_map.emplace(key, cache);
      return *static_cast<KernelFuncs<KernelTuple, PlaceType>*>(cache.get());
    }
  }

  // The exposed interface: return (and cache) the best function for attr.
  typename KernelTuple::func_type At(
      const typename KernelTuple::attr_type& attr) {
    // This may not be good enough, since not all kernels have jitcode.
    int64_t key = JitCodeKey<typename KernelTuple::attr_type>(attr);
    if (Has(key)) {
      return funcs_.at(key);
    }
    // If this attr is not in the cache, get the default best function.
    auto func = GetDefaultBestFunc<KernelTuple, PlaceType>(attr);
    Insert(key, func);
    return func;
  }

  typename KernelTuple::func_type operator[](
      const typename KernelTuple::attr_type& attr) {
    return At(attr);
  }

 protected:
  bool Has(int64_t key) const { return funcs_.find(key) != funcs_.end(); }
  void Insert(int64_t key, typename KernelTuple::func_type func) {
    funcs_.emplace(key, func);
  }

 private:
  std::unordered_map<int64_t, typename KernelTuple::func_type> funcs_;
  DISABLE_COPY_AND_ASSIGN(KernelFuncs);
};
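
// Example usage (a sketch; assumes VMulTuple<float> from kernel_base.h, whose
// func_type is void(const float*, const float*, float*, int)):
//
//   int n = 8;
//   std::vector<float> x(n, 1.f), y(n, 2.f), z(n);
//   auto f = KernelFuncs<VMulTuple<float>, platform::CPUPlace>::Cache().At(n);
//   f(x.data(), y.data(), z.data(), n);  // z[i] = x[i] * y[i]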

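// Human-readable names for kernel and pooling types, for logs and debugging.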
const char* to_string(KernelType kt);
const char* to_string(SeqPoolType kt);

KernelType to_kerneltype(const std::string& act);

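// Debug printers for the jit attribute types.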
inline std::ostream& operator<<(std::ostream& os, const lstm_attr_t& attr) {
  os << "dim_size[" << attr.d << "],act_gate[" << to_string(attr.act_gate)
     << "],act_cand[" << to_string(attr.act_cand) << "],act_cell["
     << to_string(attr.act_cell) << "],use_peephole["
     << (attr.use_peephole ? "True" : "False") << "]";
  return os;
}

inline std::ostream& operator<<(std::ostream& os, const gru_attr_t& attr) {
  os << "dim_size[" << attr.d << "],act_gate[" << to_string(attr.act_gate)
     << "],act_cand[" << to_string(attr.act_cand) << "]";
  return os;
}

inline std::ostream& operator<<(std::ostream& os, const seq_pool_attr_t& attr) {
  os << "height_size[" << attr.h << "],width_size[" << attr.w << "],pool_type["
     << to_string(attr.type) << "]";
  return os;
}

inline std::ostream& operator<<(std::ostream& os,
                                const emb_seq_pool_attr_t& attr) {
  os << "table_height[" << attr.table_height << "],table_width["
     << attr.table_width << "],index_height[" << attr.index_height
     << "],index_width[" << attr.index_width << "],output_width["
     << attr.out_width << "],pool_type[" << to_string(attr.pool_type) << "]";
  return os;
}

inline std::ostream& operator<<(std::ostream& os, const adam_attr_t& attr) {
  os << "beta1[" << attr.beta1 << "],beta2[" << attr.beta2 << "]";
  return os;
}

inline std::ostream& operator<<(std::ostream& os, const sgd_attr_t& attr) {
  os << "param_height[" << attr.param_height << "],param_width["
     << attr.param_width << "],grad_height[" << attr.grad_height
     << "],grad_width[" << attr.grad_width << "],selected_rows_size["
     << attr.selected_rows_size << "]";
  return os;
}

inline std::ostream& operator<<(std::ostream& os, const matmul_attr_t& attr) {
  os << "M[" << attr.m << "],N[" << attr.n << "],K[" << attr.k << "]";
  return os;
}

// Expose the method to pack the matmul weight.
template <typename T>
void pack_weights(const T* src, T* dst, int n, int k);

}  // namespace jit
}  // namespace operators
}  // namespace paddle