helper.cc
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */

#include "paddle/fluid/operators/jit/helper.h"
16
#include <numeric>
17
#include "paddle/fluid/platform/enforce.h"
18 19 20 21 22

namespace paddle {
namespace operators {
namespace jit {

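// Returns the per-thread cache mapping a kernel's size_t key to its
// type-erased function object.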
std::map<size_t, std::shared_ptr<void>>& GetFuncCacheMap() {
  static thread_local std::map<size_t, std::shared_ptr<void>> g_func_cache_map;
  return g_func_cache_map;
}

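// Expands an enum value into its string literal name for the to_string()
// overloads below.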
#define ONE_CASE(key) \
  case key:           \
    return #key

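// Returns the name of a KernelType value as a string, e.g. kVMul -> "kVMul".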
const char* to_string(KernelType kt) {
  switch (kt) {
    ONE_CASE(kNone);
    ONE_CASE(kVMul);
    ONE_CASE(kVAdd);
    ONE_CASE(kVAddRelu);
    ONE_CASE(kVSub);
    ONE_CASE(kVScal);
    ONE_CASE(kStrideScal);
    ONE_CASE(kVAddBias);
    ONE_CASE(kVRelu);
    ONE_CASE(kVBroadcast);
    ONE_CASE(kVCopy);
    ONE_CASE(kVIdentity);
    ONE_CASE(kVExp);
    ONE_CASE(kVSquare);
    ONE_CASE(kVSigmoid);
    ONE_CASE(kVTanh);
    ONE_CASE(kLSTMCtHt);
    ONE_CASE(kLSTMC1H1);
    ONE_CASE(kGRUH1);
    ONE_CASE(kGRUHtPart1);
    ONE_CASE(kGRUHtPart2);
    ONE_CASE(kCRFDecoding);
    ONE_CASE(kLayerNorm);
    ONE_CASE(kNCHW16CMulNC);
    ONE_CASE(kSeqPool);
    ONE_CASE(kMatMul);
    ONE_CASE(kHMax);
    ONE_CASE(kAdam);
    ONE_CASE(kHSum);
    ONE_CASE(kStrideASum);
    ONE_CASE(kSoftmax);
    ONE_CASE(kEmbSeqPool);
    ONE_CASE(kSgd);
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "JIT kernel does not support type: %d.", kt));
      return "NOT JITKernel";
  }
  return nullptr;
}

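// Returns the name of a SeqPoolType value as a string.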
const char* to_string(SeqPoolType tp) {
  switch (tp) {
    ONE_CASE(kNonePoolType);
    ONE_CASE(kSum);
    ONE_CASE(kAvg);
    ONE_CASE(kSqrt);
    default:
      PADDLE_THROW(platform::errors::Unimplemented(
          "SeqPool JIT kernel does not support type: %d.", tp));
      return "NOT PoolType";
  }
  return nullptr;
}
#undef ONE_CASE

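// Maps an activation name (case-insensitive) to its KernelType; an empty
// string maps to kVIdentity, e.g. to_kerneltype("ReLU") == kVRelu.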
KernelType to_kerneltype(const std::string& act) {
  std::string lower = act;
  std::transform(lower.begin(), lower.end(), lower.begin(), ::tolower);
  if (lower == "relu" || lower == "vrelu") {
    return kVRelu;
  } else if (lower == "identity" || lower == "videntity" || lower == "") {
    return kVIdentity;
  } else if (lower == "exp" || lower == "vexp") {
    return kVExp;
  } else if (lower == "sigmoid" || lower == "vsigmoid") {
    return kVSigmoid;
  } else if (lower == "tanh" || lower == "vtanh") {
    return kVTanh;
  }
  PADDLE_THROW(platform::errors::Unimplemented(
      "Act JIT kernel does not support type: %s.", act));
  return kNone;
}

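// Packs the k x n row-major weight matrix `src` into block-aligned column
// groups in `dst` for the packed MatMul kernels. packed_groups() decides the
// block width and per-group block counts; the tail group may hold only `rest`
// valid columns, and the zero-initialized padding fills the remainder.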
template <>
void pack_weights<float>(const float* src, float* dst, int n, int k) {
  int block, rest;
  const auto groups = packed_groups(n, k, &block, &rest);
  std::for_each(groups.begin(), groups.end(), [&](int i) {
    PADDLE_ENFORCE_GT(i, 0, platform::errors::InvalidArgument(
                                "Each element of groups should be larger than "
                                "0, but element %d is not.",
                                i));
  });
  int sum = std::accumulate(groups.begin(), groups.end(), 0);
  std::memset(dst, 0, k * sum * block * sizeof(float));
  PADDLE_ENFORCE_GE(sum * block, n,
                    platform::errors::InvalidArgument(
                        "The packed n (sum * block) should be equal to or "
                        "larger than n (matmul row size). "
                        "However, the packed n is %d and n is %d.",
                        sum * block, n));

  const int block_len = sizeof(float) * block;
  int n_offset = 0;

  for (size_t g = 0; g < groups.size(); ++g) {
    const float* from = src + n_offset;
    for (int j = 0; j < k; ++j) {
      size_t copy_sz = groups[g] * block_len;
      if (g == groups.size() - 1 && rest != 0) {
        copy_sz = (groups[g] - 1) * block_len + rest * sizeof(float);
      }
      std::memcpy(dst, from + j * n, copy_sz);
      dst += groups[g] * block;
    }
    n_offset += groups[g] * block;
  }
}

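// Fallback for non-float element types: weight packing is only implemented
// for float.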
template <typename T>
typename std::enable_if<!std::is_same<T, float>::value>::type pack_weights(
    const T* src, T* dst, int n, int k) {
  PADDLE_THROW(platform::errors::Unimplemented(
      "Only supports packing weights of float type."));
}

}  // namespace jit
}  // namespace operators
}  // namespace paddle