/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <cmath>
#include <functional>
#include <string>

#include "paddle/fluid/platform/cpu_info.h"
#include "paddle/fluid/platform/enforce.h"

#ifdef __AVX__
#include <immintrin.h>
#endif

#ifdef PADDLE_WITH_MKLML
#include "paddle/fluid/platform/dynload/mklml.h"
#endif

namespace paddle {
namespace operators {
namespace math {

#define SIGMOID_THRESHOLD_MIN -40.0
#define SIGMOID_THRESHOLD_MAX 13.0

// Elements processed per SIMD register: a YMM register (AVX/AVX2, 256-bit)
// holds 8 floats / 4 doubles; a ZMM register (AVX512, 512-bit) holds
// 16 floats / 8 doubles.
// NOTE: the duplicate `#define YMM_FLOAT_BLOCK 8` that appeared twice in
// this list has been removed; the value is unchanged.
#define YMM_FLOAT_BLOCK 8
#define AVX_DOUBLE_BLOCK 4
#define AVX2_DOUBLE_BLOCK 4
#define ZMM_FLOAT_BLOCK 16
#define AVX512_DOUBLE_BLOCK 8

// Scalar fallback: y[i] = e^{x[i]} for each of the n elements.
template <typename T>
inline void vec_exp(const int n, const T* x, T* y) {
  for (int k = 0; k < n; ++k) {
    y[k] = std::exp(x[k]);
  }
}

// Scalar fallback, in place: x[i] *= a.
template <typename T>
inline void vec_scal(const int n, const T a, T* x) {
  for (int idx = 0; idx < n; ++idx) {
    x[idx] *= a;
  }
}

#ifdef PADDLE_WITH_MKLML
// MKL-accelerated specializations: VML vsExp/vdExp for elementwise exp and
// CBLAS ?scal (stride 1) for in-place scaling.
template <>
inline void vec_exp<float>(const int n, const float* x, float* y) {
  platform::dynload::vsExp(n, x, y);
}

template <>
inline void vec_exp<double>(const int n, const double* x, double* y) {
  platform::dynload::vdExp(n, x, y);
}

template <>
inline void vec_scal<float>(const int n, const float a, float* x) {
  platform::dynload::cblas_sscal(n, a, x, 1);
}

template <>
inline void vec_scal<double>(const int n, const double a, double* x) {
  platform::dynload::cblas_dscal(n, a, x, 1);
}
#endif

// MKL scal only support inplace, choose this if src and dst are not equal
T
tensor-tang 已提交
80
template <typename T, platform::cpu_isa_t isa = platform::isa_any>
81 82 83 84 85 86 87
inline void vec_scal(const int n, const T a, const T* x, T* y) {
  for (int i = 0; i < n; ++i) {
    y[i] = a * x[i];
  }
}

template <>
inline void vec_scal<float, platform::avx>(const int n, const float a,
                                           const float* x, float* y) {
#ifdef __AVX__
  constexpr int block = YMM_FLOAT_BLOCK;
  // Too short to fill one YMM register: use the scalar fallback.
  if (n < block) {
    vec_scal<float, platform::isa_any>(n, a, x, y);
    return;
  }
  const int rest = n % block;
  const int end = n - rest;
  int i = 0;
  __m256 scalar = _mm256_set1_ps(a);
  __m256 tmp;
// One block of 8 floats: y[i..i+7] = a * x[i..i+7] (unaligned load/store).
#define MOVE_ONE_STEP               \
  tmp = _mm256_loadu_ps(x + i);     \
  tmp = _mm256_mul_ps(tmp, scalar); \
  _mm256_storeu_ps(y + i, tmp)
  for (i = 0; i < end; i += block) {
    MOVE_ONE_STEP;
  }
#undef MOVE_ONE_STEP
  if (rest == 0) {
    return;
  }
  // can not continue move step if src and dst are inplace
  // (an overlapping vector reload would see already-written outputs),
  // so finish the tail element-wise.
  for (i = n - rest; i < n; ++i) {
    y[i] = a * x[i];
  }
#else
  vec_scal<float, platform::isa_any>(n, a, x, y);
#endif
}

template <>
inline void vec_scal<float, platform::avx2>(const int n, const float a,
                                            const float* x, float* y) {
  // No AVX2-specific path implemented; reuse the AVX kernel.
  vec_scal<float, platform::avx>(n, a, x, y);
}

template <>
inline void vec_scal<float, platform::avx512f>(const int n, const float a,
                                               const float* x, float* y) {
  // TODO(TJ): enable me
  // No dedicated AVX512 path yet; fall back to the AVX2 specialization.
  vec_scal<float, platform::avx2>(n, a, x, y);
}

T
tensor-tang 已提交
134
template <typename T, platform::cpu_isa_t isa = platform::isa_any>
T
tensor-tang 已提交
135 136 137 138 139 140 141
inline void vec_bias_sub(const int n, const T a, const T* x, T* y) {
  for (int i = 0; i < n; ++i) {
    y[i] = a - x[i];
  }
}

template <>
inline void vec_bias_sub<float, platform::avx>(const int n, const float a,
                                               const float* x, float* y) {
#ifdef __AVX__
  constexpr int block = YMM_FLOAT_BLOCK;
  // Too short to fill one YMM register: use the scalar fallback.
  if (n < block) {
    vec_bias_sub<float, platform::isa_any>(n, a, x, y);
    return;
  }
  const int rest = n % block;
  const int end = n - rest;
  int i = 0;
  __m256 bias = _mm256_set1_ps(a);
  __m256 tmp;
// One block of 8 floats: y[i..i+7] = a - x[i..i+7].
#define MOVE_ONE_STEP             \
  tmp = _mm256_loadu_ps(x + i);   \
  tmp = _mm256_sub_ps(bias, tmp); \
  _mm256_storeu_ps(y + i, tmp)
  for (i = 0; i < end; i += block) {
    MOVE_ONE_STEP;
  }
#undef MOVE_ONE_STEP
  if (rest == 0) {
    return;
  }
  // can not continue move step if src and dst are inplace
  // (an overlapping vector reload would see already-written outputs).
  for (i = n - rest; i < n; ++i) {
    y[i] = a - x[i];
  }
#else
  vec_bias_sub<float, platform::isa_any>(n, a, x, y);
#endif
}

template <>
inline void vec_bias_sub<float, platform::avx2>(const int n, const float a,
                                                const float* x, float* y) {
  // No AVX2-specific path implemented; reuse the AVX kernel.
  vec_bias_sub<float, platform::avx>(n, a, x, y);
}

template <>
inline void vec_bias_sub<float, platform::avx512f>(const int n, const float a,
                                                   const float* x, float* y) {
  // TODO(TJ): enable me
  // No dedicated AVX512 path yet; fall back to the AVX2 specialization.
  vec_bias_sub<float, platform::avx2>(n, a, x, y);
}

// out = x*y + (1-x)*z
T
tensor-tang 已提交
189
template <typename T, platform::cpu_isa_t isa = platform::isa_any>
T
tensor-tang 已提交
190 191 192 193 194 195 196
inline void vec_cross(const int n, const T* x, const T* y, const T* z, T* out) {
  for (int i = 0; i < n; ++i) {
    out[i] = x[i] * y[i] + (static_cast<T>(1) - x[i]) * z[i];
  }
}

template <>
inline void vec_cross<float, platform::avx>(const int n, const float* x,
                                            const float* y, const float* z,
                                            float* out) {
#ifdef __AVX__
  constexpr int block = YMM_FLOAT_BLOCK;
  // Too short to fill one YMM register: use the scalar fallback.
  if (n < block) {
    vec_cross<float, platform::isa_any>(n, x, y, z, out);
    return;
  }
  const int rest = n % block;
  const int end = n - rest;
  int i = 0;
  __m256 bias = _mm256_set1_ps(1.f);
  __m256 tmpx, tmpy, tmpz;
  // Vector body: out = x * y + (1 - x) * z, 8 floats per iteration.
  for (i = 0; i < end; i += block) {
    tmpx = _mm256_loadu_ps(x + i);
    tmpy = _mm256_loadu_ps(y + i);
    tmpz = _mm256_loadu_ps(z + i);
    tmpy = _mm256_mul_ps(tmpx, tmpy);
    tmpx = _mm256_sub_ps(bias, tmpx);
    tmpz = _mm256_mul_ps(tmpx, tmpz);
    tmpz = _mm256_add_ps(tmpy, tmpz);
    _mm256_storeu_ps(out + i, tmpz);
  }
  if (rest == 0) {
    return;
  }
  // can not continue move step if src and dst are inplace
  // (an overlapping vector reload would see already-written outputs).
  for (i = n - rest; i < n; ++i) {
    out[i] = x[i] * y[i] + (1.f - x[i]) * z[i];
  }
#else
  vec_cross<float, platform::isa_any>(n, x, y, z, out);
#endif
}

template <>
inline void vec_cross<float, platform::avx2>(const int n, const float* x,
                                             const float* y, const float* z,
                                             float* out) {
  // No AVX2-specific path implemented; reuse the AVX kernel.
  vec_cross<float, platform::avx>(n, x, y, z, out);
}

template <>
T
tensor-tang 已提交
241 242 243
inline void vec_cross<float, platform::avx512f>(const int n, const float* x,
                                                const float* y, const float* z,
                                                float* out) {
T
tensor-tang 已提交
244
  // TODO(TJ): enable me
T
tensor-tang 已提交
245
  vec_cross<float, platform::avx>(n, x, y, z, out);
T
tensor-tang 已提交
246 247
}

T
tensor-tang 已提交
248
template <typename T, platform::cpu_isa_t isa = platform::isa_any>
T
tensor-tang 已提交
249 250 251 252 253 254 255
inline void vec_add_bias(const int n, const T a, const T* x, T* y) {
  for (int i = 0; i < n; ++i) {
    y[i] = x[i] + a;
  }
}

template <>
inline void vec_add_bias<float, platform::avx>(const int n, const float a,
                                               const float* x, float* y) {
#ifdef __AVX__
  constexpr int block = YMM_FLOAT_BLOCK;
  // Too short to fill one YMM register: use the scalar fallback.
  if (n < block) {
    vec_add_bias<float, platform::isa_any>(n, a, x, y);
    return;
  }
  const int rest = n % block;
  const int end = n - rest;
  int i = 0;
  __m256 bias = _mm256_set1_ps(a);
  __m256 tmp;
// One block of 8 floats: y[i..i+7] = x[i..i+7] + a.
#define MOVE_ONE_STEP             \
  tmp = _mm256_loadu_ps(x + i);   \
  tmp = _mm256_add_ps(tmp, bias); \
  _mm256_storeu_ps(y + i, tmp)
  for (i = 0; i < end; i += block) {
    MOVE_ONE_STEP;
  }
#undef MOVE_ONE_STEP
  if (rest == 0) {
    return;
  }
  // can not continue move step if src and dst are inplace
  // (an overlapping vector reload would see already-written outputs).
  for (i = n - rest; i < n; ++i) {
    y[i] = x[i] + a;
  }
#else
  vec_add_bias<float, platform::isa_any>(n, a, x, y);
#endif
}

template <>
inline void vec_add_bias<float, platform::avx2>(const int n, const float a,
                                                const float* x, float* y) {
  // No AVX2-specific path implemented; reuse the AVX kernel.
  vec_add_bias<float, platform::avx>(n, a, x, y);
}

template <>
inline void vec_add_bias<float, platform::avx512f>(const int n, const float a,
                                                   const float* x, float* y) {
  // TODO(TJ): enable me
  // No dedicated AVX512 path yet; fall back to the AVX2 specialization.
  vec_add_bias<float, platform::avx2>(n, a, x, y);
}

T
tensor-tang 已提交
302
template <typename T, platform::cpu_isa_t isa = platform::isa_any>
303 304 305 306 307
inline void vec_identity(const int n, const T* x, T* y) {
  // do nothing
  return;
}

T
tensor-tang 已提交
308
template <typename T, platform::cpu_isa_t isa = platform::isa_any>
T
tensor-tang 已提交
309 310 311 312
inline void vec_sigmoid(const int n, const T* x, T* y) {
  const T min = SIGMOID_THRESHOLD_MIN;
  const T max = SIGMOID_THRESHOLD_MAX;
  for (int i = 0; i < n; ++i) {
T
tensor-tang 已提交
313 314 315 316 317 318
    y[i] = (x[i] < min) ? min : ((x[i] > max) ? max : x[i]);
    y[i] = static_cast<T>(0) - y[i];
  }
  vec_exp<T>(n, y, y);
  for (int i = 0; i < n; ++i) {
    y[i] = static_cast<T>(1) / (static_cast<T>(1) + y[i]);
T
tensor-tang 已提交
319 320 321
  }
}

template <>
inline void vec_sigmoid<float, platform::avx>(const int n, const float* x,
                                              float* y) {
#ifdef __AVX__
  constexpr int block = YMM_FLOAT_BLOCK;
  // Too short to fill one YMM register: use the scalar fallback.
  if (n < block) {
    vec_sigmoid<float, platform::isa_any>(n, x, y);
    return;
  }
  const int rest = n % block;
  const int end = n - rest;
  int i = 0;
  __m256 max = _mm256_set1_ps(SIGMOID_THRESHOLD_MAX);
  __m256 min = _mm256_set1_ps(SIGMOID_THRESHOLD_MIN);
  __m256 zeros = _mm256_setzero_ps();
  __m256 tmp;
// Pass 1 (vector): y = -clip(x, [SIGMOID_THRESHOLD_MIN, _MAX]).
#define MOVE_ONE_STEP              \
  tmp = _mm256_loadu_ps(x + i);    \
  tmp = _mm256_max_ps(tmp, min);   \
  tmp = _mm256_min_ps(tmp, max);   \
  tmp = _mm256_sub_ps(zeros, tmp); \
  _mm256_storeu_ps(y + i, tmp)
  for (i = 0; i < end; i += block) {
    MOVE_ONE_STEP;
  }
#undef MOVE_ONE_STEP
  if (rest != 0) {
    // can not continue move step since the src and dst address could be equal
    const float xmin = SIGMOID_THRESHOLD_MIN;
    const float xmax = SIGMOID_THRESHOLD_MAX;
    for (i = n - rest; i < n; ++i) {
      y[i] = 0.f - ((x[i] < xmin) ? xmin : ((x[i] > xmax) ? xmax : x[i]));
    }
  }

  // Pass 2: y = e^y over the whole array (MKL-backed when available).
  vec_exp<float>(n, y, y);

  __m256 ones = _mm256_set1_ps(1.0f);
// Pass 3 (vector): y = 1 / (1 + y).
#define MOVE_ONE_STEP             \
  tmp = _mm256_loadu_ps(y + i);   \
  tmp = _mm256_add_ps(ones, tmp); \
  tmp = _mm256_div_ps(ones, tmp); \
  _mm256_storeu_ps(y + i, tmp)
  for (i = 0; i < end; i += block) {
    MOVE_ONE_STEP;
  }
#undef MOVE_ONE_STEP
  if (rest == 0) {
    return;
  }
  // can not continue move step; finish the tail element-wise.
  for (i = n - rest; i < n; ++i) {
    y[i] = 1.f / (1.f + y[i]);
  }
#else
  vec_sigmoid<float, platform::isa_any>(n, x, y);
#endif
}

template <>
inline void vec_sigmoid<float, platform::avx2>(const int n, const float* x,
                                               float* y) {
  // No AVX2-specific path implemented; reuse the AVX kernel.
  vec_sigmoid<float, platform::avx>(n, x, y);
}

template <>
inline void vec_sigmoid<float, platform::avx512f>(const int n, const float* x,
                                                  float* y) {
  // TODO(TJ): enable me
  // No dedicated AVX512 path yet; fall back to the AVX2 specialization.
  vec_sigmoid<float, platform::avx2>(n, x, y);
}

// tanh(v) = 2 * sigmoid(2v) - 1, built from the vectorized primitives above.
// (The sigmoid input is clipped, so very large |v| saturates.) All steps are
// inplace-safe on y.
template <typename T, platform::cpu_isa_t isa = platform::isa_any>
inline void vec_tanh(const int n, const T* x, T* y) {
  vec_scal<T, isa>(n, static_cast<T>(2), x, y);        // y = 2x
  vec_sigmoid<T, isa>(n, y, y);                        // y = sigmoid(2x)
  vec_scal<T>(n, static_cast<T>(2), y);                // y = 2*sigmoid(2x)
  vec_add_bias<T, isa>(n, static_cast<T>(-1), y, y);   // y = 2*sigmoid(2x)-1
}

T
tensor-tang 已提交
402
// TODO(TJ): make relu clip
T
tensor-tang 已提交
403
template <typename T, platform::cpu_isa_t isa = platform::isa_any>
T
tensor-tang 已提交
404 405 406 407 408 409
inline void vec_relu(const int n, const T* x, T* y) {
  for (int i = 0; i < n; ++i) {
    y[i] = x[i] > 0 ? x[i] : 0;
  }
}

template <>
inline void vec_relu<float, platform::avx>(const int n, const float* x,
                                           float* y) {
#ifdef __AVX__
  constexpr int block = YMM_FLOAT_BLOCK;
  // Vectorization threshold is 4 blocks here (stricter than the other
  // kernels): short arrays go through the scalar fallback.
  if (n < block * 4) {
    vec_relu<float, platform::isa_any>(n, x, y);
    return;
  }

  const int rest = n % block;
  const int end = n - rest;
  int i = 0;
  __m256 zeros = _mm256_setzero_ps();
  __m256 tmp;
// One block of 8 floats: y[i..i+7] = max(x[i..i+7], 0).
#define MOVE_ONE_STEP              \
  tmp = _mm256_loadu_ps(x + i);    \
  tmp = _mm256_max_ps(tmp, zeros); \
  _mm256_storeu_ps(y + i, tmp)
  for (i = 0; i < end; i += block) {
    MOVE_ONE_STEP;
  }
  if (rest == 0) {
    return;
  }
  // Tail: redo the last full block, overlapping already-processed elements.
  // Relu is idempotent (max(max(v,0),0) == max(v,0)), so recomputing the
  // overlap is safe even when x == y.
  i = n - block;
  MOVE_ONE_STEP;
#undef MOVE_ONE_STEP

#else
  vec_relu<float, platform::isa_any>(n, x, y);
#endif
}

template <>
inline void vec_relu<float, platform::avx2>(const int n, const float* x,
                                            float* y) {
  // No AVX2-specific path implemented; reuse the AVX kernel.
  vec_relu<float, platform::avx>(n, x, y);
}

template <>
inline void vec_relu<float, platform::avx512f>(const int n, const float* x,
                                               float* y) {
  // TODO(TJ): enable me
  // No dedicated AVX512 path yet; fall back to the AVX2 specialization.
  vec_relu<float, platform::avx2>(n, x, y);
}

// TODO(TJ): optimize double of sigmoid, tanh and relu if necessary

// Functor mapping an activation name ("sigmoid", "relu", "tanh",
// "identity" or "") to the corresponding vectorized kernel for type T and
// instruction set `isa`. Throws on any other name.
template <typename T, platform::cpu_isa_t isa = platform::isa_any>
class VecActivations {
 public:
  std::function<void(const int, const T*, T*)> operator()(
      const std::string& type) {
    if (type == "sigmoid") {
      return vec_sigmoid<T, isa>;
    } else if (type == "relu") {
      return vec_relu<T, isa>;
    } else if (type == "tanh") {
      return vec_tanh<T, isa>;
    } else if (type == "identity" || type == "") {
      return vec_identity<T, isa>;
    }
    // Unknown activation name is a hard error.
    PADDLE_THROW("Not support type: %s", type);
  }
};

}  // namespace math
}  // namespace operators
}  // namespace paddle