/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#if !defined(PADDLE_WITH_ARM) && !defined(PADDLE_WITH_SW) && \
    !defined(PADDLE_WITH_MIPS)
#include <immintrin.h>
#endif
#include <cfloat>
#include <cmath>
#include <cstring>

#include "paddle/fluid/operators/math/blas.h"
#include "paddle/pten/kernels/funcs/math_function.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
using LoD = framework::LoD;

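// Thin wrappers around math::Blas GEMM: lda/ldb are derived from the
// transpose flags, and C is written as an M x N matrix with ldc = N.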
template <typename DeviceContext, typename T>
void call_gemm(const math::BlasT<DeviceContext, T>& blas,
               const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB,
               const int M, const int N, const int K, const T alpha, const T* A,
               const T* B, const T beta, T* C) {
  int lda = (TransA == CblasNoTrans) ? K : M;
  int ldb = (TransB == CblasNoTrans) ? N : K;
  blas.GEMM(TransA, TransB, M, N, K, alpha, A, lda, B, ldb, beta, C, N);
}

template <typename T>
void call_gemm(const framework::ExecutionContext& ctx,
               const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB,
               const int M, const int N, const int K, const T alpha, const T* A,
               const T* B, const T beta, T* C) {
  int lda = (TransA == CblasNoTrans) ? K : M;
  int ldb = (TransB == CblasNoTrans) ? N : K;
  auto blas = math::GetBlas<platform::CPUDeviceContext, T>(ctx);
  blas.GEMM(TransA, TransB, M, N, K, alpha, A, lda, B, ldb, beta, C, N);
}

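// Same as call_gemm, except the caller supplies lda explicitly.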
template <typename DeviceContext, typename T>
void call_gemm_with_lda(const math::BlasT<DeviceContext, T>& blas,
                        const CBLAS_TRANSPOSE TransA,
                        const CBLAS_TRANSPOSE TransB, const int M, const int N,
                        const int K, const T alpha, const T* A, const T* B,
                        const T beta, T* C, int lda) {
  int ldb = (TransB == CblasNoTrans) ? N : K;

  blas.GEMM(TransA, TransB, M, N, K, alpha, A, lda, B, ldb, beta, C, N);
}

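// Batched variant: runs one independent GEMM per (A[i], B[i], C[i]) triple.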
template <typename T>
void call_gemm_batched(const framework::ExecutionContext& ctx,
                       const CBLAS_TRANSPOSE TransA,
                       const CBLAS_TRANSPOSE TransB, const int M, const int N,
                       const int K, const T alpha, const T** A, const T** B,
                       const T beta, T** C, const int batch) {
  for (int i = 0; i < batch; ++i) {
    call_gemm(ctx, TransA, TransB, M, N, K, alpha, A[i], B[i], beta, C[i]);
  }
}

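// Single-precision SIMD aliases used by axpy/axpy_noadd below: the _px macros
// map to AVX (8 floats per register) and SSE (4 floats per register)
// intrinsics. They are not defined for ARM, SW, or MIPS builds.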
#if !defined(PADDLE_WITH_ARM) && !defined(PADDLE_WITH_SW) && \
    !defined(PADDLE_WITH_MIPS)

#define __m256x __m256

static const unsigned int AVX_STEP_SIZE = 8;
static const unsigned int AVX_CUT_LEN_MASK = 7U;

#define _mm256_mul_px _mm256_mul_ps
#define _mm256_add_px _mm256_add_ps
#define _mm256_load_px _mm256_loadu_ps
#define _mm256_store_px _mm256_storeu_ps
#define _mm256_broadcast_sx _mm256_broadcast_ss

#define __m128x __m128

// An __m128 register holds 4 floats, so step 4 elements per iteration.
static const unsigned int SSE_STEP_SIZE = 4;
static const unsigned int SSE_CUT_LEN_MASK = 3U;

#define _mm_add_px _mm_add_ps
#define _mm_mul_px _mm_mul_ps
#define _mm_load_px _mm_loadu_ps
#define _mm_store_px _mm_storeu_ps
#define _mm_load1_px _mm_load1_ps

#endif

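// y[i] += alpha * x[i] for i in [0, len). Uses AVX or SSE for the bulk of the
// loop and a scalar tail for the remainder; throws Unimplemented on
// ARM/SW/MIPS builds.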
template <typename T>
inline void axpy(const T* x, T* y, size_t len, const T alpha) {
  unsigned int jjj, lll;
  jjj = lll = 0;

#ifdef PADDLE_WITH_AVX
  lll = len & ~AVX_CUT_LEN_MASK;
  __m256x mm_alpha = _mm256_broadcast_sx(&alpha);
  for (jjj = 0; jjj < lll; jjj += AVX_STEP_SIZE) {
    _mm256_store_px(
        y + jjj,
        _mm256_add_px(_mm256_load_px(y + jjj),
                      _mm256_mul_px(mm_alpha, _mm256_load_px(x + jjj))));
  }
#elif defined(PADDLE_WITH_ARM) || defined(PADDLE_WITH_SW) || \
    defined(PADDLE_WITH_MIPS)
  PADDLE_THROW(platform::errors::Unimplemented("axpy is not supported"));
#else
  lll = len & ~SSE_CUT_LEN_MASK;
  __m128x mm_alpha = _mm_load1_px(&alpha);
  for (jjj = 0; jjj < lll; jjj += SSE_STEP_SIZE) {
    _mm_store_px(y + jjj,
                 _mm_add_px(_mm_load_px(y + jjj),
                            _mm_mul_px(mm_alpha, _mm_load_px(x + jjj))));
  }

#endif

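  // Scalar tail: handle the elements left over after the vectorized loop.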
  for (; jjj < len; jjj++) {
    y[jjj] += alpha * x[jjj];
  }
}

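// y[i] = alpha * x[i] for i in [0, len): same structure as axpy, but the
// result overwrites y instead of accumulating into it.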
template <typename T>
inline void axpy_noadd(const T* x, T* y, size_t len, const T alpha) {
  unsigned int jjj, lll;
  jjj = lll = 0;

#ifdef PADDLE_WITH_AVX
  lll = len & ~AVX_CUT_LEN_MASK;
  __m256x mm_alpha = _mm256_broadcast_sx(&alpha);
  for (jjj = 0; jjj < lll; jjj += AVX_STEP_SIZE) {
    _mm256_store_px(y + jjj, _mm256_mul_px(mm_alpha, _mm256_load_px(x + jjj)));
  }
#elif defined(PADDLE_WITH_ARM) || defined(PADDLE_WITH_SW) || \
    defined(PADDLE_WITH_MIPS)
  PADDLE_THROW(platform::errors::Unimplemented("axpy_noadd is not supported"));
#else
  lll = len & ~SSE_CUT_LEN_MASK;
  __m128x mm_alpha = _mm_load1_px(&alpha);
  for (jjj = 0; jjj < lll; jjj += SSE_STEP_SIZE) {
    _mm_store_px(y + jjj, _mm_mul_px(mm_alpha, _mm_load_px(x + jjj)));
  }

#endif

  for (; jjj < len; jjj++) {
    y[jjj] = alpha * x[jjj];
  }
}

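// int8_t inputs are not supported; this overload only reports a clear error.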
inline void axpy_noadd(const int8_t* x, int8_t* y, size_t len,
                       const float alpha) {
  PADDLE_THROW(platform::errors::Unimplemented(
      "int8_t input of axpy_noadd is not supported"));
}

}  // namespace operators
}  // namespace paddle