/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "operators/math/math_function.h"
#include <cstring>
#include <string>
#include "operators/math/gemm.h"

namespace paddle_mobile {
namespace operators {
namespace math {

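// matmul<float>: matrix_out = alpha * matrix_a * matrix_b + beta * matrix_out,
// with an optional fused ReLU and bias applied in the GEMM epilogue. Only
// trans_a is honored here; trans_b is part of the interface but is not applied
// by this implementation. When trans_a is set, A is first transposed into a
// temporary buffer and then handed to the packed Sgemm kernel (Sgemm_omp when
// built with OpenMP).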
template <>
void matmul<float>(const framework::Tensor &matrix_a, bool trans_a,
                   const framework::Tensor &matrix_b, bool trans_b, float alpha,
                   framework::Tensor *matrix_out, float beta, bool relu,
                   float *bias) {
  auto dim_a = matrix_a.dims();
  auto dim_b = matrix_b.dims();
  auto dim_out = matrix_out->dims();
  PADDLE_MOBILE_ENFORCE(
      dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
      "The input and output of matmul be matrix");

  int M = dim_out[0];
  int N = dim_out[1];
  int K = (!trans_a) ? dim_a[1] : dim_a[0];

  if (trans_a) {
    int numel = matrix_a.numel();
    int m = matrix_a.dims()[0];
    int n = matrix_a.dims()[1];
    // matrix_a is const, so read it through a const pointer instead of
    // casting the const away.
    const float *tmp = matrix_a.data<float>();
    float *a = static_cast<float *>(
        paddle_mobile::memory::Alloc(sizeof(float) * numel));
    // Explicitly transpose A (m x n, row-major) so that `a` holds A^T
    // contiguously: a[j * m + i] = tmp[i * n + j].
    int index = 0;
    for (int j = 0; j < n; j++) {
      for (int i = 0; i < m; i++) {
        a[index++] = tmp[i * n + j];
      }
    }
#ifdef _OPENMP
    Sgemm_omp(M, N, K, alpha, a, K, matrix_b.data<float>(), N, beta,
              matrix_out->data<float>(), N, relu, bias);
#else
    Sgemm(M, N, K, alpha, a, K, matrix_b.data<float>(), N, beta,
          matrix_out->data<float>(), N, relu, bias);
#endif
    // Release the temporary transposed copy of A (memory::Free pairs with the
    // memory::Alloc call above); otherwise the buffer is leaked on every call.
    paddle_mobile::memory::Free(a);
  } else {
#ifdef _OPENMP
    Sgemm_omp(M, N, K, alpha, matrix_a.data<float>(), K, matrix_b.data<float>(),
              N, beta, matrix_out->data<float>(), N, relu, bias);
#else
    Sgemm(M, N, K, alpha, matrix_a.data<float>(), K, matrix_b.data<float>(), N,
          beta, matrix_out->data<float>(), N, relu, bias);
#endif
  }
}

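// matmulWithBn<float>: the same GEMM, but the epilogue also applies batch-norm
// scale and shift. `group` is an element offset into new_scale and new_bias,
// selecting the parameter slice for the current group. trans_a and trans_b are
// not applied by this implementation.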
template <>
void matmulWithBn<float>(const framework::Tensor &matrix_a, bool trans_a,
                         const framework::Tensor &matrix_b, bool trans_b,
                         float alpha, framework::Tensor *matrix_out, float beta,
                         bool relu, framework::Tensor *new_scale,
                         framework::Tensor *new_bias, int group, float *bias) {
  auto dim_a = matrix_a.dims();
  auto dim_b = matrix_b.dims();
  auto dim_out = matrix_out->dims();
  PADDLE_MOBILE_ENFORCE(
      dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
      "The input and output of matmul be matrix");

  int M = dim_out[0];
  int N = dim_out[1];
  int K = (!trans_a) ? dim_a[1] : dim_a[0];

#ifdef _OPENMP
  SgemmWithBn_omp(M, N, K, alpha, matrix_a.data<float>(), K,
                  matrix_b.data<float>(), N, beta, matrix_out->data<float>(), N,
                  relu, new_scale->data<float>() + group,
                  new_bias->data<float>() + group, bias);
#else
  SgemmWithBn(M, N, K, alpha, matrix_a.data<float>(), K, matrix_b.data<float>(),
              N, beta, matrix_out->data<float>(), N, relu,
              new_scale->data<float>() + group, new_bias->data<float>() + group,
              bias);
#endif
}
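
// matmulWithPRelu: C = A * B with a PReLU epilogue. `p` holds the PReLU slope
// parameters and `mode` selects how they are broadcast (as interpreted by
// SgemmWithPRelu); bias and bias1 are optional additive terms. trans_a and
// trans_b are accepted but not applied by this implementation.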
void matmulWithPRelu(const framework::Tensor &matrix_a, bool trans_a,
                     const framework::Tensor &matrix_b, bool trans_b,
                     framework::Tensor *matrix_out, float *p, std::string mode,
                     float *bias, float *bias1) {
  auto dim_a = matrix_a.dims();
  auto dim_b = matrix_b.dims();
  auto dim_out = matrix_out->dims();
  PADDLE_MOBILE_ENFORCE(
      dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
      "The input and output of matmul be matrix");

  int M = dim_out[0];
  int N = dim_out[1];
  int K = (!trans_a) ? dim_a[1] : dim_a[0];

#ifdef _OPENMP
  SgemmWithPRelu_omp(M, N, K, matrix_a.data<float>(), K, matrix_b.data<float>(),
                     N, matrix_out->data<float>(), N, p, mode, bias, bias1);
#else
  SgemmWithPRelu(M, N, K, matrix_a.data<float>(), K, matrix_b.data<float>(), N,
                 matrix_out->data<float>(), N, p, mode, bias, bias1);

#endif
}

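// ClearTensor<CPU, T> zero-fills a tensor's underlying buffer in place.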
template <typename T>
struct ClearTensor<CPU, T> {
  void operator()(framework::Tensor *tensor) {
    auto size = tensor->numel();
    auto *tensor_data = tensor->data<T>();
    memset(tensor_data, 0, sizeof(T) * size);
  }
};

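// RowwiseAdd<CPU, T> broadcasts `vector` across the rows of `input`:
// output(i, j) = input(i, j) + vector(j).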
template <typename T>
struct RowwiseAdd<CPU, T> {
  void operator()(const framework::Tensor &input,
                  const framework::Tensor &vector, framework::Tensor *output) {
    auto in_dims = input.dims();
    auto size = input.numel() / in_dims[0];
    PADDLE_MOBILE_ENFORCE((vector.numel() == size),
                          "vector.numel() must be equal to size.");
    PADDLE_MOBILE_ENFORCE((output->dims() == in_dims),
                          "output->dims() must be equal to in_dims.");

    auto *input_data = input.data<T>();
    auto *out_data = output->data<T>();
    auto *vec_data = vector.data<T>();
    for (int64_t i = 0; i < in_dims[0]; ++i) {
      for (int64_t j = 0; j < size; ++j) {
        out_data[i * size + j] = input_data[i * size + j] + vec_data[j];
      }
    }
  }
};

template struct RowwiseAdd<CPU, float>;
template struct ClearTensor<CPU, float>;

}  // namespace math
}  // namespace operators
}  // namespace paddle_mobile