/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "operators/math/math_function.h"
#include <string>
#include "common/enforce.h"
#include "framework/data_type.h"
#include "framework/tensor.h"
#include "operators/math/gemm.h"
#include "operators/math/gemm/cblas.h"

namespace paddle_mobile {
namespace operators {
namespace math {

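// Functor dispatched through VisitDataType: fills the tensor with `value`
// cast to the tensor's element type, e.g. SetConstant(&t, 0.f) zero-fills
// `t` regardless of its dtype.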
struct TensorSetConstant {
  TensorSetConstant(framework::Tensor *tensor, float value)
      : tensor_(tensor), value_(value) {}
  template <typename T>
  void apply() const {
    auto *begin = tensor_->mutable_data<T>();
    std::fill(begin, begin + tensor_->numel(), static_cast<T>(value_));
  }
  framework::Tensor *tensor_;
  float value_;
};

void SetConstant(framework::Tensor *tensor, float value) {
  framework::VisitDataType(framework::ToDataType(tensor->type()),
                           TensorSetConstant(tensor, value));
}

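// Single-precision GEMM: matrix_out = alpha * A * B + beta * matrix_out.
// trans_b is accepted for interface symmetry but B is never transposed
// here, and the fused relu/bias are only applied on the M == 1 path.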
template <>
void MatMul<float, float>(const framework::Tensor &matrix_a, bool trans_a,
                          const framework::Tensor &matrix_b, bool trans_b,
                          float alpha, framework::Tensor *matrix_out,
                          float beta, bool relu, float *bias) {
  auto dim_a = matrix_a.dims();
  auto dim_b = matrix_b.dims();
  auto dim_out = matrix_out->dims();
  PADDLE_MOBILE_ENFORCE(
      dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
      "The input and output of MatMul be matrix");

  int M = dim_out[0];
  int N = dim_out[1];
  int K = (!trans_a) ? dim_a[1] : dim_a[0];

  Gemm gemm;
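  // The kernels below expect A untransposed, so materialize the transpose
  // of A explicitly when trans_a is set.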
  if (trans_a) {
    framework::Tensor matrix_trans;
    int numel = matrix_a.numel();
    int m = matrix_a.dims()[0];
    int n = matrix_a.dims()[1];
    const float *tmp = matrix_a.data<float>();
    float *a = matrix_trans.mutable_data<float>(matrix_a.dims());
    int index = 0;
    for (int j = 0; j < n; j++) {
      for (int i = 0; i < m; i++) {
        a[index++] = tmp[i * n + j];
      }
    }
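    // A single output row (M == 1) goes through the hand-tuned Sgemm
    // kernel, which fuses relu/bias; larger shapes fall back to the
    // local cblas_sgemm wrapper, which does not.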
    if (M == 1) {
#ifdef _OPENMP
      gemm.Sgemm_omp(M, N, K, alpha, a, K, matrix_b.data<float>(), N, beta,
                     matrix_out->data<float>(), N, relu, bias);
#else
      gemm.Sgemm(M, N, K, alpha, a, K, matrix_b.data<float>(), N, beta,
                 matrix_out->data<float>(), N, relu, bias);
#endif
    } else {
      cblas_sgemm(false, false, M, N, K, alpha, a, K, matrix_b.data<float>(), N,
                  beta, matrix_out->data<float>(), N);
    }
  } else {
    if (M == 1) {
#ifdef _OPENMP
      gemm.Sgemm_omp(M, N, K, alpha, matrix_a.data<float>(), K,
                     matrix_b.data<float>(), N, beta, matrix_out->data<float>(),
                     N, relu, bias);
#else
      gemm.Sgemm(M, N, K, alpha, matrix_a.data<float>(), K,
                 matrix_b.data<float>(), N, beta, matrix_out->data<float>(), N,
                 relu, bias);
#endif
    } else {
      cblas_sgemm(false, false, M, N, K, alpha, matrix_a.data<float>(), K,
                  matrix_b.data<float>(), N, beta, matrix_out->data<float>(),
                  N);
    }
  }
}
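
// Usage sketch (hypothetical shapes; assumes a, b and out were already
// allocated with mutable_data<float>() as 2x3, 3x4 and 2x4 tensors):
//   MatMul<float, float>(a, false, b, false, 1.f, &out, 0.f, false, nullptr);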

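// GEMM fused with batch-norm style per-channel scale and bias; `group`
// offsets into new_scale/new_bias to select the current channel group.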
void MatMulWithBn(const framework::Tensor &matrix_a, bool trans_a,
                  const framework::Tensor &matrix_b, bool trans_b, float alpha,
                  framework::Tensor *matrix_out, float beta, bool relu,
                  framework::Tensor *new_scale, framework::Tensor *new_bias,
                  int group, float *bias) {
  Gemm gemm;
  auto dim_a = matrix_a.dims();
  auto dim_b = matrix_b.dims();
  auto dim_out = matrix_out->dims();
  PADDLE_MOBILE_ENFORCE(
      dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
      "The input and output of MatMul be matrix");

  int M = dim_out[0];
  int N = dim_out[1];
  int K = (!trans_a) ? dim_a[1] : dim_a[0];

#ifdef _OPENMP
  gemm.SgemmWithBn_omp(
      M, N, K, alpha, matrix_a.data<float>(), K, matrix_b.data<float>(), N,
      beta, matrix_out->data<float>(), N, relu,
      new_scale->data<float>() + group, new_bias->data<float>() + group, bias);
#else
  gemm.SgemmWithBn(M, N, K, alpha, matrix_a.data<float>(), K,
                   matrix_b.data<float>(), N, beta, matrix_out->data<float>(),
                   N, relu, new_scale->data<float>() + group,
                   new_bias->data<float>() + group, bias);
#endif
}
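
// GEMM fused with a PRelu activation; `p` and `mode` parameterize the
// PRelu, and `bias`/`bias1` are optional additive terms.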
void MatMulWithPRelu(const framework::Tensor &matrix_a, bool trans_a,
                     const framework::Tensor &matrix_b, bool trans_b,
                     framework::Tensor *matrix_out, float *p, std::string mode,
                     float *bias, float *bias1) {
  Gemm gemm;
  auto dim_a = matrix_a.dims();
  auto dim_b = matrix_b.dims();
  auto dim_out = matrix_out->dims();
  PADDLE_MOBILE_ENFORCE(
      dim_a.size() == 2 && dim_b.size() == 2 && dim_out.size() == 2,
      "The input and output of MatMul be matrix");

  int M = dim_out[0];
  int N = dim_out[1];
  int K = (!trans_a) ? dim_a[1] : dim_a[0];

#ifdef _OPENMP
  gemm.SgemmWithPRelu_omp(M, N, K, matrix_a.data<float>(), K,
                          matrix_b.data<float>(), N, matrix_out->data<float>(),
                          N, p, mode, bias, bias1);
#else
  gemm.SgemmWithPRelu(M, N, K, matrix_a.data<float>(), K,
                      matrix_b.data<float>(), N, matrix_out->data<float>(), N,
                      p, mode, bias, bias1);
#endif
}

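// Zeroes out a tensor's contents in place.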
template <typename T>
struct ClearTensor<CPU, T> {
  void operator()(framework::Tensor *tensor) {
    auto size = tensor->numel();
    auto *tensor_data = tensor->data<T>();
    memset(tensor_data, 0, sizeof(T) * size);
  }
};

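// Adds `vector` to every row of `input`, writing the result to `output`.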
template <typename T>
struct RowwiseAdd<CPU, T> {
  void operator()(const framework::Tensor &input,
                  const framework::Tensor &vector, framework::Tensor *output) {
    auto in_dims = input.dims();
    auto size = input.numel() / in_dims[0];
    PADDLE_MOBILE_ENFORCE((vector.numel() == size),
                          "vector.numel() must be equal to size.");
    PADDLE_MOBILE_ENFORCE((output->dims() == in_dims),
                          "output->dims() must be equal to in_dims.");

    auto *input_data = input.data<T>();
    auto *out_data = output->data<T>();
    auto *vec_data = vector.data<T>();
    for (int64_t i = 0; i < in_dims[0]; ++i) {
      for (int64_t j = 0; j < size; ++j) {
        out_data[i * size + j] = input_data[i * size + j] + vec_data[j];
      }
    }
  }
};

template struct RowwiseAdd<CPU, float>;
template struct ClearTensor<CPU, float>;

}  // namespace math
}  // namespace operators
}  // namespace paddle_mobile