// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <arm_neon.h>

#include <algorithm>
#include <cmath>

#include "lite/backends/arm/math/activation.h"
#include "lite/backends/arm/math/affine_channel.h"
#include "lite/backends/arm/math/anchor_generator.h"
#include "lite/backends/arm/math/argmax.h"
#include "lite/backends/arm/math/axpy.h"
#include "lite/backends/arm/math/beam_search.h"
#include "lite/backends/arm/math/box_coder.h"
#include "lite/backends/arm/math/clip.h"
#include "lite/backends/arm/math/col_im_transform.h"
#include "lite/backends/arm/math/concat.h"
#include "lite/backends/arm/math/conv_block_utils.h"
#include "lite/backends/arm/math/conv_impl.h"
#include "lite/backends/arm/math/decode_bboxes.h"
#include "lite/backends/arm/math/dropout.h"
#include "lite/backends/arm/math/elementwise.h"
#include "lite/backends/arm/math/fill_bias_relu.h"
#include "lite/backends/arm/math/gemm_prepacked_int8.h"
#include "lite/backends/arm/math/gemm_s8.h"
#include "lite/backends/arm/math/gemv_arm_int8.h"
#include "lite/backends/arm/math/im2sequence.h"
#include "lite/backends/arm/math/increment.h"
#include "lite/backends/arm/math/interpolate.h"
#include "lite/backends/arm/math/layout.h"
#include "lite/backends/arm/math/lrn.h"
#include "lite/backends/arm/math/negative.h"
#include "lite/backends/arm/math/norm.h"
#include "lite/backends/arm/math/packed_sgemm.h"
#include "lite/backends/arm/math/packed_sgemm_c4.h"
#include "lite/backends/arm/math/pad2d.h"
#include "lite/backends/arm/math/pooling.h"
#include "lite/backends/arm/math/pow.h"
#include "lite/backends/arm/math/prior_box.h"
#include "lite/backends/arm/math/reduce_max.h"
#include "lite/backends/arm/math/reduce_mean.h"
#include "lite/backends/arm/math/reduce_prod.h"
#include "lite/backends/arm/math/reduce_sum.h"
#include "lite/backends/arm/math/scale.h"
#include "lite/backends/arm/math/scatter.h"
#include "lite/backends/arm/math/sequence_expand.h"
#include "lite/backends/arm/math/sequence_pool.h"
#include "lite/backends/arm/math/sequence_pool_grad.h"
#include "lite/backends/arm/math/sequence_softmax.h"
#include "lite/backends/arm/math/sgemm.h"
#include "lite/backends/arm/math/sgemv.h"
#include "lite/backends/arm/math/shuffle_channel.h"
#include "lite/backends/arm/math/slice.h"
#include "lite/backends/arm/math/softmax.h"
#include "lite/backends/arm/math/split.h"
#include "lite/backends/arm/math/split_merge_lod_tenosr.h"
#include "lite/backends/arm/math/stack.h"
#include "lite/backends/arm/math/topk.h"
#include "lite/backends/arm/math/yolo_box.h"

namespace paddle {
namespace lite {
namespace arm {
namespace math {

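// Constants for the cephes-style polynomial approximation of the natural
// logarithm used by log_ps below.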
#define c_inv_mant_mask ~0x7f800000u
#define c_cephes_SQRTHF 0.707106781186547524
#define c_cephes_log_p0 7.0376836292E-2
#define c_cephes_log_p1 -1.1514610310E-1
#define c_cephes_log_p2 1.1676998740E-1
#define c_cephes_log_p3 -1.2420140846E-1
#define c_cephes_log_p4 +1.4249322787E-1
#define c_cephes_log_p5 -1.6668057665E-1
#define c_cephes_log_p6 +2.0000714765E-1
#define c_cephes_log_p7 -2.4999993993E-1
#define c_cephes_log_p8 +3.3333331174E-1
#define c_cephes_log_q1 -2.12194440e-4
#define c_cephes_log_q2 0.693359375

// natural logarithm computed for 4 simultaneous floats;
// returns NaN for x <= 0
inline float32x4_t log_ps(float32x4_t x) {
  float32x4_t one = vdupq_n_f32(1);

  x = vmaxq_f32(x, vdupq_n_f32(0));  // force flush to zero on denormal values
  uint32x4_t invalid_mask = vcleq_f32(x, vdupq_n_f32(0));

  int32x4_t ux = vreinterpretq_s32_f32(x);

  int32x4_t emm0 = vshrq_n_s32(ux, 23);

  // keep only the fractional part
  ux = vandq_s32(ux, vdupq_n_s32(c_inv_mant_mask));
  ux = vorrq_s32(ux, vreinterpretq_s32_f32(vdupq_n_f32(0.5f)));
  x = vreinterpretq_f32_s32(ux);

  emm0 = vsubq_s32(emm0, vdupq_n_s32(0x7f));
  float32x4_t e = vcvtq_f32_s32(emm0);

  e = vaddq_f32(e, one);

  // part2:
  // if( x < SQRTHF ) {
  //   e -= 1;
  //   x = x + x - 1.0;
  // } else {
  //   x = x - 1.0;
  // }
  //
  uint32x4_t mask = vcltq_f32(x, vdupq_n_f32(c_cephes_SQRTHF));
  float32x4_t tmp =
      vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(x), mask));
  x = vsubq_f32(x, one);
  e = vsubq_f32(
      e, vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(one), mask)));
  x = vaddq_f32(x, tmp);

  float32x4_t z = vmulq_f32(x, x);

  float32x4_t y = vdupq_n_f32(c_cephes_log_p0);
  y = vmulq_f32(y, x);
  y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p1));
  y = vmulq_f32(y, x);
  y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p2));
  y = vmulq_f32(y, x);
  y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p3));
  y = vmulq_f32(y, x);
  y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p4));
  y = vmulq_f32(y, x);
  y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p5));
  y = vmulq_f32(y, x);
  y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p6));
  y = vmulq_f32(y, x);
  y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p7));
  y = vmulq_f32(y, x);
  y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p8));
  y = vmulq_f32(y, x);

  y = vmulq_f32(y, z);

  tmp = vmulq_f32(e, vdupq_n_f32(c_cephes_log_q1));
  y = vaddq_f32(y, tmp);

  tmp = vmulq_f32(z, vdupq_n_f32(0.5f));
  y = vsubq_f32(y, tmp);

  tmp = vmulq_f32(e, vdupq_n_f32(c_cephes_log_q2));
  x = vaddq_f32(x, y);
  x = vaddq_f32(x, tmp);
  x = vreinterpretq_f32_u32(vorrq_u32(
      vreinterpretq_u32_f32(x), invalid_mask));  // negative arg will be NAN
  return x;
}
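
// Usage sketch (illustrative only): taking the natural log of four packed
// floats at once.
//
//   float in[4] = {1.f, 2.f, 4.f, 8.f};
//   float out[4];
//   vst1q_f32(out, log_ps(vld1q_f32(in)));
//   // out ~= {0.f, 0.6931f, 1.3863f, 2.0794f}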

#define c_exp_hi 88.3762626647949f
#define c_exp_lo -88.3762626647949f

#define c_cephes_LOG2EF 1.44269504088896341
#define c_cephes_exp_C1 0.693359375
#define c_cephes_exp_C2 -2.12194440e-4

#define c_cephes_exp_p0 1.9875691500E-4
#define c_cephes_exp_p1 1.3981999507E-3
#define c_cephes_exp_p2 8.3334519073E-3
#define c_cephes_exp_p3 4.1665795894E-2
#define c_cephes_exp_p4 1.6666665459E-1
#define c_cephes_exp_p5 5.0000001201E-1

// exp() computed for 4 floats at once
inline float32x4_t exp_ps(float32x4_t x) {
  float32x4_t tmp, fx;

  float32x4_t one = vdupq_n_f32(1);
  x = vminq_f32(x, vdupq_n_f32(c_exp_hi));
  x = vmaxq_f32(x, vdupq_n_f32(c_exp_lo));

  // express exp(x) as exp(g + n*log(2))
  fx = vmlaq_f32(vdupq_n_f32(0.5f), x, vdupq_n_f32(c_cephes_LOG2EF));

  // perform a floorf
  tmp = vcvtq_f32_s32(vcvtq_s32_f32(fx));

  // if greater, subtract 1
  uint32x4_t mask = vcgtq_f32(tmp, fx);
  mask = vandq_u32(mask, vreinterpretq_u32_f32(one));

  fx = vsubq_f32(tmp, vreinterpretq_f32_u32(mask));

  tmp = vmulq_f32(fx, vdupq_n_f32(c_cephes_exp_C1));
  float32x4_t z = vmulq_f32(fx, vdupq_n_f32(c_cephes_exp_C2));
  x = vsubq_f32(x, tmp);
  x = vsubq_f32(x, z);

  static const float cephes_exp_p[6] = {c_cephes_exp_p0,
                                        c_cephes_exp_p1,
                                        c_cephes_exp_p2,
                                        c_cephes_exp_p3,
                                        c_cephes_exp_p4,
                                        c_cephes_exp_p5};
  float32x4_t y = vld1q_dup_f32(cephes_exp_p + 0);
  float32x4_t c1 = vld1q_dup_f32(cephes_exp_p + 1);
  float32x4_t c2 = vld1q_dup_f32(cephes_exp_p + 2);
  float32x4_t c3 = vld1q_dup_f32(cephes_exp_p + 3);
  float32x4_t c4 = vld1q_dup_f32(cephes_exp_p + 4);
  float32x4_t c5 = vld1q_dup_f32(cephes_exp_p + 5);

  y = vmulq_f32(y, x);
  z = vmulq_f32(x, x);

  y = vaddq_f32(y, c1);
  y = vmulq_f32(y, x);
  y = vaddq_f32(y, c2);
  y = vmulq_f32(y, x);
  y = vaddq_f32(y, c3);
  y = vmulq_f32(y, x);
  y = vaddq_f32(y, c4);
  y = vmulq_f32(y, x);
  y = vaddq_f32(y, c5);

  y = vmulq_f32(y, z);
  y = vaddq_f32(y, x);
  y = vaddq_f32(y, one);

  // build 2^n
  int32x4_t mm;
  mm = vcvtq_s32_f32(fx);
  mm = vaddq_s32(mm, vdupq_n_s32(0x7f));
  mm = vshlq_n_s32(mm, 23);
  float32x4_t pow2n = vreinterpretq_f32_s32(mm);

  y = vmulq_f32(y, pow2n);
  return y;
}
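
// Usage sketch (illustrative only): exponentiating four packed floats.
//
//   float in[4] = {0.f, 1.f, -1.f, 2.f};
//   float out[4];
//   vst1q_f32(out, exp_ps(vld1q_f32(in)));
//   // out ~= {1.f, 2.7183f, 0.3679f, 7.3891f}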

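// Constants for the cephes-style sine/cosine approximation used by
// sincos_ps below.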
#define c_minus_cephes_DP1 -0.78515625
#define c_minus_cephes_DP2 -2.4187564849853515625e-4
#define c_minus_cephes_DP3 -3.77489497744594108e-8
#define c_sincof_p0 -1.9515295891E-4
#define c_sincof_p1 8.3321608736E-3
#define c_sincof_p2 -1.6666654611E-1
#define c_coscof_p0 2.443315711809948E-005
#define c_coscof_p1 -1.388731625493765E-003
#define c_coscof_p2 4.166664568298827E-002
#define c_cephes_FOPI 1.27323954473516  // 4 / M_PI

// evaluation of 4 sines & cosines at once.
//
// The code is an exact rewriting of the cephes sinf function.
// Precision is excellent as long as x < 8192 (the special handling cephes
// applies to larger arguments is not reproduced here -- values over 8192 do
// not return garbage, but the extra precision is missing).
//
// Note that it is such that sinf((float)M_PI) = 8.74e-8, which is the
// surprising but correct result.
//
// Note also that when you compute sin(x), cos(x) is available at almost no
// extra cost, so both sin_ps and cos_ps are implemented on top of
// sincos_ps.
//
inline void sincos_ps(float32x4_t x, float32x4_t* ysin, float32x4_t* ycos) {
  // any x
  float32x4_t xmm1, xmm2, xmm3, y;

  uint32x4_t emm2;

  uint32x4_t sign_mask_sin, sign_mask_cos;
  sign_mask_sin = vcltq_f32(x, vdupq_n_f32(0));
  x = vabsq_f32(x);

  // scale by 4/Pi
  y = vmulq_f32(x, vdupq_n_f32(c_cephes_FOPI));

  // store the integer part of y in emm2
  emm2 = vcvtq_u32_f32(y);
  // j=(j+1) & (~1) (see the cephes sources)
  emm2 = vaddq_u32(emm2, vdupq_n_u32(1));
  emm2 = vandq_u32(emm2, vdupq_n_u32(~1));
  y = vcvtq_f32_u32(emm2);

  // get the polynomial selection mask:
  // there is one polynomial for 0 <= x <= Pi/4
  // and another one for Pi/4 < x <= Pi/2
  uint32x4_t poly_mask = vtstq_u32(emm2, vdupq_n_u32(2));

  // the magic pass: "Extended precision modular arithmetic"
  // x = ((x - y * DP1) - y * DP2) - y * DP3;
  xmm1 = vmulq_n_f32(y, c_minus_cephes_DP1);
  xmm2 = vmulq_n_f32(y, c_minus_cephes_DP2);
  xmm3 = vmulq_n_f32(y, c_minus_cephes_DP3);
  x = vaddq_f32(x, xmm1);
  x = vaddq_f32(x, xmm2);
  x = vaddq_f32(x, xmm3);

  sign_mask_sin = veorq_u32(sign_mask_sin, vtstq_u32(emm2, vdupq_n_u32(4)));
  sign_mask_cos = vtstq_u32(vsubq_u32(emm2, vdupq_n_u32(2)), vdupq_n_u32(4));

  // evaluate the first polynomial  (0 <= x <= Pi/4) in y1,
  // and the second polynomial      (Pi/4 < x <= Pi/2) in y2
  float32x4_t z = vmulq_f32(x, x);
  float32x4_t y1, y2;

  y1 = vmulq_n_f32(z, c_coscof_p0);
  y2 = vmulq_n_f32(z, c_sincof_p0);
  y1 = vaddq_f32(y1, vdupq_n_f32(c_coscof_p1));
  y2 = vaddq_f32(y2, vdupq_n_f32(c_sincof_p1));
  y1 = vmulq_f32(y1, z);
  y2 = vmulq_f32(y2, z);
  y1 = vaddq_f32(y1, vdupq_n_f32(c_coscof_p2));
  y2 = vaddq_f32(y2, vdupq_n_f32(c_sincof_p2));
  y1 = vmulq_f32(y1, z);
  y2 = vmulq_f32(y2, z);
  y1 = vmulq_f32(y1, z);
  y2 = vmulq_f32(y2, x);
  y1 = vsubq_f32(y1, vmulq_f32(z, vdupq_n_f32(0.5f)));
  y2 = vaddq_f32(y2, x);
  y1 = vaddq_f32(y1, vdupq_n_f32(1));

  // select the correct result from the two polynomials
  float32x4_t ys = vbslq_f32(poly_mask, y1, y2);
  float32x4_t yc = vbslq_f32(poly_mask, y2, y1);
  *ysin = vbslq_f32(sign_mask_sin, vnegq_f32(ys), ys);
  *ycos = vbslq_f32(sign_mask_cos, yc, vnegq_f32(yc));
}
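
// Usage sketch (illustrative only): computing sine and cosine of the same
// four lanes in a single pass.
//
//   float32x4_t s, c;
//   sincos_ps(vdupq_n_f32(0.5f), &s, &c);
//   // every lane of s ~= 0.4794f (sin 0.5); every lane of c ~= 0.8776f (cos 0.5)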

inline float32x4_t sin_ps(float32x4_t x) {
  float32x4_t ysin, ycos;
  sincos_ps(x, &ysin, &ycos);
  return ysin;
}

inline float32x4_t cos_ps(float32x4_t x) {
  float32x4_t ysin, ycos;
  sincos_ps(x, &ysin, &ycos);
  return ycos;
}

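// Approximate division a / b: vrecpeq_f32 yields a low-precision reciprocal
// estimate of b, and one Newton-Raphson step (vrecpsq_f32 computes 2 - b * r)
// refines it before the final multiply; each refinement step roughly doubles
// the precision of the estimate.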
inline float32x4_t div_ps(float32x4_t a, float32x4_t b) {
  float32x4_t reciprocal = vrecpeq_f32(b);
  reciprocal = vmulq_f32(vrecpsq_f32(b, reciprocal), reciprocal);
  return vmulq_f32(a, reciprocal);
}

inline float32x4_t pow_ps(float32x4_t a, float32x4_t b) {
  // pow(x, m) = exp(m * log(x))
  return exp_ps(vmulq_f32(b, log_ps(a)));
}
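
// Note: because log_ps returns NaN for non-positive inputs, pow_ps is only
// meaningful for a > 0 (matching the exp(m * log(x)) identity above).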

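// Pairwise horizontal add: returns {a0 + a1, a2 + a3, b0 + b1, b2 + b3}.
// (Presumably a portable stand-in for the AArch64-only vpaddq_f32 intrinsic,
// built from per-lane accesses.)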
inline float32x4_t vpaddq_f32(float32x4_t a, float32x4_t b) {
  float32x4_t vrst;
  vrst[0] = a[0] + a[1];
  vrst[1] = a[2] + a[3];
  vrst[2] = b[0] + b[1];
  vrst[3] = b[2] + b[3];
  return vrst;
}

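// Adds a per-channel bias to a num x channel fully-connected output in
// place, optionally applying a fused ReLU. (Declaration only; the
// definition is provided elsewhere in the backend.)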
template <typename T>
void fill_bias_fc(
    T* tensor, const T* bias, int num, int channel, bool flag_relu);

template <lite_api::ActivationType Act = lite_api::ActivationType::kIndentity>
inline float32x4_t vactive_f32(const float32x4_t& x) {
  return x;
}

template <>
inline float32x4_t vactive_f32<lite_api::ActivationType::kRelu>(
    const float32x4_t& x) {
  float32x4_t __zero = vdupq_n_f32(0.f);
  return vmaxq_f32(x, __zero);
}

template <>
inline float32x4_t vactive_f32<lite_api::ActivationType::kRelu6>(
    const float32x4_t& x) {
  float32x4_t __zero = vdupq_n_f32(0.f);
  float32x4_t __six = vdupq_n_f32(6.f);
  return vminq_f32(vmaxq_f32(x, __zero), __six);
}

template <>
inline float32x4_t vactive_f32<lite_api::ActivationType::kSigmoid>(
    const float32x4_t& x) {
  float32x4_t __one = vdupq_n_f32(1.f);
  float32x4_t __x = vnegq_f32(x);
  __x = exp_ps(__x);
  __x = vaddq_f32(__x, __one);
  float32x4_t __out = vrecpeq_f32(__x);
  return vmulq_f32(vrecpsq_f32(__x, __out), __out);
}

template <>
inline float32x4_t vactive_f32<lite_api::ActivationType::kTanh>(
    const float32x4_t& x) {
  float32x4_t __one = vdupq_n_f32(1.f);
  float32x4_t __x = vmulq_n_f32(x, -2.f);
  __x = exp_ps(__x);
  __x = vaddq_f32(__x, __one);
  float32x4_t __out = vrecpeq_f32(__x);
  __out = vmulq_f32(vrecpsq_f32(__x, __out), __out);
  __out = vmulq_n_f32(__out, 2.f);
  return vsubq_f32(__out, __one);
}

template <lite_api::ActivationType Act = lite_api::ActivationType::kIndentity>
inline float active_f32(const float& x) {
  return x;
}

template <>
inline float active_f32<lite_api::ActivationType::kRelu>(const float& x) {
  return std::max(x, 0.f);
}

template <>
inline float active_f32<lite_api::ActivationType::kRelu6>(const float& x) {
  return std::min(std::max(x, 0.f), 6.f);
}

template <>
inline float active_f32<lite_api::ActivationType::kSigmoid>(const float& x) {
  return 1.f / (1.f + exp(-x));
}

template <>
inline float active_f32<lite_api::ActivationType::kTanh>(const float& x) {
  return 2.f / (1.f + exp(-2.f * x)) - 1.f;
}
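
// Usage sketch (illustrative only, not an API in this file): the templated
// activations let a kernel fuse its activation at compile time, using the
// vector path for full 4-lane groups and the scalar path for the tail.
//
//   template <lite_api::ActivationType Act>
//   void apply_activation(const float* din, float* dout, int n) {
//     int i = 0;
//     for (; i + 3 < n; i += 4) {
//       vst1q_f32(dout + i, vactive_f32<Act>(vld1q_f32(din + i)));
//     }
//     for (; i < n; ++i) {
//       dout[i] = active_f32<Act>(din[i]);
//     }
//   }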

}  // namespace math
}  // namespace arm
}  // namespace lite
}  // namespace paddle