/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <algorithm>
#include <cmath>
#include <string>
#include "common/enforce.h"
#include "common/types.h"
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#include <arm_neon.h>
#include "operators/math/math_func_neon.h"
#endif

namespace paddle_mobile {
namespace operators {
namespace math {

// Bounds for the (currently disabled) input clamping in Active<SIGMOID>
// and Active<TANH> below; they keep the exp() argument in a range where
// the activations have effectively saturated.
#define SIGMOID_THRESHOLD_MIN -40.0
#define SIGMOID_THRESHOLD_MAX 13.0
#define EXP_MAX_INPUT 40.0

inline ActivationType GetActivationType(const std::string &type) {
  if (type == "sigmoid") {
    return ActivationType::SIGMOID;
  } else if (type == "relu") {
    return ActivationType::RELU;
  } else if (type == "tanh") {
    return ActivationType::TANH;
  } else if (type == "identity" || type == "") {
    return ActivationType::IDENTITY;
  }
  PADDLE_MOBILE_THROW_EXCEPTION("Unsupported activation type.");
}
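// Usage (illustrative): GetActivationType("relu") returns
// ActivationType::RELU; an empty type string maps to IDENTITY.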

#if defined(__ARM_NEON__) || defined(__ARM_NEON)
template <ActivationType Act = IDENTITY>
inline float32x4_t vActiveq_f32(const float32x4_t &x) {
  return x;
}

template <>
inline float32x4_t vActiveq_f32<RELU>(const float32x4_t &x) {
  float32x4_t __zero = vdupq_n_f32(0.f);
  return vmaxq_f32(x, __zero);
}

template <>
inline float32x4_t vActiveq_f32<RELU6>(const float32x4_t &x) {
  float32x4_t __zero = vdupq_n_f32(0.f);
  float32x4_t __six = vdupq_n_f32(6.f);
  return vminq_f32(vmaxq_f32(x, __zero), __six);
}

template <>
inline float32x4_t vActiveq_f32<SIGMOID>(const float32x4_t &x) {
  // sigmoid(x) = 1 / (1 + exp(-x))
  float32x4_t __one = vdupq_n_f32(1.f);
  float32x4_t __x = vnegq_f32(x);
  __x = exp_ps(__x);
  __x = vaddq_f32(__x, __one);
  // vrecpeq_f32 gives only a coarse estimate of 1/__x; one vrecpsq_f32
  // Newton-Raphson step refines it.
  float32x4_t __out = vrecpeq_f32(__x);
  return vmulq_f32(vrecpsq_f32(__x, __out), __out);
}

template <>
inline float32x4_t vActiveq_f32<TANH>(const float32x4_t &x) {
  // tanh(x) = 2 / (1 + exp(-2x)) - 1
  float32x4_t __one = vdupq_n_f32(1.f);
  float32x4_t __x = vnegq_f32(x);
  __x = vmulq_n_f32(__x, 2.f);
  __x = exp_ps(__x);
  __x = vaddq_f32(__x, __one);
  // Reciprocal estimate plus one Newton-Raphson refinement step.
  float32x4_t __out = vrecpeq_f32(__x);
  __out = vmulq_f32(vrecpsq_f32(__x, __out), __out);
  __out = vmulq_n_f32(__out, 2.f);
  return vsubq_f32(__out, __one);
}
#endif
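
// The scalar Active<> overloads below mirror the NEON specializations;
// they serve non-NEON builds and the leftover elements of vectorized loops.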

template <ActivationType Act = IDENTITY>
inline float Active(const float &x) {
  return x;
}

template <>
inline float Active<RELU>(const float &x) {
  return std::max(x, 0.f);
}

template <>
inline float Active<RELU6>(const float &x) {
  return std::min(std::max(x, 0.f), 6.f);
}

template <>
inline float Active<SIGMOID>(const float &x) {
  // Clamping to the thresholds above would bound the exp() argument:
  //   float tmp = std::min(std::max(x, (float)SIGMOID_THRESHOLD_MIN),
  //                        (float)SIGMOID_THRESHOLD_MAX);
  //   return 1.f / (1.f + std::exp(-tmp));
  return 1.f / (1.f + std::exp(-x));
}

template <>
inline float Active<TANH>(const float &x) {
  // Clamping would cap the exp() argument at EXP_MAX_INPUT:
  //   float tmp = std::min(-2.f * x, (float)EXP_MAX_INPUT);
  //   return 2.f / (1.f + std::exp(tmp)) - 1.f;
  return 2.f / (1.f + std::exp(-2.f * x)) - 1.f;
}
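
// Illustrative sketch (not part of the original header): applies an
// activation elementwise over a buffer, taking the NEON path four lanes
// at a time and the scalar Active<> path for the tail. The name
// `ActivateBuffer` is hypothetical.
template <ActivationType Act = IDENTITY>
inline void ActivateBuffer(const float *in, float *out, int size) {
  int i = 0;
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
  for (; i + 4 <= size; i += 4) {
    vst1q_f32(out + i, vActiveq_f32<Act>(vld1q_f32(in + i)));
  }
#endif
  for (; i < size; ++i) {
    out[i] = Active<Act>(in[i]);
  }
}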

}  // namespace math
}  // namespace operators
}  // namespace paddle_mobile