/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <algorithm>
#include <cmath>
#include <string>
#include "common/enforce.h"
#include "common/types.h"
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#include <arm_neon.h>
#include "operators/math/math_func_neon.h"
#endif

namespace paddle_mobile {
namespace operators {
namespace math {

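// Clamp bounds for the sigmoid input and the maximum input passed to exp(),
// used to keep the exponential numerically stable.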
#define SIGMOID_THRESHOLD_MIN -40.0
#define SIGMOID_THRESHOLD_MAX 13.0
#define EXP_MAX_INPUT 40.0

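// Maps an activation name to its enum value; an empty name means identity.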
inline ActivationType GetActivationType(const std::string &type) {
  if (type == "sigmoid") {
    return ActivationType::SIGMOID;
  } else if (type == "relu") {
    return ActivationType::RELU;
  } else if (type == "tanh") {
    return ActivationType::TANH;
  } else if (type == "identity" || type == "") {
    return ActivationType::IDENTITY;
  }
  PADDLE_MOBILE_THROW_EXCEPTION("Unsupported activation type.");
}

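// Overload that takes the activation encoded as an integer.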
inline ActivationType GetActivationType(const int type) {
  if (type == 0) {
    return ActivationType::IDENTITY;
  } else if (type == 1) {
    return ActivationType::SIGMOID;
  } else if (type == 2) {
    return ActivationType::TANH;
  } else if (type == 3) {
    return ActivationType::RELU;
  }
  PADDLE_MOBILE_THROW_EXCEPTION("Unsupported activation type.");
}

#if defined(__ARM_NEON__) || defined(__ARM_NEON)
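// Vectorized activations: each specialization applies the activation
// element-wise to a float32x4_t. The primary template is the identity.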
template <ActivationType Act = IDENTITY>
inline float32x4_t vActiveq_f32(const float32x4_t &x) {
  return x;
}

template <>
inline float32x4_t vActiveq_f32<RELU>(const float32x4_t &x) {
  float32x4_t __zero = vdupq_n_f32(0.f);
  return vmaxq_f32(x, __zero);
}

template <>
inline float32x4_t vActiveq_f32<RELU6>(const float32x4_t &x) {
  float32x4_t __zero = vdupq_n_f32(0.f);
  float32x4_t __six = vdupq_n_f32(6.f);
  return vminq_f32(vmaxq_f32(x, __zero), __six);
}

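// sigmoid(x) = 1 / (1 + exp(-x)). exp_ps evaluates exp on all four lanes;
// the division is an initial reciprocal estimate (vrecpeq_f32) refined by
// one Newton-Raphson step (vrecpsq_f32).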
template <>
inline float32x4_t vActiveq_f32<SIGMOID>(const float32x4_t &x) {
  float32x4_t __one = vdupq_n_f32(1.f);
  float32x4_t __x = vnegq_f32(x);
  __x = exp_ps(__x);
  __x = vaddq_f32(__x, __one);
  float32x4_t __out = vrecpeq_f32(__x);
  return vmulq_f32(vrecpsq_f32(__x, __out), __out);
}

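// tanh(x) = 2 / (1 + exp(-2x)) - 1, evaluated with the same exp_ps and
// reciprocal refinement used for the sigmoid above.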
template <>
inline float32x4_t vActiveq_f32<TANH>(const float32x4_t &x) {
  float32x4_t __one = vdupq_n_f32(1.f);
  float32x4_t __x = vnegq_f32(x);
  __x = vmulq_n_f32(__x, 2.f);
  __x = exp_ps(__x);
  __x = vaddq_f32(__x, __one);
  float32x4_t __out = vrecpeq_f32(__x);
  __out = vmulq_f32(vrecpsq_f32(__x, __out), __out);
  __out = vmulq_n_f32(__out, 2.f);
  return vsubq_f32(__out, __one);
}

template <>
inline float32x4_t vActiveq_f32<LOG>(const float32x4_t &x) {
  return log_ps(x);
}
#endif

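// Scalar activation functions mirroring the vectorized versions above;
// the primary template is the identity.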
template <ActivationType Act = IDENTITY>
inline float Active(const float &x) {
  return x;
}

template <>
inline float Active<RELU>(const float &x) {
  return std::max(x, 0.f);
}

template <>
inline float Active<RELU6>(const float &x) {
  return std::min(std::max(x, 0.f), 6.f);
}

template <>
inline float Active<SIGMOID>(const float &x) {
  //  float tmp = x > SIGMOID_THRESHOLD_MAX ? SIGMOID_THRESHOLD_MAX : x;
  //  tmp = tmp > SIGMOID_THRESHOLD_MIN ? tmp : SIGMOID_THRESHOLD_MIN;
  //  return 1.f / (1.f + exp(-tmp));
  return 1.f / (1.f + exp(-x));
}

template <>
inline float Active<TANH>(const float &x) {
  //  float tmp = -2.f * x;
  //  tmp = (tmp > EXP_MAX_INPUT) ? EXP_MAX_INPUT : tmp;
  //  return (2.f / (1.f + exp(tmp))) - 1.f;
  return 2.f / (1.f + exp(-2.f * x)) - 1.f;
}

template <>
inline float Active<LOG>(const float &x) {
  return log(x);
}

}  // namespace math
}  // namespace operators
}  // namespace paddle_mobile