diff --git a/mace/core/net_def_adapter.cc b/mace/core/net_def_adapter.cc
index 079986798de722bf7f7440fa8307b6a28814a44e..54fc761958d195dd8f29d43ca01f00176c2e378d 100644
--- a/mace/core/net_def_adapter.cc
+++ b/mace/core/net_def_adapter.cc
@@ -199,11 +199,11 @@ MaceStatus NetDefAdapter::AdaptNetDef(
         input_data_format, input_shape, -1));
   }
 
-  OpConditionContext context(ws_, &tensor_shape_map);
   DataFormat op_output_data_format;
   MemoryType op_output_mem_type;
   for (int idx = 0; idx < net_def->op_size(); ++idx) {
     OperatorDef op_def(net_def->op(idx));
+    OpConditionContext context(ws_, &tensor_shape_map);
     context.set_operator_def(&op_def);
     // Select device
     MACE_RETURN_IF_ERROR(this->AdaptDevice(&context,
diff --git a/mace/ops/activation.cc b/mace/ops/activation.cc
index 17a3a905d62542c89656b1322c78a543f3505486..ae395996ba00311de0f7cfa6df9733af49cecc82 100644
--- a/mace/ops/activation.cc
+++ b/mace/ops/activation.cc
@@ -47,14 +47,14 @@ class ActivationOp : public Operation {
             activation_type_,
             Operation::GetOptionalArg<float>("max_limit", 0.f),
             Operation::GetOptionalArg<float>(
-                "leakyrelu_coefficient", 0.f)))) {}
+                "activation_coefficient", 0.f)))) {}
 
   MaceStatus Run(OpContext *context) override {
     MACE_UNUSED(context);
     const Tensor *input = this->Input(0);
     Tensor *output = this->Output(0);
-    if (activation_type_ == PRELU || activation_type_ == ELU) {
+    if (activation_type_ == PRELU) {
       MACE_RETURN_IF_ERROR(output->ResizeLike(input));
       const T *input_ptr = input->data<T>();
       T *output_ptr = output->mutable_data<T>();
@@ -63,8 +63,8 @@ class ActivationOp : public Operation {
      const T *alpha_ptr = alpha->data<T>();
       const index_t outer_size = output->dim(0);
       const index_t inner_size = output->dim(2) * output->dim(3);
-      ActivationWithAlpha(context, input_ptr, outer_size, input->dim(1),
-                          inner_size, alpha_ptr, activation_type_, output_ptr);
+      PReLUActivation(context, input_ptr, outer_size, input->dim(1),
+                      inner_size, alpha_ptr, output_ptr);
     } else {
       activation_delegator_->Compute(context, input, output);
     }
@@ -86,17 +86,17 @@ class ActivationOp : public Operation {
         Operation::GetOptionalArg<std::string>("activation", "NOOP"));
     auto relux_max_limit = Operation::GetOptionalArg<float>("max_limit", 0.0f);
-    auto leakyrelu_coefficient =
-        Operation::GetOptionalArg<float>("leakyrelu_coefficient", 0.0f);
+    auto activation_coefficient =
+        Operation::GetOptionalArg<float>("activation_coefficient", 0.0f);
     MemoryType mem_type;
     if (context->GetOpMemoryType() == MemoryType::GPU_IMAGE) {
       mem_type = MemoryType::GPU_IMAGE;
       kernel_ = make_unique<opencl::image::ActivationKernel>(
-          type, relux_max_limit, leakyrelu_coefficient);
+          type, relux_max_limit, activation_coefficient);
     } else {
       MACE_NOT_IMPLEMENTED;
     }
-    if (type == ActivationType::PRELU || type == ActivationType::ELU) {
+    if (type == ActivationType::PRELU) {
       MACE_CHECK(TransformFilter(
           context, operator_def_.get(), 1, OpenCLBufferType::ARGUMENT,
           mem_type) == MaceStatus::MACE_SUCCESS);
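
Illustration (not part of the patch): after this change PRELU is the only type that still reads the per-channel alpha tensor, which is why the TransformFilter condition above narrows, while ELU moves onto the scalar-coefficient path. A minimal standalone sketch of the two formulas; the helper names are hypothetical:

    #include <cmath>

    // PReLU keeps one learned slope per channel, so it still needs the alpha tensor.
    inline float prelu_ref(float x, float alpha_c) {
      return x >= 0.f ? x : x * alpha_c;
    }

    // ELU needs only a single scalar, read from the "activation_coefficient"
    // argument, which is why it can ride the generic delegator path instead.
    inline float elu_ref(float x, float coeff) {
      return x >= 0.f ? x : (std::exp(x) - 1.f) * coeff;
    }
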
diff --git a/mace/ops/activation.h b/mace/ops/activation.h
index 95f65777a9ba943993d7a4db7edc25e7f6d39106..c4e37249e8a2cdd76573378da6e6afb6f8391797 100644
--- a/mace/ops/activation.h
+++ b/mace/ops/activation.h
@@ -51,14 +51,13 @@ inline ActivationType StringToActivationType(const std::string type) {
 }
 
 template<typename T>
-void ActivationWithAlpha(const OpContext *context,
-                         const T *input_ptr,
-                         const index_t outer_size,
-                         const index_t input_chan,
-                         const index_t inner_size,
-                         const T *alpha_ptr,
-                         const index_t activation_type,
-                         T *output_ptr) {
+void PReLUActivation(const OpContext *context,
+                     const T *input_ptr,
+                     const index_t outer_size,
+                     const index_t input_chan,
+                     const index_t inner_size,
+                     const T *alpha_ptr,
+                     T *output_ptr) {
   utils::ThreadPool &thread_pool =
       context->device()->cpu_runtime()->thread_pool();
 
@@ -69,12 +68,7 @@ void ActivationWithAlpha(const OpContext *context,
       for (index_t j = 0; j < inner_size; ++j) {
         index_t idx = i * input_chan * inner_size + chan_idx * inner_size + j;
         if (input_ptr[idx] < 0) {
-          if (activation_type == ActivationType::PRELU) {
-            output_ptr[idx] = input_ptr[idx] * alpha_ptr[chan_idx];
-          } else if (activation_type == ActivationType::ELU) {
-            output_ptr[idx] =
-                (std::exp(input_ptr[idx]) - 1) * alpha_ptr[chan_idx];
-          }
+          output_ptr[idx] = input_ptr[idx] * alpha_ptr[chan_idx];
         } else {
           output_ptr[idx] = input_ptr[idx];
         }
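
For reference, the loop above assumes NCHW layout. A tiny self-contained sketch of the index arithmetic it relies on (hypothetical helper, not MACE code):

    #include <cstdint>

    // Flat offset of element (i, c, j) in an NCHW tensor with `channels`
    // channels and inner_size = H * W elements per channel.
    inline int64_t nchw_offset(int64_t channels, int64_t inner_size,
                               int64_t i, int64_t c, int64_t j) {
      return i * channels * inner_size + c * inner_size + j;
    }
    // e.g. with channels = 3 and inner_size = 4, (i=1, c=2, j=3) -> offset 23.
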
diff --git a/mace/ops/arm/base/activation.cc b/mace/ops/arm/base/activation.cc
index ab9a5336d44be716c03a73740ef2fd1c7063b42e..28fab419c1d125cdef791ecf805e7cd13bb470cf 100644
--- a/mace/ops/arm/base/activation.cc
+++ b/mace/ops/arm/base/activation.cc
@@ -81,6 +81,11 @@ void Activation<T>::DoActivation(const OpContext *context,
       break;
     }
 
+    case ELU: {
+      ActivateElu(&thread_pool, input, output);
+      break;
+    }
+
     case NOOP: {
       break;
     }
@@ -164,7 +169,7 @@ void Activation<T>::ActivateLeakyRelu(utils::ThreadPool *thread_pool,
   auto output_data = output->mutable_data<T>();
   const index_t input_size = input->size();
   const float32x4_t vzero = vdupq_n_f32(0.f);
-  const float32x4_t valpha = vdupq_n_f32(leakyrelu_coefficient_);
+  const float32x4_t valpha = vdupq_n_f32(activation_coefficient_);
   const index_t block_count = input_size / 4;
 
   thread_pool->Compute1D(
@@ -188,7 +193,7 @@ void Activation<T>::ActivateLeakyRelu(utils::ThreadPool *thread_pool,
   // remain
   for (index_t i = block_count * 4; i < input_size; ++i) {
     output_data[i] = std::max(input_data[i], 0.f) +
-        std::min(input_data[i], 0.f) * leakyrelu_coefficient_;
+        std::min(input_data[i], 0.f) * activation_coefficient_;
   }
 }
 
@@ -226,6 +231,28 @@ void Activation<T>::ActivateSigmoid(utils::ThreadPool *thread_pool,
       0, input_size, 1);
 }
 
+template<typename T>
+void Activation<T>::ActivateElu(utils::ThreadPool *thread_pool,
+                                const Tensor *input,
+                                Tensor *output) {
+  const auto *input_data = input->data<T>();
+  auto *output_data = output->mutable_data<T>();
+  const index_t input_size = input->size();
+
+  thread_pool->Compute1D(
+      [=](index_t start, index_t end, index_t step) {
+        for (index_t i = start; i < end; i += step) {
+          const auto in_val = input_data[i];
+          if (in_val < 0) {
+            output_data[i] = (std::exp(in_val) - 1) * activation_coefficient_;
+          } else {
+            output_data[i] = in_val;
+          }
+        }
+      },
+      0, input_size, 1);
+}
+
 void RegisterActivationDelegator(OpDelegatorRegistry *registry) {
   MACE_REGISTER_DELEGATOR(
       registry, Activation<float>, delegator::ActivationParam,
@@ -240,7 +267,7 @@ void RegisterActivationDelegator(OpDelegatorRegistry *registry) {
   MACE_REGISTER_BF16_DELEGATOR(
       registry, Activation<BFloat16>, delegator::ActivationParam,
       MACE_DELEGATOR_KEY(Activation, DeviceType::CPU, BFloat16,
-                          ImplType::NEON));
+                         ImplType::NEON));
 }
 
 }  // namespace arm
diff --git a/mace/ops/arm/base/activation.h b/mace/ops/arm/base/activation.h
index 3e82b1d6d9a21d2b535195c7802a3b8a4b5aab1b..1b07604e10ae8d9c2a48497a4971daa54f876b5e 100644
--- a/mace/ops/arm/base/activation.h
+++ b/mace/ops/arm/base/activation.h
@@ -45,6 +45,8 @@ class Activation : public delegator::Activation {
                     Tensor *output);
   void ActivateSigmoid(utils::ThreadPool *thread_pool, const Tensor *input,
                        Tensor *output);
+  void ActivateElu(utils::ThreadPool *thread_pool, const Tensor *input,
+                   Tensor *output);
 };
 
 }  // namespace arm
diff --git a/mace/ops/arm/q8/activation.cc b/mace/ops/arm/q8/activation.cc
index 875f8ba442ee55b75036b4a1776b1de73564e056..63bb4b64113d306250228321ad1346eff75d4062 100644
--- a/mace/ops/arm/q8/activation.cc
+++ b/mace/ops/arm/q8/activation.cc
@@ -123,6 +123,16 @@ void Activation<uint8_t>::ActivateSigmoid(utils::ThreadPool *thread_pool,
   MACE_NOT_IMPLEMENTED;
 }
 
+template<>
+void Activation<uint8_t>::ActivateElu(utils::ThreadPool *thread_pool,
+                                      const Tensor *input,
+                                      Tensor *output) {
+  MACE_UNUSED(thread_pool);
+  MACE_UNUSED(input);
+  MACE_UNUSED(output);
+  MACE_NOT_IMPLEMENTED;
+}
+
 }  // namespace arm
 }  // namespace ops
 }  // namespace mace
diff --git a/mace/ops/batch_norm.cc b/mace/ops/batch_norm.cc
index a25936b1ce2534047f78d4b92faa3ac75c7e98d9..49fc8002ea66479bbb7bc21274b22caafc54a7ae 100644
--- a/mace/ops/batch_norm.cc
+++ b/mace/ops/batch_norm.cc
@@ -50,7 +50,7 @@ class BatchNormOp : public Operation {
                 Operation::GetOptionalArg<std::string>("activation", "NOOP")),
             Operation::GetOptionalArg<float>("max_limit", 0.0f),
-            Operation::GetOptionalArg<float>("leakyrelu_coefficient",
+            Operation::GetOptionalArg<float>("activation_coefficient",
                                              0.0f)))) {}
 
   MaceStatus Run(OpContext *context) override {
@@ -168,13 +168,13 @@ class BatchNormOp : public Operation {
     ActivationType activation = ops::StringToActivationType(
         Operation::GetOptionalArg<std::string>("activation", "NOOP"));
     float relux_max_limit = Operation::GetOptionalArg<float>("max_limit", 0.0f);
-    float leakyrelu_coefficient = Operation::GetOptionalArg<float>(
-        "leakyrelu_coefficient", 0.0f);
+    float activation_coefficient = Operation::GetOptionalArg<float>(
+        "activation_coefficient", 0.0f);
     MemoryType mem_type;
     if (context->GetOpMemoryType() == MemoryType::GPU_IMAGE) {
       mem_type = MemoryType::GPU_IMAGE;
       kernel_ = make_unique<opencl::image::BatchNormKernel>(
-          epsilon, activation, relux_max_limit, leakyrelu_coefficient);
+          epsilon, activation, relux_max_limit, activation_coefficient);
     } else {
       MACE_NOT_IMPLEMENTED;
     }
diff --git a/mace/ops/conv_2d.cc b/mace/ops/conv_2d.cc
index 03b098c65400331d3d57ec6402186e27e01825f5..26b4d02a9c9ddad721ceccffa3888df71ab3e309 100644
--- a/mace/ops/conv_2d.cc
+++ b/mace/ops/conv_2d.cc
@@ -68,7 +68,7 @@ class Conv2dOp : public ConvPool2dOpBase {
                 Operation::GetOptionalArg<std::string>("activation", "NOOP")),
             Operation::GetOptionalArg<float>("max_limit", 0.0f),
-            Operation::GetOptionalArg<float>("leakyrelu_coefficient",
+            Operation::GetOptionalArg<float>("activation_coefficient",
                                              0.0f)))),
         bias_add_delegator_(delegator::BiasAdd::Create(
             context->workspace(),
@@ -190,8 +190,8 @@ class Conv2dOp : public ConvPool2dOpBase {
             Operation::GetOptionalArg<std::string>("activation", "NOOP"))),
         relux_max_limit_(Operation::GetOptionalArg<float>("max_limit", 0.0f)),
-        leakyrelu_coefficient_(Operation::GetOptionalArg<float>(
-            "leakyrelu_coefficient", 0.0f)) {}
+        activation_coefficient_(Operation::GetOptionalArg<float>(
+            "activation_coefficient", 0.0f)) {}
 
   MaceStatus Run(OpContext *context) override {
     const Tensor *input = this->Input(INPUT);
@@ -414,7 +414,7 @@ class Conv2dOp : public ConvPool2dOpBase {
  private:
   const ActivationType activation_;
   const float relux_max_limit_;
-  const float leakyrelu_coefficient_;
+  const float activation_coefficient_;
   std::vector bias_;
 
  private:
@@ -433,8 +433,8 @@ class Conv2dOp : public ConvPool2dOpBase {
             Operation::GetOptionalArg<std::string>("activation", "NOOP"))),
         relux_max_limit_(Operation::GetOptionalArg<float>("max_limit", 0.0f)),
-        leakyrelu_coefficient_(Operation::GetOptionalArg<float>(
-            "leakyrelu_coefficient", 0.0f)),
+        activation_coefficient_(Operation::GetOptionalArg<float>(
+            "activation_coefficient", 0.0f)),
         wino_block_size_(Operation::GetOptionalArg<int>("wino_block_size", 0)) {
     MemoryType mem_type;
     if (context->GetOpMemoryType() == MemoryType::GPU_IMAGE) {
@@ -488,13 +488,13 @@ class Conv2dOp : public ConvPool2dOpBase {
     return kernel_->Compute(context, input, filter, bias,
                             strides_.data(), padding_type_, paddings_,
                             dilations_.data(), activation_, relux_max_limit_,
-                            leakyrelu_coefficient_, wino_block_size_, output);
+                            activation_coefficient_, wino_block_size_, output);
   }
 
  private:
   const ActivationType activation_;
   const float relux_max_limit_;
-  const float leakyrelu_coefficient_;
+  const float activation_coefficient_;
   std::unique_ptr<OpenCLConv2dKernel> kernel_;
   int wino_block_size_;
diff --git a/mace/ops/deconv_2d.cc b/mace/ops/deconv_2d.cc
index dc58141a82bb715ca16ac66f27a231b6e47cb9e1..4df66c0d766de2c325da9e66377514b8ff3b66d1 100644
--- a/mace/ops/deconv_2d.cc
+++ b/mace/ops/deconv_2d.cc
@@ -56,8 +56,8 @@ class Deconv2dOp : public Deconv2dOpBase {
             context->workspace(),
             MACE_DELEGATOR_KEY(Activation, DeviceType::CPU, T, kCpuImplType),
-            delegator::ActivationParam(activation_, relux_max_limit_,
-                                       leakyrelu_coefficient_))),
+            delegator::ActivationParam(
+                activation_, relux_max_limit_, activation_coefficient_))),
         bias_add_delegator_(delegator::BiasAdd::Create(
             context->workspace(),
             MACE_DELEGATOR_KEY(BiasAdd, DeviceType::CPU, T, kCpuImplType),
@@ -228,7 +228,7 @@ class Deconv2dOp : public Deconv2dOpBase {
     return kernel_->Compute(context, input, filter, bias,
                             strides_.data(), in_paddings.data(), activation_,
-                            relux_max_limit_, leakyrelu_coefficient_,
+                            relux_max_limit_, activation_coefficient_,
                             out_shape, output);
   }
diff --git a/mace/ops/deconv_2d.h b/mace/ops/deconv_2d.h
index a11d5f8a8bd77a7be78605a6a256331d2ceccdd7..d4b85cfdbc0a76d5ca3bad6ce1cf540f453fff1a 100644
--- a/mace/ops/deconv_2d.h
+++ b/mace/ops/deconv_2d.h
@@ -43,8 +43,8 @@ class Deconv2dOpBase : public Operation {
                                                    "NOOP"))),
         relux_max_limit_(
             Operation::GetOptionalArg<float>("max_limit", 0.0f)),
-        leakyrelu_coefficient_(
-            Operation::GetOptionalArg<float>("leakyrelu_coefficient", 0.0f)) {}
+        activation_coefficient_(
+            Operation::GetOptionalArg<float>("activation_coefficient", 0.0f)) {}
 
  protected:
   std::vector<int> strides_;  // [stride_h, stride_w]
@@ -54,7 +54,7 @@ class Deconv2dOpBase : public Operation {
   const FrameworkType model_type_;
   const ActivationType activation_;
   const float relux_max_limit_;
-  const float leakyrelu_coefficient_;
+  const float activation_coefficient_;
 };
 
 }  // namespace ops
diff --git a/mace/ops/delegator/activation.h b/mace/ops/delegator/activation.h
index 80a9c6b376fceda5d84d2de4eb7358213df9613b..ec0b1a07c331405fd068d9688deb2b166dbf202e 100644
--- a/mace/ops/delegator/activation.h
+++ b/mace/ops/delegator/activation.h
@@ -26,20 +26,20 @@ namespace delegator {
 struct ActivationParam : public DelegatorParam {
   explicit ActivationParam(ActivationType type, const float limit,
-                           const float leakyrelu_coefficient)
+                           const float activation_coefficient)
       : type_(type), limit_(limit),
-        leakyrelu_coefficient_(leakyrelu_coefficient) {}
+        activation_coefficient_(activation_coefficient) {}
 
   ActivationType type_;
   const float limit_;
-  const float leakyrelu_coefficient_;
+  const float activation_coefficient_;
 };
 
 class Activation : public OpDelegator {
  public:
   explicit Activation(const ActivationParam &param)
       : OpDelegator(param), type_(param.type_), limit_(param.limit_),
-        leakyrelu_coefficient_(param.leakyrelu_coefficient_) {}
+        activation_coefficient_(param.activation_coefficient_) {}
   virtual ~Activation() = default;
 
   MACE_DEFINE_DELEGATOR_CREATOR(Activation)
@@ -51,7 +51,7 @@ class Activation : public OpDelegator {
  protected:
   ActivationType type_;
   const float limit_;
-  const float leakyrelu_coefficient_;
+  const float activation_coefficient_;
 };
 
 }  // namespace delegator
diff --git a/mace/ops/depthwise_conv2d.cc b/mace/ops/depthwise_conv2d.cc
index 903a2cfd6d082eb992627d8bd898852b9b3c8e79..3f0d7d64e1b45876072d1a19fcd007dc3a356b9f 100644
--- a/mace/ops/depthwise_conv2d.cc
+++ b/mace/ops/depthwise_conv2d.cc
@@ -52,12 +52,12 @@ class DepthwiseConv2dOpBase : public ConvPool2dOpBase {
             Operation::GetOptionalArg<std::string>("activation", "NOOP"))),
         relux_max_limit_(Operation::GetOptionalArg<float>("max_limit", 0.0f)),
-        leakyrelu_coefficient_(Operation::GetOptionalArg<float>(
-            "leakyrelu_coefficient", 0.0f)) {}
+        activation_coefficient_(Operation::GetOptionalArg<float>(
+            "activation_coefficient", 0.0f)) {}
 
  protected:
   const ActivationType activation_;
   const float relux_max_limit_;
-  const float leakyrelu_coefficient_;
+  const float activation_coefficient_;
 };
 
 template<typename T>
@@ -73,8 +73,8 @@ class DepthwiseConv2dOp : public DepthwiseConv2dOpBase {
             context->workspace(),
             MACE_DELEGATOR_KEY(Activation, DeviceType::CPU, T, kCpuImplType),
-            delegator::ActivationParam(activation_, relux_max_limit_,
-                                       leakyrelu_coefficient_))),
+            delegator::ActivationParam(
+                activation_, relux_max_limit_, activation_coefficient_))),
         bias_add_delegator_(delegator::BiasAdd::Create(
             context->workspace(),
             MACE_DELEGATOR_KEY(BiasAdd, DeviceType::CPU, T, kCpuImplType),
@@ -389,7 +389,7 @@ class DepthwiseConv2dOp :
     return kernel_->Compute(context, input, filter, bias,
                             strides_.data(), padding_type_, paddings_,
                             dilations_.data(), activation_, relux_max_limit_,
-                            leakyrelu_coefficient_, output);
+                            activation_coefficient_, output);
   }
 
  private:
diff --git a/mace/ops/depthwise_deconv2d.cc b/mace/ops/depthwise_deconv2d.cc
index 615b5eccc93a359eea915f5da541d4e57fff3b35..b1373d7649f7f58621fe3dc0f93a240acb2ee281 100644
--- a/mace/ops/depthwise_deconv2d.cc
+++ b/mace/ops/depthwise_deconv2d.cc
@@ -55,8 +55,8 @@ class DepthwiseDeconv2dOp
             context->workspace(),
             MACE_DELEGATOR_KEY(Activation, DeviceType::CPU, T, kCpuImplType),
-            delegator::ActivationParam(activation_, relux_max_limit_,
-                                       leakyrelu_coefficient_))),
+            delegator::ActivationParam(
+                activation_, relux_max_limit_, activation_coefficient_))),
         bias_add_delegator_(delegator::BiasAdd::Create(
             context->workspace(),
             MACE_DELEGATOR_KEY(BiasAdd, DeviceType::CPU, T, kCpuImplType),
@@ -209,7 +209,7 @@ class DepthwiseDeconv2dOp : public Deconv2dOpBase {
                             group_,
                             activation_,
                             relux_max_limit_,
-                            leakyrelu_coefficient_,
+                            activation_coefficient_,
                             out_shape,
                             output);
   }
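
Every CPU op in this patch now builds its activation delegator the same way, passing the renamed scalar as the third ActivationParam field. A standalone mirror of that struct (hypothetical types, for illustration only) showing how one coefficient serves both LeakyReLU and ELU:

    #include <cmath>

    enum ActivationType { NOOP, RELUX, LEAKYRELU, ELU };

    struct Param {
      ActivationType type;
      float limit;        // RELUX clamp, unused by the other types
      float coefficient;  // LeakyReLU slope or ELU alpha
    };

    float apply(const Param &p, float x) {
      switch (p.type) {
        case RELUX:     return x < 0 ? 0 : (x > p.limit ? p.limit : x);
        case LEAKYRELU: return x >= 0 ? x : p.coefficient * x;
        case ELU:       return x >= 0 ? x : p.coefficient * (std::exp(x) - 1.f);
        default:        return x;
      }
    }
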
diff --git a/mace/ops/fully_connected.cc b/mace/ops/fully_connected.cc
index f0c83a25b6577ef633346d75d7d38e2d2bac107a..e81e1e902093b55644f0b83607ae854dc636830f 100644
--- a/mace/ops/fully_connected.cc
+++ b/mace/ops/fully_connected.cc
@@ -42,12 +42,12 @@ class FullyConnectedOpBase : public Operation {
             Operation::GetOptionalArg<std::string>("activation", "NOOP"))),
         relux_max_limit_(Operation::GetOptionalArg<float>("max_limit", 0.0f)),
-        leakyrelu_coefficient_(Operation::GetOptionalArg<float>(
-            "leakyrelu_coefficient", 0.0f)) {}
+        activation_coefficient_(Operation::GetOptionalArg<float>(
+            "activation_coefficient", 0.0f)) {}
 
  protected:
   const ActivationType activation_;
   const float relux_max_limit_;
-  const float leakyrelu_coefficient_;
+  const float activation_coefficient_;
 
   MACE_OP_INPUT_TAGS(INPUT, WEIGHT, BIAS);
   MACE_OP_OUTPUT_TAGS(OUTPUT);
@@ -64,9 +64,8 @@ class FullyConnectedOp : public FullyConnectedOpBase {
         activation_delegator_(delegator::Activation::Create(
             context->workspace(),
             MACE_DELEGATOR_KEY(Activation, DeviceType::CPU, T, kCpuImplType),
-            delegator::ActivationParam(activation_,
-                                       relux_max_limit_,
-                                       leakyrelu_coefficient_))),
+            delegator::ActivationParam(
+                activation_, relux_max_limit_, activation_coefficient_))),
         gemv_(delegator::Gemv::Create(
             context->workspace(),
             MACE_DELEGATOR_KEY(Gemv, DeviceType::CPU, T, kCpuImplType),
@@ -215,7 +214,7 @@ class FullyConnectedOp : public FullyConnectedOpBase {
                " don't match.");
     return kernel_->Compute(
         context, input, weight, bias, activation_, relux_max_limit_,
-        leakyrelu_coefficient_, output);
+        activation_coefficient_, output);
   }
 
  private:
diff --git a/mace/ops/opencl/buffer/conv_2d.cc b/mace/ops/opencl/buffer/conv_2d.cc
index 50109b6e2341f488ff39de17360d448dd238dc72..1c604d8ee8f2e2a6e0d672be5a45a6a3029683df 100644
--- a/mace/ops/opencl/buffer/conv_2d.cc
+++ b/mace/ops/opencl/buffer/conv_2d.cc
@@ -46,7 +46,7 @@ MaceStatus Conv2dKernel::Compute(
     const int *dilations,
     const ActivationType activation,
     const float relux_max_limit,
-    const float leakyrelu_coefficient,
+    const float activation_coefficient,
     const int winograd_blk_size,
     Tensor *output) {
   MACE_UNUSED(winograd_blk_size);
@@ -148,14 +148,14 @@ MaceStatus Conv2dKernel::Compute(
       return conv2d::Conv2d1x1(
           context, &kernels_[1], pad_input, filter, bias, strides,
           activation, relux_max_limit,
-          leakyrelu_coefficient, input_changed, output, &conv_future);
+          activation_coefficient, input_changed, output, &conv_future);
     };
   } else {
     conv_func = [&](const Tensor *pad_input, Tensor *output) -> MaceStatus {
       return conv2d::Conv2dGeneral(
           context, &kernels_[1], pad_input, filter, bias, strides, dilations,
           activation, relux_max_limit,
-          leakyrelu_coefficient, input_changed, output, &conv_future);
+          activation_coefficient, input_changed, output, &conv_future);
     };
   }
   MACE_RETURN_IF_ERROR(conv_func(padded_input_ptr, output));
diff --git a/mace/ops/opencl/buffer/conv_2d.h b/mace/ops/opencl/buffer/conv_2d.h
index 563b835861ea76c6cb90b8ad27f2fa4c9d09e955..224f6670b5519fea4c7887c271682208cb549bd1 100644
--- a/mace/ops/opencl/buffer/conv_2d.h
+++ b/mace/ops/opencl/buffer/conv_2d.h
@@ -38,7 +38,7 @@ extern MaceStatus Conv2d1x1(OpContext *context,
                             const int *strides,
                             const ActivationType activation,
                             const float relux_max_limit,
-                            const float leakyrelu_coefficient,
+                            const float activation_coefficient,
                             const bool input_changed,
                             Tensor *output,
                             StatsFuture *future);
@@ -52,7 +52,7 @@ extern MaceStatus Conv2dGeneral(OpContext *context,
                                 const int *dilations,
                                 const ActivationType activation,
                                 const float relux_max_limit,
-                                const float leakyrelu_coefficient,
+                                const float activation_coefficient,
                                 const bool input_changed,
                                 Tensor *output,
                                 StatsFuture *future);
@@ -81,7 +81,7 @@ class Conv2dKernel : public OpenCLConv2dKernel {
               const int *dilations,
               const ActivationType activation,
               const float relux_max_limit,
-              const float leakyrelu_coefficient,
+              const float activation_coefficient,
               const int winograd_blk_size,
               Tensor *output) override;
diff --git a/mace/ops/opencl/buffer/conv_2d_1x1.cc b/mace/ops/opencl/buffer/conv_2d_1x1.cc
index 001c201d29281f66dbb8bc46c27b3a779114387b..5b80d90f283c0fd5ce57c8ff130ae1d2b4043309 100644
--- a/mace/ops/opencl/buffer/conv_2d_1x1.cc
+++ b/mace/ops/opencl/buffer/conv_2d_1x1.cc
@@ -31,7 +31,7 @@ MaceStatus Conv2d1x1(OpContext *context,
                     const int *strides,
                     const ActivationType activation,
                     const float relux_max_limit,
-                    const float leakyrelu_coefficient,
+                    const float activation_coefficient,
                     const bool input_changed,
                     Tensor *output,
                     StatsFuture *future) {
@@ -75,6 +75,9 @@ MaceStatus Conv2d1x1(OpContext *context,
     case LEAKYRELU:
       built_options.emplace("-DUSE_LEAKYRELU");
       break;
+    case ELU:
+      built_options.emplace("-DUSE_ELU");
+      break;
     default:
       LOG(FATAL) << "Unknown activation type: " << activation;
   }
@@ -110,7 +113,7 @@ MaceStatus Conv2d1x1(OpContext *context,
     kernel->setArg(idx++, strides[0]);
     kernel->setArg(idx++, strides[1]);
     kernel->setArg(idx++, relux_max_limit);
-    kernel->setArg(idx++, leakyrelu_coefficient);
+    kernel->setArg(idx++, activation_coefficient);
     kernel->setArg(idx++, *(output->opencl_buffer()));
   }
diff --git a/mace/ops/opencl/buffer/conv_2d_general.cc b/mace/ops/opencl/buffer/conv_2d_general.cc
index 9e7d75089b03d6d45a4f293b80105e3c5ac6a2d3..8bdb419fb7dac8d3763ba0ea7f6620745ea11de2 100644
--- a/mace/ops/opencl/buffer/conv_2d_general.cc
+++ b/mace/ops/opencl/buffer/conv_2d_general.cc
@@ -32,7 +32,7 @@ MaceStatus Conv2dGeneral(OpContext *context,
                         const int *dilations,
                         const ActivationType activation,
                         const float relux_max_limit,
-                        const float leakyrelu_coefficient,
+                        const float activation_coefficient,
                         const bool input_changed,
                         Tensor *output,
                         StatsFuture *future) {
@@ -81,6 +81,9 @@ MaceStatus Conv2dGeneral(OpContext *context,
     case LEAKYRELU:
       built_options.emplace("-DUSE_LEAKYRELU");
      break;
+    case ELU:
+      built_options.emplace("-DUSE_ELU");
+      break;
     default:
       LOG(FATAL) << "Unknown activation type: " << activation;
   }
@@ -125,7 +128,7 @@ MaceStatus Conv2dGeneral(OpContext *context,
     kernel->setArg(idx++, static_cast<int32_t>(
         dilations[1] * in_channel));
     kernel->setArg(idx++, relux_max_limit);
-    kernel->setArg(idx++, leakyrelu_coefficient);
+    kernel->setArg(idx++, activation_coefficient);
     kernel->setArg(idx++, *(output->opencl_buffer()));
   }
diff --git a/mace/ops/opencl/buffer/depthwise_conv2d.cc b/mace/ops/opencl/buffer/depthwise_conv2d.cc
index 48c9829f4cd3ad04daf95b5d1964807b9e0a0e67..88a7c5356b6f8dabcedd3c58121862cb3c32433e 100644
--- a/mace/ops/opencl/buffer/depthwise_conv2d.cc
+++ b/mace/ops/opencl/buffer/depthwise_conv2d.cc
@@ -32,7 +32,7 @@ MaceStatus DepthwiseConv2d(OpContext *context,
                           const int *dilations,
                           const ActivationType activation,
                           const float relux_max_limit,
-                          const float leakyrelu_coefficient,
+                          const float activation_coefficient,
                           const bool input_changed,
                           Tensor *output,
                           StatsFuture *future) {
@@ -79,6 +79,9 @@ MaceStatus DepthwiseConv2d(OpContext *context,
     case LEAKYRELU:
       built_options.emplace("-DUSE_LEAKYRELU");
      break;
+    case ELU:
+      built_options.emplace("-DUSE_ELU");
+      break;
     default:
       LOG(FATAL) << "Unknown activation type: " << activation;
   }
@@ -119,7 +122,7 @@ MaceStatus DepthwiseConv2d(OpContext *context,
     kernel->setArg(idx++, static_cast<int32_t>(
        dilations[1] * in_channel));
     kernel->setArg(idx++, relux_max_limit);
-    kernel->setArg(idx++, leakyrelu_coefficient);
+    kernel->setArg(idx++, activation_coefficient);
     kernel->setArg(idx++, *(output->opencl_buffer()));
   }
@@ -147,7 +150,7 @@ MaceStatus DepthwiseConv2dKernel::Compute(
     const int *dilations,
     const ActivationType activation,
     const float relux_max_limit,
-    const float leakyrelu_coefficient,
+    const float activation_coefficient,
     Tensor *output) {
   StatsFuture pad_future, dw_conv_future;
   index_t filter_w = filter->dim(3);
@@ -242,7 +245,7 @@ MaceStatus DepthwiseConv2dKernel::Compute(
       depthwise::DepthwiseConv2d(
           context, &kernels_[1], padded_input_ptr, filter, bias, strides,
           dilations, activation, relux_max_limit,
-          leakyrelu_coefficient, input_changed, output, &dw_conv_future));
+          activation_coefficient, input_changed, output, &dw_conv_future));
   MergeMultipleFutureWaitFn({pad_future, dw_conv_future}, context->future());
 
   return MaceStatus::MACE_SUCCESS;
 }
diff --git a/mace/ops/opencl/buffer/depthwise_conv2d.h b/mace/ops/opencl/buffer/depthwise_conv2d.h
index 60d680777ba06af2aec2c04ff42dcad6a5bd5caa..e8990b301bd561006b8e77f653c25f33de0bfa53 100644
--- a/mace/ops/opencl/buffer/depthwise_conv2d.h
+++ b/mace/ops/opencl/buffer/depthwise_conv2d.h
@@ -39,7 +39,7 @@ MaceStatus DepthwiseConv2d(OpContext *context,
                           const int *dilations,
                           const ActivationType activation,
                           const float relux_max_limit,
-                          const float leakyrelu_coefficient,
+                          const float activation_coefficient,
                           const bool input_changed,
                           Tensor *output,
                           StatsFuture *future);
@@ -59,7 +59,7 @@ class DepthwiseConv2dKernel : public OpenCLDepthwiseConv2dKernel {
       const int *dilations,
       const ActivationType activation,
       const float relux_max_limit,
-      const float leakyrelu_coefficient,
+      const float activation_coefficient,
       Tensor *output) override;
 
  private:
diff --git a/mace/ops/opencl/cl/activation.cl b/mace/ops/opencl/cl/activation.cl
index 8e825eceab33f0dfa542e5168b863871be6ef8d0..46615a778743d43e8bf0322ac3e888c333cb1c5c 100644
--- a/mace/ops/opencl/cl/activation.cl
+++ b/mace/ops/opencl/cl/activation.cl
@@ -3,11 +3,11 @@
 __kernel void activation(OUT_OF_RANGE_PARAMS
                          GLOBAL_WORK_GROUP_SIZE_DIM3
                          __read_only image2d_t input,
-#if defined (USE_PRELU) || defined (USE_ELU)
+#ifdef USE_PRELU
                          __read_only image2d_t alpha,
 #endif
                          __private const float relux_max_limit,
-                         __private const float leakyrelu_coefficient,
+                         __private const float coefficient,
                          __write_only image2d_t output) {
   const int ch_blk = get_global_id(0);
   const int w = get_global_id(1);
@@ -23,11 +23,11 @@ __kernel void activation(OUT_OF_RANGE_PARAMS
   const int pos = mad24(ch_blk, width, w);
   DATA_TYPE4 in = READ_IMAGET(input, SAMPLER, (int2)(pos, hb));
-#if defined (USE_PRELU) || defined (USE_ELU)
+#ifdef USE_PRELU
   DATA_TYPE4 activation_alpha = READ_IMAGET(alpha, SAMPLER, (int2)(ch_blk, 0));
-  DATA_TYPE4 out = do_activation(in, activation_alpha, relux_max_limit, leakyrelu_coefficient);
+  DATA_TYPE4 out = do_activation(in, activation_alpha, relux_max_limit, coefficient);
 #else
-  DATA_TYPE4 out = do_activation(in, relux_max_limit, leakyrelu_coefficient);
+  DATA_TYPE4 out = do_activation(in, relux_max_limit, coefficient);
 #endif
 
   WRITE_IMAGET(output, (int2)(pos, hb), out);
diff --git a/mace/ops/opencl/cl/batch_norm.cl b/mace/ops/opencl/cl/batch_norm.cl
index 87da37d042cfff33d2119bda56c9596bf1dce59e..37faf876c81baaea3fc9956f185ade03cf90d72b 100644
--- a/mace/ops/opencl/cl/batch_norm.cl
+++ b/mace/ops/opencl/cl/batch_norm.cl
@@ -12,7 +12,7 @@ __kernel void batch_norm(OUT_OF_RANGE_PARAMS
 #endif
                          __write_only image2d_t output,
                          __private const float relux_max_limit,
-                         __private const float leakyrelu_coefficient) {
+                         __private const float activation_coefficient) {
   const int ch_blk = get_global_id(0);
   const int w = get_global_id(1);
   const int hb = get_global_id(2);
@@ -44,8 +44,8 @@ __kernel void batch_norm(OUT_OF_RANGE_PARAMS
   DATA_TYPE4 in = READ_IMAGET(input, SAMPLER, (int2)(pos, hb));
   DATA_TYPE4 out = mad(in, bn_scale, bn_offset);
 
-#if defined(USE_RELU) || defined(USE_LEAKYRELU) || defined(USE_RELUX) || defined(USE_TANH) || defined(USE_SIGMOID)
-  out = do_activation(out, relux_max_limit, leakyrelu_coefficient);
+#if defined(USE_RELU) || defined(USE_LEAKYRELU) || defined(USE_RELUX) || defined(USE_TANH) || defined(USE_SIGMOID) || defined(USE_ELU)
+  out = do_activation(out, relux_max_limit, activation_coefficient);
 #endif
 
   WRITE_IMAGET(output, (int2)(pos, hb), out);
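
The OpenCL changes in this patch all repeat one pattern: the host side emplaces a -DUSE_* build option, so the activation branch is selected when the kernel program is compiled rather than at run time, and the same scalar argument slot is reused whatever the type. A condensed, self-contained sketch of that host-side switch (illustrative; it mirrors, but is not copied from, the hunks in this patch):

    #include <set>
    #include <string>

    enum ActivationType { NOOP, RELU, RELUX, TANH, SIGMOID, LEAKYRELU, ELU };

    // Returns the macro that picks the branch inside do_activation().
    std::set<std::string> ActivationBuildOptions(ActivationType activation) {
      std::set<std::string> built_options;
      switch (activation) {
        case RELU:      built_options.emplace("-DUSE_RELU"); break;
        case RELUX:     built_options.emplace("-DUSE_RELUX"); break;
        case TANH:      built_options.emplace("-DUSE_TANH"); break;
        case SIGMOID:   built_options.emplace("-DUSE_SIGMOID"); break;
        case LEAKYRELU: built_options.emplace("-DUSE_LEAKYRELU"); break;
        case ELU:       built_options.emplace("-DUSE_ELU"); break;  // new here
        default: break;
      }
      return built_options;
    }
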
diff --git a/mace/ops/opencl/cl/common.h b/mace/ops/opencl/cl/common.h
index 0bd97045b869ef40333eb47cf3f4a048ab289b18..3cf2cc27927275adf1232c812f0868b4bda42176 100644
--- a/mace/ops/opencl/cl/common.h
+++ b/mace/ops/opencl/cl/common.h
@@ -83,11 +83,11 @@ inline float4 do_sigmoid(float4 in) {
 
 #ifdef DATA_TYPE
 inline DATA_TYPE4 do_activation(DATA_TYPE4 in,
-#if defined (USE_PRELU) || defined (USE_ELU)
+#if defined (USE_PRELU)
                                 DATA_TYPE4 alpha,
 #endif
                                 __private const float relux_max_limit,
-                                __private const float leakyrelu_coefficient) {
+                                __private const float activation_coefficient) {
   DATA_TYPE4 out;
 #ifdef USE_RELU
   out = fmax(in, (DATA_TYPE)0);
@@ -99,7 +99,8 @@ inline DATA_TYPE4 do_activation(DATA_TYPE4 in,
   out = select(alpha * in, in, in >= (DATA_TYPE)0);
 #endif
 #ifdef USE_ELU
-  out = select(alpha * (native_exp(in) - 1.0f), in, in >= (DATA_TYPE)0);
+  out = select(activation_coefficient * (native_exp(in) - 1.0f),
+               in, in >= (DATA_TYPE)0);
 #endif
 #ifdef USE_TANH
   out = tanh(in);
@@ -108,7 +109,7 @@ inline DATA_TYPE4 do_activation(DATA_TYPE4 in,
   out = do_sigmoid(in);
 #endif
 #ifdef USE_LEAKYRELU
-  out = select(leakyrelu_coefficient * in, in, in >= (DATA_TYPE)0);
+  out = select(activation_coefficient * in, in, in >= (DATA_TYPE)0);
 #endif
   return out;
 }
diff --git a/mace/ops/opencl/cl/conv_2d.cl b/mace/ops/opencl/cl/conv_2d.cl
index 2bf4572b85184b0056d7a9d113953d13b43475e0..53b06f2873766f97de3387a19276eb58676e1117 100644
--- a/mace/ops/opencl/cl/conv_2d.cl
+++ b/mace/ops/opencl/cl/conv_2d.cl
@@ -9,7 +9,7 @@ __kernel void conv_2d(OUT_OF_RANGE_PARAMS
 #endif
                       __write_only image2d_t output,
                       __private const float relux_max_limit,
-                      __private const float leakyrelu_coefficient,
+                      __private const float activation_coefficient,
                       __private const int in_height,
                       __private const int in_width,
                       __private const int in_ch_blks,
@@ -125,11 +125,11 @@ __kernel void conv_2d(OUT_OF_RANGE_PARAMS
     }
   }
 
-#if defined(USE_RELU) || defined(USE_LEAKYRELU) || defined(USE_RELUX) || defined(USE_TANH) || defined(USE_SIGMOID)
-  out0 = do_activation(out0, relux_max_limit, leakyrelu_coefficient);
-  out1 = do_activation(out1, relux_max_limit, leakyrelu_coefficient);
-  out2 = do_activation(out2, relux_max_limit, leakyrelu_coefficient);
-  out3 = do_activation(out3, relux_max_limit, leakyrelu_coefficient);
+#if defined(USE_RELU) || defined(USE_LEAKYRELU) || defined(USE_RELUX) || defined(USE_TANH) || defined(USE_SIGMOID) || defined(USE_ELU)
+  out0 = do_activation(out0, relux_max_limit, activation_coefficient);
+  out1 = do_activation(out1, relux_max_limit, activation_coefficient);
+  out2 = do_activation(out2, relux_max_limit, activation_coefficient);
+  out3 = do_activation(out3, relux_max_limit, activation_coefficient);
 #endif
 
   const int out_x_base = mul24(out_ch_blk, out_width);
diff --git a/mace/ops/opencl/cl/conv_2d_1x1.cl b/mace/ops/opencl/cl/conv_2d_1x1.cl
index 1d146028133118015565259b7296455490cd6434..ca62c8ac2ba23b8e4b1d5e636c6a623c1751ce6d 100644
--- a/mace/ops/opencl/cl/conv_2d_1x1.cl
+++ b/mace/ops/opencl/cl/conv_2d_1x1.cl
@@ -9,7 +9,7 @@ __kernel void conv_2d_1x1(OUT_OF_RANGE_PARAMS
 #endif
                           __write_only image2d_t output,
                           __private const float relux_max_limit,
-                          __private const float leakyrelu_coefficient,
+                          __private const float activation_coefficient,
                           __private const int in_height,
                           __private const int in_width,
                           __private const int in_ch_blks,
@@ -98,11 +98,11 @@ __kernel void conv_2d_1x1(OUT_OF_RANGE_PARAMS
     filter_x_base += 4;
   }
 
-#if defined(USE_RELU) || defined(USE_LEAKYRELU) || defined(USE_RELUX) || defined(USE_TANH) || defined(USE_SIGMOID)
-  out0 = do_activation(out0, relux_max_limit, leakyrelu_coefficient);
-  out1 = do_activation(out1, relux_max_limit, leakyrelu_coefficient);
-  out2 = do_activation(out2, relux_max_limit, leakyrelu_coefficient);
-  out3 = do_activation(out3, relux_max_limit, leakyrelu_coefficient);
+#if defined(USE_RELU) || defined(USE_LEAKYRELU) || defined(USE_RELUX) || defined(USE_TANH) || defined(USE_SIGMOID) || defined(USE_ELU)
+  out0 = do_activation(out0, relux_max_limit, activation_coefficient);
+  out1 = do_activation(out1, relux_max_limit, activation_coefficient);
+  out2 = do_activation(out2, relux_max_limit, activation_coefficient);
+  out3 = do_activation(out3, relux_max_limit, activation_coefficient);
 #endif
 
   const int out_x_base = mul24(out_ch_blk, width);
diff --git a/mace/ops/opencl/cl/conv_2d_1x1_buffer.cl b/mace/ops/opencl/cl/conv_2d_1x1_buffer.cl
index be9f36e171c71ac2061260ef3b015bb87d5dca01..72669030bb197a3a262eec67ac4b4dd46e583cfd 100644
--- a/mace/ops/opencl/cl/conv_2d_1x1_buffer.cl
+++ b/mace/ops/opencl/cl/conv_2d_1x1_buffer.cl
@@ -17,7 +17,7 @@ __kernel void conv2d(BUFFER_OUT_OF_RANGE_PARAMS
                      __private const int stride_h,
                      __private const int stride_w,
                      __private const float relux_max_limit,
-                     __private const float leakyrelu_coefficient,
+                     __private const float activation_coefficient,
                      __global OUT_DATA_TYPE *output) {
   const int out_wc_blk_idx = get_global_id(0);
   const int out_hb_idx = get_global_id(1);
@@ -80,9 +80,9 @@ __kernel void conv2d(BUFFER_OUT_OF_RANGE_PARAMS
     in_offset += 4;
   }
 
-#if defined(USE_RELU) || defined(USE_LEAKYRELU) || defined(USE_RELUX) || defined(USE_TANH) || defined(USE_SIGMOID)
-  out0 = do_activation(out0, relux_max_limit, leakyrelu_coefficient);
-  out1 = do_activation(out1, relux_max_limit, leakyrelu_coefficient);
+#if defined(USE_RELU) || defined(USE_LEAKYRELU) || defined(USE_RELUX) || defined(USE_TANH) || defined(USE_SIGMOID) || defined(USE_ELU)
+  out0 = do_activation(out0, relux_max_limit, activation_coefficient);
+  out1 = do_activation(out1, relux_max_limit, activation_coefficient);
 #endif
 
   int out_offset = mad24(mad24(mad24(batch_idx, out_height, out_height_idx),
diff --git a/mace/ops/opencl/cl/conv_2d_3x3.cl b/mace/ops/opencl/cl/conv_2d_3x3.cl
index 0c37cd8040cc8eec243d133fd9ce79bef344bf64..f918504ae9a6d186e4695f521cdea38ff7e1c233 100644
--- a/mace/ops/opencl/cl/conv_2d_3x3.cl
+++ b/mace/ops/opencl/cl/conv_2d_3x3.cl
@@ -9,7 +9,7 @@ __kernel void conv_2d_3x3(OUT_OF_RANGE_PARAMS
 #endif
                           __write_only image2d_t output,
                           __private const float relux_max_limit,
-                          __private const float leakyrelu_coefficient,
+                          __private const float activation_coefficient,
                           __private const int in_height,
                           __private const int in_width,
                           __private const int in_ch_blks,
@@ -130,12 +130,12 @@ __kernel void conv_2d_3x3(OUT_OF_RANGE_PARAMS
     }
   }
 
-#if defined(USE_RELU) || defined(USE_LEAKYRELU) || defined(USE_RELUX) || defined(USE_TANH) || defined(USE_SIGMOID)
-  out0 = do_activation(out0, relux_max_limit, leakyrelu_coefficient);
-  out1 = do_activation(out1, relux_max_limit, leakyrelu_coefficient);
-  out2 = do_activation(out2, relux_max_limit, leakyrelu_coefficient);
-  out3 = do_activation(out3, relux_max_limit, leakyrelu_coefficient);
-  out4 = do_activation(out4, relux_max_limit, leakyrelu_coefficient);
+#if defined(USE_RELU) || defined(USE_LEAKYRELU) || defined(USE_RELUX) || defined(USE_TANH) || defined(USE_SIGMOID) || defined(USE_ELU)
+  out0 = do_activation(out0, relux_max_limit, activation_coefficient);
+  out1 = do_activation(out1, relux_max_limit, activation_coefficient);
+  out2 = do_activation(out2, relux_max_limit, activation_coefficient);
+  out3 = do_activation(out3, relux_max_limit, activation_coefficient);
+  out4 = do_activation(out4, relux_max_limit, activation_coefficient);
 #endif
 
   const int out_x_base = mul24(out_ch_blk, out_width);
diff --git a/mace/ops/opencl/cl/conv_2d_buffer.cl b/mace/ops/opencl/cl/conv_2d_buffer.cl
index 41efc13a6c935291889e667d4eb3b7cee50f7472..d90e595134464795ff43044f82c61a4e5d890fb3 100644
--- a/mace/ops/opencl/cl/conv_2d_buffer.cl
+++ b/mace/ops/opencl/cl/conv_2d_buffer.cl
@@ -22,7 +22,7 @@ __kernel void conv2d(BUFFER_OUT_OF_RANGE_PARAMS
                      __private const int dilated_h_offset,
                      __private const int dilated_w_offset,
                      __private const float relux_max_limit,
-                     __private const float leakyrelu_coefficient,
+                     __private const float activation_coefficient,
                      __global OUT_DATA_TYPE *output) {
   const int out_wc_blk_idx = get_global_id(0);
   const int out_hb_idx = get_global_id(1);
@@ -108,11 +108,11 @@ __kernel void conv2d(BUFFER_OUT_OF_RANGE_PARAMS
     }
   }
 
-#if defined(USE_RELU) || defined(USE_LEAKYRELU) || defined(USE_RELUX) || defined(USE_TANH) || defined(USE_SIGMOID)
-  out0 = do_activation(out0, relux_max_limit, leakyrelu_coefficient);
-  out1 = do_activation(out1, relux_max_limit, leakyrelu_coefficient);
-  out2 = do_activation(out2, relux_max_limit, leakyrelu_coefficient);
-  out3 = do_activation(out3, relux_max_limit, leakyrelu_coefficient);
+#if defined(USE_RELU) || defined(USE_LEAKYRELU) || defined(USE_RELUX) || defined(USE_TANH) || defined(USE_SIGMOID) || defined(USE_ELU)
+  out0 = do_activation(out0, relux_max_limit, activation_coefficient);
+  out1 = do_activation(out1, relux_max_limit, activation_coefficient);
+  out2 = do_activation(out2, relux_max_limit, activation_coefficient);
+  out3 = do_activation(out3, relux_max_limit, activation_coefficient);
 #endif
 
   int out_offset = mad24(mad24(mad24(batch_idx, out_height, out_height_idx),
diff --git a/mace/ops/opencl/cl/deconv_2d.cl b/mace/ops/opencl/cl/deconv_2d.cl
index ef7c70250fb0ea080b011c5349dbe98c931bf417..0a787771b2901ef07bbea6b1f4bd05c70b6cbe56 100644
--- a/mace/ops/opencl/cl/deconv_2d.cl
+++ b/mace/ops/opencl/cl/deconv_2d.cl
@@ -9,7 +9,7 @@ __kernel void deconv_2d(OUT_OF_RANGE_PARAMS
 #endif
                         __write_only image2d_t output,
                         __private const float relux_max_limit,
-                        __private const float leakyrelu_coefficient,
+                        __private const float activation_coefficient,
                         __private const int in_height,
                         __private const int in_width,
                         __private const int in_channels,
@@ -129,12 +129,12 @@ __kernel void deconv_2d(OUT_OF_RANGE_PARAMS
     }
   }
 
-#if defined(USE_RELU) || defined(USE_LEAKYRELU) || defined(USE_RELUX) || defined(USE_TANH) || defined(USE_SIGMOID)
-  out0 = do_activation(out0, relux_max_limit, leakyrelu_coefficient);
-  out1 = do_activation(out1, relux_max_limit, leakyrelu_coefficient);
-  out2 = do_activation(out2, relux_max_limit, leakyrelu_coefficient);
-  out3 = do_activation(out3, relux_max_limit, leakyrelu_coefficient);
-  out4 = do_activation(out4, relux_max_limit, leakyrelu_coefficient);
+#if defined(USE_RELU) || defined(USE_LEAKYRELU) || defined(USE_RELUX) || defined(USE_TANH) || defined(USE_SIGMOID) || defined(USE_ELU)
+  out0 = do_activation(out0, relux_max_limit, activation_coefficient);
+  out1 = do_activation(out1, relux_max_limit, activation_coefficient);
+  out2 = do_activation(out2, relux_max_limit, activation_coefficient);
+  out3 = do_activation(out3, relux_max_limit, activation_coefficient);
+  out4 = do_activation(out4, relux_max_limit, activation_coefficient);
 #endif
 
   int2 out_pos;
diff --git a/mace/ops/opencl/cl/depthwise_conv2d.cl b/mace/ops/opencl/cl/depthwise_conv2d.cl
index 5a611968bcdf34d4bc870ef27eee71fa4f198f09..e5bcdcbc612b13c8d942376e6a5d03ae9aa91109 100644
--- a/mace/ops/opencl/cl/depthwise_conv2d.cl
+++ b/mace/ops/opencl/cl/depthwise_conv2d.cl
@@ -10,7 +10,7 @@ __kernel void depthwise_conv2d(OUT_OF_RANGE_PARAMS
 #endif
                                __write_only image2d_t output,
                                __private const float relux_max_limit,
-                               __private const float leakyrelu_coefficient,
+                               __private const float activation_coefficient,
                                __private const short in_height,
                                __private const short in_width,
                                __private const short in_ch_blks,
@@ -113,11 +113,11 @@ __kernel void depthwise_conv2d(OUT_OF_RANGE_PARAMS
     in_hb_idx += dilation_h;
   }
 
-#if defined(USE_RELU) || defined(USE_LEAKYRELU) || defined(USE_RELUX) || defined(USE_TANH) || defined(USE_SIGMOID)
-  out0 = do_activation(out0, relux_max_limit, leakyrelu_coefficient);
-  out1 = do_activation(out1, relux_max_limit, leakyrelu_coefficient);
-  out2 = do_activation(out2, relux_max_limit, leakyrelu_coefficient);
-  out3 = do_activation(out3, relux_max_limit, leakyrelu_coefficient);
+#if defined(USE_RELU) || defined(USE_LEAKYRELU) || defined(USE_RELUX) || defined(USE_TANH) || defined(USE_SIGMOID) || defined(USE_ELU)
+  out0 = do_activation(out0, relux_max_limit, activation_coefficient);
+  out1 = do_activation(out1, relux_max_limit, activation_coefficient);
+  out2 = do_activation(out2, relux_max_limit, activation_coefficient);
+  out3 = do_activation(out3, relux_max_limit, activation_coefficient);
 #endif
 
   const short out_x_base = mul24(out_ch_blk, out_width);
@@ -146,7 +146,7 @@ __kernel void depthwise_conv2d_s1(OUT_OF_RANGE_PARAMS
 #endif
                                   __write_only image2d_t output,
                                   __private const DATA_TYPE relux_max_limit,
-                                  __private const DATA_TYPE leakyrelu_coefficient,
+                                  __private const DATA_TYPE activation_coefficient,
                                   __private const short in_height,
                                   __private const short in_width,
                                   __private const short in_ch_blks,
@@ -240,11 +240,11 @@ __kernel void depthwise_conv2d_s1(OUT_OF_RANGE_PARAMS
     in_hb_idx += 1;
   }
 
-#if defined(USE_RELU) || defined(USE_LEAKYRELU) || defined(USE_RELUX) || defined(USE_TANH) || defined(USE_SIGMOID)
-  out0 = do_activation(out0, relux_max_limit, leakyrelu_coefficient);
-  out1 = do_activation(out1, relux_max_limit, leakyrelu_coefficient);
-  out2 = do_activation(out2, relux_max_limit, leakyrelu_coefficient);
-  out3 = do_activation(out3, relux_max_limit, leakyrelu_coefficient);
+#if defined(USE_RELU) || defined(USE_LEAKYRELU) || defined(USE_RELUX) || defined(USE_TANH) || defined(USE_SIGMOID) || defined(USE_ELU)
+  out0 = do_activation(out0, relux_max_limit, activation_coefficient);
+  out1 = do_activation(out1, relux_max_limit, activation_coefficient);
+  out2 = do_activation(out2, relux_max_limit, activation_coefficient);
+  out3 = do_activation(out3, relux_max_limit, activation_coefficient);
 #endif
 
   const short out_x_base = mul24(out_ch_blk, out_width);
diff --git a/mace/ops/opencl/cl/depthwise_conv2d_buffer.cl b/mace/ops/opencl/cl/depthwise_conv2d_buffer.cl
index 6c42c11fcd3b4e52fe4c47bbce65c54c9fa9ddc1..b6661e6b403e4791a76e5d5022d628062f84d066 100644
--- a/mace/ops/opencl/cl/depthwise_conv2d_buffer.cl
+++ b/mace/ops/opencl/cl/depthwise_conv2d_buffer.cl
@@ -22,7 +22,7 @@ __kernel void depthwise_conv2d(BUFFER_OUT_OF_RANGE_PARAMS
                                __private const int dilated_h_offset,
                                __private const int dilated_w_offset,
                                __private const float relux_max_limit,
-                               __private const float leakyrelu_coefficient,
+                               __private const float activation_coefficient,
                                __global OUT_DATA_TYPE *output) {
   const int out_wc_blk_idx = get_global_id(0);
   const int out_hb_idx = get_global_id(1);
@@ -86,11 +86,11 @@ __kernel void depthwise_conv2d(BUFFER_OUT_OF_RANGE_PARAMS
     }
   }
 
-#if defined(USE_RELU) || defined(USE_LEAKYRELU) || defined(USE_RELUX) || defined(USE_TANH) || defined(USE_SIGMOID)
-  out0 = do_activation(out0, relux_max_limit, leakyrelu_coefficient);
-  out1 = do_activation(out1, relux_max_limit, leakyrelu_coefficient);
-  out2 = do_activation(out2, relux_max_limit, leakyrelu_coefficient);
-  out3 = do_activation(out3, relux_max_limit, leakyrelu_coefficient);
+#if defined(USE_RELU) || defined(USE_LEAKYRELU) || defined(USE_RELUX) || defined(USE_TANH) || defined(USE_SIGMOID) || defined(USE_ELU)
+  out0 = do_activation(out0, relux_max_limit, activation_coefficient);
+  out1 = do_activation(out1, relux_max_limit, activation_coefficient);
+  out2 = do_activation(out2, relux_max_limit, activation_coefficient);
+  out3 = do_activation(out3, relux_max_limit, activation_coefficient);
 #endif
 
   int out_offset = mad24(mad24(mad24(batch_idx, out_height, out_height_idx),
diff --git a/mace/ops/opencl/cl/depthwise_deconv2d.cl b/mace/ops/opencl/cl/depthwise_deconv2d.cl
index b86bc96d8f1b3f509e4b030d3f5b02610e194d5e..72bdbf9cb91fa95e8a08ef3aaf9a6b136a727259 100644
--- a/mace/ops/opencl/cl/depthwise_deconv2d.cl
+++ b/mace/ops/opencl/cl/depthwise_deconv2d.cl
@@ -9,7 +9,7 @@ __kernel void depthwise_deconv2d(OUT_OF_RANGE_PARAMS
 #endif
                                  __write_only image2d_t output,
                                  __private const float relux_max_limit,
-                                 __private const float leakyrelu_coefficient,
+                                 __private const float activation_coefficient,
                                  __private const int in_height,
                                  __private const int in_width,
                                  __private const int out_height,
@@ -109,12 +109,12 @@ __kernel void depthwise_deconv2d(OUT_OF_RANGE_PARAMS
     }
   }
 
-#if defined(USE_RELU) || defined(USE_LEAKYRELU) || defined(USE_RELUX) || defined(USE_TANH) || defined(USE_SIGMOID)
-  out0 = do_activation(out0, relux_max_limit, leakyrelu_coefficient);
-  out1 = do_activation(out1, relux_max_limit, leakyrelu_coefficient);
-  out2 = do_activation(out2, relux_max_limit, leakyrelu_coefficient);
-  out3 = do_activation(out3, relux_max_limit, leakyrelu_coefficient);
-  out4 = do_activation(out4, relux_max_limit, leakyrelu_coefficient);
+#if defined(USE_RELU) || defined(USE_LEAKYRELU) || defined(USE_RELUX) || defined(USE_TANH) || defined(USE_SIGMOID) || defined(USE_ELU)
+  out0 = do_activation(out0, relux_max_limit, activation_coefficient);
+  out1 = do_activation(out1, relux_max_limit, activation_coefficient);
+  out2 = do_activation(out2, relux_max_limit, activation_coefficient);
+  out3 = do_activation(out3, relux_max_limit, activation_coefficient);
+  out4 = do_activation(out4, relux_max_limit, activation_coefficient);
 #endif
 
diff --git a/mace/ops/opencl/cl/fully_connected.cl b/mace/ops/opencl/cl/fully_connected.cl
index f7f4bc48a5a9db922063d91e8ea4a010ef69c5c9..b9b471731d86b1ab5613e9cdf8ed4052d1534bea 100644
--- a/mace/ops/opencl/cl/fully_connected.cl
+++ b/mace/ops/opencl/cl/fully_connected.cl
@@ -13,7 +13,7 @@ __kernel void fully_connected(OUT_OF_RANGE_PARAMS
                               __private const int input_width,
                               __private const int input_channel,
                               __private const float relux_max_limit,
-                              __private const float leakyrelu_coefficient) {
+                              __private const float activation_coefficient) {
   const int batch_idx = get_global_id(0);
   const int out_blk_idx = get_global_id(1);
   const int input_chan_blk = (input_channel + 3) >> 2;
@@ -57,8 +57,8 @@ __kernel void fully_connected(OUT_OF_RANGE_PARAMS
     input_coord.y++;
   }
 
-#if defined(USE_RELU) || defined(USE_LEAKYRELU) || defined(USE_RELUX) || defined(USE_TANH) || defined(USE_SIGMOID)
-  result = do_activation(result, relux_max_limit, leakyrelu_coefficient);
+#if defined(USE_RELU) || defined(USE_LEAKYRELU) || defined(USE_RELUX) || defined(USE_TANH) || defined(USE_SIGMOID) || defined(USE_ELU)
+  result = do_activation(result, relux_max_limit, activation_coefficient);
 #endif
 
   WRITE_IMAGET(output, (int2)(out_blk_idx, batch_idx), result);
@@ -79,7 +79,7 @@ __kernel void fully_connected_width(OUT_OF_RANGE_PARAMS
                                     __private const int in_chan_blks,
                                     __private const int out_blks,
                                     __private const float relux_max_limit,
-                                    __private const float leakyrelu_coefficient) {
+                                    __private const float activation_coefficient) {
   const int inter_out_idx = get_global_id(0);
   const int width_blk_idx = get_global_id(1);
   const int width_blk_count = global_size_dim1;
@@ -149,8 +149,8 @@ __kernel void fully_connected_width(OUT_OF_RANGE_PARAMS
     inter_idx += 4;
   }
 
-#if defined(USE_RELU) || defined(USE_LEAKYRELU) ||
 defined(USE_RELUX) || defined(USE_TANH) || defined(USE_SIGMOID)
-  result = do_activation(result, relux_max_limit, leakyrelu_coefficient);
+#if defined(USE_RELU) || defined(USE_LEAKYRELU) || defined(USE_RELUX) || defined(USE_TANH) || defined(USE_SIGMOID) || defined(USE_ELU)
+  result = do_activation(result, relux_max_limit, activation_coefficient);
 #endif
 
   WRITE_IMAGET(output, (int2)(out_blk_idx, batch_idx), result);
diff --git a/mace/ops/opencl/cl/winograd_transform.cl b/mace/ops/opencl/cl/winograd_transform.cl
index d30427b5ad07a119de65cccad9b10f1f83e69928..169d614810255ec2311b8c874aadb91e55832409 100644
--- a/mace/ops/opencl/cl/winograd_transform.cl
+++ b/mace/ops/opencl/cl/winograd_transform.cl
@@ -128,7 +128,7 @@ __kernel void winograd_inverse_transform_2x2(OUT_OF_RANGE_PARAMS
                                              __private const int round_hw,
                                              __private const int round_w,
                                              __private const float relux_max_limit,
-                                             __private const float leakyrelu_coefficient) {
+                                             __private const float activation_coefficient) {
   const int width_idx = get_global_id(0);
   const int height_idx = get_global_id(1);
 
@@ -204,11 +204,11 @@ __kernel void winograd_inverse_transform_2x2(OUT_OF_RANGE_PARAMS
 #endif
 
-#if defined(USE_RELU) || defined(USE_LEAKYRELU) || defined(USE_RELUX) || defined(USE_TANH) || defined(USE_SIGMOID)
-  in0[0] = do_activation(in0[0], relux_max_limit, leakyrelu_coefficient);
-  in0[1] = do_activation(in0[1], relux_max_limit, leakyrelu_coefficient);
-  in1[0] = do_activation(in1[0], relux_max_limit, leakyrelu_coefficient);
-  in1[1] = do_activation(in1[1], relux_max_limit, leakyrelu_coefficient);
+#if defined(USE_RELU) || defined(USE_LEAKYRELU) || defined(USE_RELUX) || defined(USE_TANH) || defined(USE_SIGMOID) || defined(USE_ELU)
+  in0[0] = do_activation(in0[0], relux_max_limit, activation_coefficient);
+  in0[1] = do_activation(in0[1], relux_max_limit, activation_coefficient);
+  in1[0] = do_activation(in1[0], relux_max_limit, activation_coefficient);
+  in1[1] = do_activation(in1[1], relux_max_limit, activation_coefficient);
 #endif
 
   WRITE_IMAGET(output, (int2)(coord_x, coord_y), in0[0]);
@@ -397,7 +397,7 @@ __kernel void winograd_inverse_transform_4x4(OUT_OF_RANGE_PARAMS
                                              __private const int round_hw,
                                              __private const int round_w,
                                              __private const float relux_max_limit,
-                                             __private const float leakyrelu_coefficient) {
+                                             __private const float activation_coefficient) {
   const int width_idx = get_global_id(0);
   const int height_idx = get_global_id(1);
 
@@ -517,23 +517,23 @@ __kernel void winograd_inverse_transform_4x4(OUT_OF_RANGE_PARAMS
   out3[3] += bias_value;
 #endif
 
-#if defined(USE_RELU) ||
 defined(USE_LEAKYRELU) || defined(USE_RELUX) || defined(USE_TANH) || defined(USE_SIGMOID)
-  out0[0] = do_activation(out0[0], relux_max_limit, leakyrelu_coefficient);
-  out0[1] = do_activation(out0[1], relux_max_limit, leakyrelu_coefficient);
-  out0[2] = do_activation(out0[2], relux_max_limit, leakyrelu_coefficient);
-  out0[3] = do_activation(out0[3], relux_max_limit, leakyrelu_coefficient);
-  out1[0] = do_activation(out1[0], relux_max_limit, leakyrelu_coefficient);
-  out1[1] = do_activation(out1[1], relux_max_limit, leakyrelu_coefficient);
-  out1[2] = do_activation(out1[2], relux_max_limit, leakyrelu_coefficient);
-  out1[3] = do_activation(out1[3], relux_max_limit, leakyrelu_coefficient);
-  out2[0] = do_activation(out2[0], relux_max_limit, leakyrelu_coefficient);
-  out2[1] = do_activation(out2[1], relux_max_limit, leakyrelu_coefficient);
-  out2[2] = do_activation(out2[2], relux_max_limit, leakyrelu_coefficient);
-  out2[3] = do_activation(out2[3], relux_max_limit, leakyrelu_coefficient);
-  out3[0] = do_activation(out3[0], relux_max_limit, leakyrelu_coefficient);
-  out3[1] = do_activation(out3[1], relux_max_limit, leakyrelu_coefficient);
-  out3[2] = do_activation(out3[2], relux_max_limit, leakyrelu_coefficient);
-  out3[3] = do_activation(out3[3], relux_max_limit, leakyrelu_coefficient);
+#if defined(USE_RELU) || defined(USE_LEAKYRELU) || defined(USE_RELUX) || defined(USE_TANH) || defined(USE_SIGMOID) || defined(USE_ELU)
+  out0[0] = do_activation(out0[0], relux_max_limit, activation_coefficient);
+  out0[1] = do_activation(out0[1], relux_max_limit, activation_coefficient);
+  out0[2] = do_activation(out0[2], relux_max_limit, activation_coefficient);
+  out0[3] = do_activation(out0[3], relux_max_limit, activation_coefficient);
+  out1[0] = do_activation(out1[0], relux_max_limit, activation_coefficient);
+  out1[1] = do_activation(out1[1], relux_max_limit, activation_coefficient);
+  out1[2] = do_activation(out1[2], relux_max_limit, activation_coefficient);
+  out1[3] = do_activation(out1[3], relux_max_limit, activation_coefficient);
+  out2[0] = do_activation(out2[0], relux_max_limit, activation_coefficient);
+  out2[1] = do_activation(out2[1], relux_max_limit, activation_coefficient);
+  out2[2] = do_activation(out2[2], relux_max_limit, activation_coefficient);
+  out2[3] = do_activation(out2[3], relux_max_limit, activation_coefficient);
+  out3[0] = do_activation(out3[0], relux_max_limit, activation_coefficient);
+  out3[1] = do_activation(out3[1], relux_max_limit, activation_coefficient);
+  out3[2] = do_activation(out3[2], relux_max_limit, activation_coefficient);
+  out3[3] = do_activation(out3[3], relux_max_limit, activation_coefficient);
 #endif
 
   const int num = min(4, out_width - out_width_idx);
diff --git a/mace/ops/opencl/conv_2d.h b/mace/ops/opencl/conv_2d.h
index d6dd40bd6d05c5e5d96af649190c6b9a1ef60822..d59273e7eaac1baaebfb8b2168a0ee8217a5ec1b 100644
--- a/mace/ops/opencl/conv_2d.h
+++ b/mace/ops/opencl/conv_2d.h
@@ -46,7 +46,7 @@ class OpenCLConv2dKernel {
       const int *dilations,
       const ActivationType activation,
       const float relux_max_limit,
-      const float leakyrelu_coefficient,
+      const float activation_coefficient,
       const int winograd_blk_size,
       Tensor *output) = 0;
   MACE_EMPTY_VIRTUAL_DESTRUCTOR(OpenCLConv2dKernel);
diff --git a/mace/ops/opencl/deconv_2d.h b/mace/ops/opencl/deconv_2d.h
index 3335bebf967ba0321d30cce0ff0b249fcffcacd0..fc486c591f2ba162b75710d8d761a091d33044a0 100644
--- a/mace/ops/opencl/deconv_2d.h
+++ b/mace/ops/opencl/deconv_2d.h
@@ -39,7 +39,7 @@ class OpenCLDeconv2dKernel {
      const int *padding_data,
       const ActivationType activation,
       const float relux_max_limit,
-      const float leakyrelu_coefficient,
+      const float activation_coefficient,
       const std::vector<index_t> &output_shape,
       Tensor *output) = 0;
   MACE_EMPTY_VIRTUAL_DESTRUCTOR(OpenCLDeconv2dKernel);
diff --git a/mace/ops/opencl/depthwise_conv2d.h b/mace/ops/opencl/depthwise_conv2d.h
index 98f97a2016eff313494beb79656fbdedfa15c5d4..ddf3b035d913d90841dfcd1e527bce80dfb6b7c4 100644
--- a/mace/ops/opencl/depthwise_conv2d.h
+++ b/mace/ops/opencl/depthwise_conv2d.h
@@ -38,7 +38,7 @@ class OpenCLDepthwiseConv2dKernel {
       const int *dilations,
       const ActivationType activation,
       const float relux_max_limit,
-      const float leakyrelu_coefficient,
+      const float activation_coefficient,
       Tensor *output) = 0;
   MACE_EMPTY_VIRTUAL_DESTRUCTOR(OpenCLDepthwiseConv2dKernel);
 };
diff --git a/mace/ops/opencl/depthwise_deconv2d.h b/mace/ops/opencl/depthwise_deconv2d.h
index 462010729589fcee949f6d64c2387de55f0e44a8..f0426a7be345039b939ea311af08b911fbf50c58 100644
--- a/mace/ops/opencl/depthwise_deconv2d.h
+++ b/mace/ops/opencl/depthwise_deconv2d.h
@@ -42,7 +42,7 @@ class OpenCLDepthwiseDeconv2dKernel {
       const int group,
       const ActivationType activation,
       const float relux_max_limit,
-      const float leakyrelu_coefficient,
+      const float activation_coefficient,
       const std::vector<index_t> &output_shape,
       Tensor *output) = 0;
   MACE_EMPTY_VIRTUAL_DESTRUCTOR(OpenCLDepthwiseDeconv2dKernel);
diff --git a/mace/ops/opencl/fully_connected.h b/mace/ops/opencl/fully_connected.h
index 88c1cbaba293fcb42c059b46f5e62e0bcd9de70c..7a5ca7906ef98bb560d2764a8527101779f7dfcf 100644
--- a/mace/ops/opencl/fully_connected.h
+++ b/mace/ops/opencl/fully_connected.h
@@ -34,7 +34,7 @@ class OpenCLFullyConnectedKernel {
       const Tensor *bias,
       const ActivationType activation,
       const float relux_max_limit,
-      const float leakyrelu_coefficient,
+      const float activation_coefficient,
       Tensor *output) = 0;
   MACE_EMPTY_VIRTUAL_DESTRUCTOR(OpenCLFullyConnectedKernel);
 };
diff --git a/mace/ops/opencl/image/activation.cc b/mace/ops/opencl/image/activation.cc
index f013c99071bb3347b5303ea9302f3099bd3878b7..300a1da64146ad89633ba3b6b61df59c9d053f7a 100644
--- a/mace/ops/opencl/image/activation.cc
+++ b/mace/ops/opencl/image/activation.cc
@@ -99,12 +99,12 @@ MaceStatus ActivationKernel::Compute(
     MACE_OUT_OF_RANGE_SET_ARGS(kernel_);
     MACE_SET_3D_GWS_ARGS(kernel_, gws);
     kernel_.setArg(idx++, *(input->opencl_image()));
-    if (activation_ == PRELU || activation_ == ELU) {
+    if (activation_ == PRELU) {
       MACE_CHECK_NOTNULL(alpha);
       kernel_.setArg(idx++, *(alpha->opencl_image()));
     }
     kernel_.setArg(idx++, relux_max_limit_);
-    kernel_.setArg(idx++, leakyrelu_coefficient_);
+    kernel_.setArg(idx++, activation_coefficient_);
     kernel_.setArg(idx++, *(output->opencl_image()));
 
     input_shape_ = input->shape();
diff --git a/mace/ops/opencl/image/activation.h b/mace/ops/opencl/image/activation.h
index bfbdc47c8cecb0dd10aa12dbcc17b6231fc9178a..2107964df84b7319f3ed5edff3682218502bd322 100644
--- a/mace/ops/opencl/image/activation.h
+++ b/mace/ops/opencl/image/activation.h
@@ -35,9 +35,9 @@ class ActivationKernel : public OpenCLActivationKernel {
  public:
   ActivationKernel(ActivationType type,
                    float relux_max_limit,
-                   float leakyrelu_coefficient)
+                   float activation_coefficient)
       : activation_(type), relux_max_limit_(relux_max_limit),
-        leakyrelu_coefficient_(leakyrelu_coefficient) {}
+        activation_coefficient_(activation_coefficient) {}
 
   MaceStatus Compute(
       OpContext *context,
@@ -48,7 +48,8 @@ class ActivationKernel : public OpenCLActivationKernel {
  private:
   ActivationType activation_;
   float relux_max_limit_;
-  float leakyrelu_coefficient_;
+  float activation_coefficient_;
+
   cl::Kernel kernel_;
   uint32_t kwg_size_;
   std::vector<index_t> input_shape_;
diff --git a/mace/ops/opencl/image/batch_norm.cc b/mace/ops/opencl/image/batch_norm.cc
index bfb496e77904f274d92a1846d25eeb14c12cc4aa..825233267f7c1fd5f6317d1c2378e9aeab271587 100644
--- a/mace/ops/opencl/image/batch_norm.cc
+++ b/mace/ops/opencl/image/batch_norm.cc
@@ -22,11 +22,11 @@ namespace image {
 BatchNormKernel::BatchNormKernel(const float epsilon,
                                  const ActivationType activation,
                                  const float relux_max_limit,
-                                 const float leakyrelu_coefficient)
+                                 const float activation_coefficient)
     : epsilon_(epsilon),
       activation_(activation),
       relux_max_limit_(relux_max_limit),
-      leakyrelu_coefficient_(leakyrelu_coefficient) {}
+      activation_coefficient_(activation_coefficient) {}
 
 MaceStatus BatchNormKernel::Compute(
     OpContext *context,
@@ -75,6 +75,8 @@ MaceStatus BatchNormKernel::Compute(
         break;
       case LEAKYRELU:built_options.emplace("-DUSE_LEAKYRELU");
         break;
+      case ELU: built_options.emplace("-DUSE_ELU");
+        break;
       default:LOG(FATAL) << "Unknown activation type: " << activation_;
     }
 
@@ -99,7 +101,7 @@ MaceStatus BatchNormKernel::Compute(
     }
     kernel_.setArg(idx++, *(output->opencl_image()));
     kernel_.setArg(idx++, relux_max_limit_);
-    kernel_.setArg(idx++, leakyrelu_coefficient_);
+    kernel_.setArg(idx++, activation_coefficient_);
 
     input_shape_ = input->shape();
   }
diff --git a/mace/ops/opencl/image/batch_norm.h b/mace/ops/opencl/image/batch_norm.h
index 9a93b534188cd658322ce0fcda42a1d97419f611..b6508f00412d24afaf318a7818345f868222d36c 100644
--- a/mace/ops/opencl/image/batch_norm.h
+++ b/mace/ops/opencl/image/batch_norm.h
@@ -37,7 +37,7 @@ class BatchNormKernel : public OpenCLBatchNormKernel {
       const float epsilon,
       const ActivationType activation,
       const float relux_max_limit,
-      const float leakyrelu_coefficient);
+      const float activation_coefficient);
   MaceStatus Compute(OpContext *context,
                      const Tensor *input,
                      const Tensor *scale,
@@ -50,7 +50,7 @@ class BatchNormKernel : public OpenCLBatchNormKernel {
   const float epsilon_;
   const ActivationType activation_;
   const float relux_max_limit_;
-  const float leakyrelu_coefficient_;
+  const float activation_coefficient_;
   cl::Kernel kernel_;
   uint32_t kwg_size_;
   std::vector<index_t> input_shape_;
diff --git a/mace/ops/opencl/image/conv_2d.cc b/mace/ops/opencl/image/conv_2d.cc
index 60a9b15537356435a32b8ff5404091e18f471b43..4f57c1aa505394013d1c4f24632565027bc7c576 100644
--- a/mace/ops/opencl/image/conv_2d.cc
+++ b/mace/ops/opencl/image/conv_2d.cc
@@ -68,7 +68,7 @@ MaceStatus Conv2dKernel::Compute(
     const int *dilations,
     const ActivationType activation,
     const float relux_max_limit,
-    const float leakyrelu_coefficient,
+    const float activation_coefficient,
     const int wino_blk_size,
     Tensor *output) {
   index_t kernel_h = filter->dim(2);
@@ -116,7 +116,7 @@ MaceStatus Conv2dKernel::Compute(
         paddings.data(),
         activation,
         relux_max_limit,
-        leakyrelu_coefficient,
+        activation_coefficient,
         wino_blk_size,
         &input_shape_,
         output,
@@ -135,7 +135,7 @@ MaceStatus Conv2dKernel::Compute(
         dilations,
         activation,
         relux_max_limit,
-        leakyrelu_coefficient,
+        activation_coefficient,
         &input_shape_,
         output,
         &kwg_size_[0]);
@@ -153,7 +153,7 @@ MaceStatus Conv2dKernel::Compute(
         dilations,
         activation,
         relux_max_limit,
-        leakyrelu_coefficient,
+        activation_coefficient,
         &input_shape_,
         output,
         &kwg_size_[0]);
@@ -171,7 +171,7 @@ MaceStatus Conv2dKernel::Compute(
         dilations,
         activation,
         relux_max_limit,
-        leakyrelu_coefficient,
+        activation_coefficient,
         &input_shape_,
         output,
         &kwg_size_[0]);
diff --git a/mace/ops/opencl/image/conv_2d.h b/mace/ops/opencl/image/conv_2d.h
index 1ecd913137891542c11117ee54f437877e655971..2d54ddc9d55b11bc443bf0d156491624d5ccea8e 100644
--- a/mace/ops/opencl/image/conv_2d.h
+++ b/mace/ops/opencl/image/conv_2d.h
@@ -39,7 +39,7 @@ extern MaceStatus Conv2dK1x1(OpContext *context,
                              const int *dilations,
                              const ActivationType activation,
                              const float relux_max_limit,
-                             const float leakyrelu_coefficient,
+                             const float activation_coefficient,
                              std::vector<index_t> *prev_input_shape,
                              Tensor *output,
                              uint32_t *kwg_size);
@@ -55,7 +55,7 @@ extern MaceStatus Conv2dK3x3(OpContext *context,
                              const int *dilations,
                              const ActivationType activation,
                              const float relux_max_limit,
-                             const float leakyrelu_coefficient,
+                             const float activation_coefficient,
                              std::vector<index_t> *prev_input_shape,
                              Tensor *output,
                              uint32_t *kwg_size);
@@ -71,7 +71,7 @@ extern MaceStatus Conv2d(OpContext *context,
                          const int *dilations,
                          const ActivationType activation,
                          const float relux_max_limit,
-                         const float leakyrelu_coefficient,
+                         const float activation_coefficient,
                          std::vector<index_t> *prev_input_shape,
                          Tensor *output,
                          uint32_t *kwg_size);
@@ -84,7 +84,7 @@ extern MaceStatus WinogradConv2dK3x3S1(OpContext *context,
                                        const int *padding,
                                        const ActivationType activation,
                                        const float relux_max_limit,
-                                       const float leakyrelu_coefficient,
+                                       const float activation_coefficient,
                                        const int wino_blk_size,
                                        std::vector<index_t> *prev_input_shape,
                                        Tensor *output,
@@ -111,7 +111,7 @@ class Conv2dKernel : public OpenCLConv2dKernel {
       const int *dilations,
       const ActivationType activation,
       const float relux_max_limit,
-      const float leakyrelu_coefficient,
+      const float activation_coefficient,
       const int wino_blk_size,
       Tensor *output) override;
diff --git a/mace/ops/opencl/image/conv_2d_1x1.cc b/mace/ops/opencl/image/conv_2d_1x1.cc
index 2d4baa5bbcd5123a2542bb9db1cb1a871f7a6e9c..8827d99ead6c74f4608ee5a582ae156560ae3596 100644
--- a/mace/ops/opencl/image/conv_2d_1x1.cc
+++ b/mace/ops/opencl/image/conv_2d_1x1.cc
@@ -77,7 +77,7 @@ MaceStatus Conv2dK1x1(OpContext *context,
                       const int *dilations,
                       const ActivationType activation,
                       const float relux_max_limit,
-                      const float leakyrelu_coefficient,
+                      const float activation_coefficient,
                       std::vector<index_t> *prev_input_shape,
                       Tensor *output,
                       uint32_t *kwg_size) {
@@ -135,6 +135,10 @@ MaceStatus Conv2dK1x1(OpContext *context,
         built_options.emplace("-DUSE_LEAKYRELU");
         break;
       }
+      case ELU: {
+        built_options.emplace("-DUSE_ELU");
+        break;
+      }
       default: {
         LOG(FATAL) << "Unknown activation type: " << activation;
       }
@@ -165,7 +169,7 @@ MaceStatus Conv2dK1x1(OpContext *context,
     kernel->setArg(idx++, *(output->opencl_image()));
     // FIXME handle flexable data type: half not supported
     kernel->setArg(idx++, relux_max_limit);
-    kernel->setArg(idx++, leakyrelu_coefficient);
+    kernel->setArg(idx++, activation_coefficient);
     kernel->setArg(idx++, static_cast<int>(input_height));
     kernel->setArg(idx++, static_cast<int>(input_width));
     kernel->setArg(idx++, static_cast<int>(input_channel_blocks));
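
Because the coefficient reaches the GPU positionally via setArg, the rename is host-side only: every kernel keeps reading the same argument slot it did under the old name. A short sketch of that contract (slot comments are illustrative):

// The coefficient stays directly after relux_max_limit in each kernel's
// argument list, so renaming the C++ identifier changes nothing about
// what the compiled .cl kernels receive.
uint32_t idx = 0;
kernel->setArg(idx++, *(output->opencl_image()));
kernel->setArg(idx++, relux_max_limit);         // read under USE_RELUX
kernel->setArg(idx++, activation_coefficient);  // read under USE_LEAKYRELU
                                                // and now USE_ELU
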
diff --git a/mace/ops/opencl/image/conv_2d_3x3.cc b/mace/ops/opencl/image/conv_2d_3x3.cc
index c5ea2890751f9cf9ca5a7455d6abd35fe323f98e..8f3b2939ff90581532824121ea5ed8a4e2e4602f 100644
--- a/mace/ops/opencl/image/conv_2d_3x3.cc
+++ b/mace/ops/opencl/image/conv_2d_3x3.cc
@@ -70,7 +70,7 @@ MaceStatus Conv2dK3x3(OpContext *context,
                       const int *dilations,
                       const ActivationType activation,
                       const float relux_max_limit,
-                      const float leakyrelu_coefficient,
+                      const float activation_coefficient,
                       std::vector<index_t> *prev_input_shape,
                       Tensor *output,
                       uint32_t *kwg_size) {
@@ -120,6 +120,10 @@ MaceStatus Conv2dK3x3(OpContext *context,
         built_options.emplace("-DUSE_LEAKYRELU");
         break;
       }
+      case ELU: {
+        built_options.emplace("-DUSE_ELU");
+        break;
+      }
       default: {
         LOG(FATAL) << "Unknown activation type: " << activation;
       }
@@ -149,7 +153,7 @@ MaceStatus Conv2dK3x3(OpContext *context,
     }
     kernel->setArg(idx++, *(output->opencl_image()));
     kernel->setArg(idx++, relux_max_limit);
-    kernel->setArg(idx++, leakyrelu_coefficient);
+    kernel->setArg(idx++, activation_coefficient);
     kernel->setArg(idx++, static_cast<int>(input->dim(1)));
     kernel->setArg(idx++, static_cast<int>(input->dim(2)));
     kernel->setArg(idx++, static_cast<int>(input_channel_blocks));
diff --git a/mace/ops/opencl/image/conv_2d_general.cc b/mace/ops/opencl/image/conv_2d_general.cc
index b84d83949d26235da4c51a135f4965ca6f8cfe3a..22be870966723cdff8a5d8271c40257e723f452a 100644
--- a/mace/ops/opencl/image/conv_2d_general.cc
+++ b/mace/ops/opencl/image/conv_2d_general.cc
@@ -78,7 +78,7 @@ MaceStatus Conv2d(OpContext *context,
                   const int *dilations,
                   const ActivationType activation,
                   const float relux_max_limit,
-                  const float leakyrelu_coefficient,
+                  const float activation_coefficient,
                   std::vector<index_t> *prev_input_shape,
                   Tensor *output,
                   uint32_t *kwg_size) {
@@ -128,6 +128,9 @@ MaceStatus Conv2d(OpContext *context,
         built_options.emplace("-DUSE_LEAKYRELU");
         break;
       }
+      case ELU:
+        built_options.emplace("-DUSE_ELU");
+        break;
       default: {
         LOG(FATAL) << "Unknown activation type: " << activation;
       }
@@ -157,7 +160,7 @@ MaceStatus Conv2d(OpContext *context,
     }
     kernel->setArg(idx++, *(output->opencl_image()));
     kernel->setArg(idx++, relux_max_limit);
-    kernel->setArg(idx++, leakyrelu_coefficient);
+    kernel->setArg(idx++, activation_coefficient);
     kernel->setArg(idx++, static_cast<int>(input->dim(1)));
     kernel->setArg(idx++, static_cast<int>(input->dim(2)));
     kernel->setArg(idx++, static_cast<int>(input_channel_blocks));
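
All of these kernels bake the activation choice in at program build time through the -DUSE_* options rather than branching per element at run time. A rough C++ rendering of that compile-time dispatch, assuming a do_activation-style helper in the shared OpenCL sources (the helper name and layout are assumptions, not shown in this patch):

#include <cmath>

// Illustrative only: how one -DUSE_* macro selects the activation when the
// program is compiled. The real code is OpenCL C in the .cl files.
inline float do_activation(float in, float relux_max_limit, float coeff) {
#if defined(USE_RELU)
  return std::fmax(in, 0.f);
#elif defined(USE_RELUX)
  return std::fmin(std::fmax(in, 0.f), relux_max_limit);
#elif defined(USE_LEAKYRELU)
  return std::fmax(in, 0.f) + std::fmin(in, 0.f) * coeff;
#elif defined(USE_ELU)
  return in >= 0.f ? in : coeff * (std::exp(in) - 1.f);
#else
  return in;  // NOOP
#endif
}
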
diff --git a/mace/ops/opencl/image/deconv_2d.cc b/mace/ops/opencl/image/deconv_2d.cc
index 0509fcf005dc9abf20ad241cf45e8e3cd755a1c7..8284b1dd0ef46e50df859e066f9f62864402b098 100644
--- a/mace/ops/opencl/image/deconv_2d.cc
+++ b/mace/ops/opencl/image/deconv_2d.cc
@@ -29,7 +29,7 @@ MaceStatus Deconv2dKernel::Compute(
     const int *padding_data,
     const ActivationType activation,
     const float relux_max_limit,
-    const float leakyrelu_coefficient,
+    const float activation_coefficient,
     const std::vector<index_t> &output_shape,
     Tensor *output) {
   std::vector<size_t> output_image_shape;
@@ -90,6 +90,9 @@ MaceStatus Deconv2dKernel::Compute(
       case LEAKYRELU:
         built_options.emplace("-DUSE_LEAKYRELU");
         break;
+      case ELU:
+        built_options.emplace("-DUSE_ELU");
+        break;
       default:
         LOG(FATAL) << "Unknown activation type: " << activation;
     }
@@ -117,7 +120,7 @@ MaceStatus Deconv2dKernel::Compute(
     }
     kernel_.setArg(idx++, *(output->opencl_image()));
     kernel_.setArg(idx++, relux_max_limit);
-    kernel_.setArg(idx++, leakyrelu_coefficient);
+    kernel_.setArg(idx++, activation_coefficient);
     kernel_.setArg(idx++, static_cast<int>(input->dim(1)));
     kernel_.setArg(idx++, static_cast<int>(input->dim(2)));
     kernel_.setArg(idx++, static_cast<int>(input->dim(3)));
diff --git a/mace/ops/opencl/image/deconv_2d.h b/mace/ops/opencl/image/deconv_2d.h
index 2ab385046f49ed629fa0b90d15b8d1b9416f5e59..928630b738f166e2c1b2f81475f6a0ee07195493 100644
--- a/mace/ops/opencl/image/deconv_2d.h
+++ b/mace/ops/opencl/image/deconv_2d.h
@@ -41,7 +41,7 @@ class Deconv2dKernel : public OpenCLDeconv2dKernel {
      const int *padding_data,
      const ActivationType activation,
      const float relux_max_limit,
-     const float leakyrelu_coefficient,
+     const float activation_coefficient,
      const std::vector<index_t> &output_shape,
      Tensor *output) override;
diff --git a/mace/ops/opencl/image/depthwise_conv2d.cc b/mace/ops/opencl/image/depthwise_conv2d.cc
index 0101ea136a8b14d825c9d6ec89c074e0d005f01b..2a1776ce2e74b4f7d0eb434444e9859593321d10 100644
--- a/mace/ops/opencl/image/depthwise_conv2d.cc
+++ b/mace/ops/opencl/image/depthwise_conv2d.cc
@@ -73,7 +73,7 @@ MaceStatus DepthwiseConv2d(OpContext *context,
                            const int *dilations,
                            const ActivationType activation,
                            const float relux_max_limit,
-                           const float leakyrelu_coefficient,
+                           const float activation_coefficient,
                            std::vector<index_t> *prev_input_shape,
                            Tensor *output,
                            uint32_t *kwg_size) {
@@ -129,6 +129,9 @@ MaceStatus DepthwiseConv2d(OpContext *context,
       case LEAKYRELU:
         built_options.emplace("-DUSE_LEAKYRELU");
         break;
+      case ELU:
+        built_options.emplace("-DUSE_ELU");
+        break;
       default:
         LOG(FATAL) << "Unknown activation type: " << activation;
     }
@@ -162,7 +165,7 @@ MaceStatus DepthwiseConv2d(OpContext *context,
     }
     kernel->setArg(idx++, *(output->opencl_image()));
     kernel->setArg(idx++, relux_max_limit);
-    kernel->setArg(idx++, leakyrelu_coefficient);
+    kernel->setArg(idx++, activation_coefficient);
     kernel->setArg(idx++, static_cast<int>(input_height));
     kernel->setArg(idx++, static_cast<int>(input_width));
     kernel->setArg(idx++, static_cast<int>(input_channel_blocks));
@@ -204,7 +207,7 @@ MaceStatus DepthwiseConv2dKernel::Compute(
     const int *dilations,
     const ActivationType activation,
     const float relux_max_limit,
-    const float leakyrelu_coefficient,
+    const float activation_coefficient,
     Tensor *output) {
   index_t kernel_h = filter->dim(2);
   index_t kernel_w = filter->dim(3);
@@ -243,7 +246,7 @@ MaceStatus DepthwiseConv2dKernel::Compute(
   return depthwise::DepthwiseConv2d(
       context, &kernel_, input, filter, bias, strides[0], paddings.data(),
-      dilations, activation, relux_max_limit, leakyrelu_coefficient,
+      dilations, activation, relux_max_limit, activation_coefficient,
       &input_shape_, output, &kwg_size_);
 }
diff --git a/mace/ops/opencl/image/depthwise_conv2d.h b/mace/ops/opencl/image/depthwise_conv2d.h
index fc8833ddf6e842a6a6f4529822d7270457e76768..52b036ded9bc905261cf049a3a8a134b64cdb93b 100644
--- a/mace/ops/opencl/image/depthwise_conv2d.h
+++ b/mace/ops/opencl/image/depthwise_conv2d.h
@@ -39,7 +39,7 @@ MaceStatus DepthwiseConv2d(OpContext *context,
                            const int *dilations,
                            const ActivationType activation,
                            const float relux_max_limit,
-                           const float leakyrelu_coefficient,
+                           const float activation_coefficient,
                            std::vector<index_t> *prev_input_shape,
                            Tensor *output,
                            uint32_t *kwg_size);
@@ -58,7 +58,7 @@ class DepthwiseConv2dKernel : public OpenCLDepthwiseConv2dKernel {
       const int *dilations,
       const ActivationType activation,
       const float relux_max_limit,
-      const float leakyrelu_coefficient,
+      const float activation_coefficient,
       Tensor *output) override;
 
  private:
diff --git a/mace/ops/opencl/image/depthwise_deconv2d.cc b/mace/ops/opencl/image/depthwise_deconv2d.cc
index 6a8d7eb9919959cc63bcc01f127344fb72ee8af5..59331290fe2a8ac32a94a8677b3c0e95dfa849e8 100644
--- a/mace/ops/opencl/image/depthwise_deconv2d.cc
+++ b/mace/ops/opencl/image/depthwise_deconv2d.cc
@@ -30,7 +30,7 @@ MaceStatus DepthwiseDeconv2dKernel::Compute(
     const int group,
     const ActivationType activation,
     const float relux_max_limit,
-    const float leakyrelu_coefficient,
+    const float activation_coefficient,
     const std::vector<index_t> &output_shape,
     Tensor *output) {
   const index_t batch = output_shape[0];
@@ -95,6 +95,9 @@ MaceStatus DepthwiseDeconv2dKernel::Compute(
       case LEAKYRELU:
         built_options.emplace("-DUSE_LEAKYRELU");
         break;
+      case ELU:
+        built_options.emplace("-DUSE_ELU");
+        break;
       default:
         LOG(FATAL) << "Unknown activation type: " << activation;
     }
@@ -122,7 +125,7 @@ MaceStatus DepthwiseDeconv2dKernel::Compute(
     }
     kernel_.setArg(idx++, *(output->opencl_image()));
     kernel_.setArg(idx++, relux_max_limit);
-    kernel_.setArg(idx++, leakyrelu_coefficient);
+    kernel_.setArg(idx++, activation_coefficient);
     kernel_.setArg(idx++, static_cast<int>(input->dim(1)));
     kernel_.setArg(idx++, static_cast<int>(input->dim(2)));
     kernel_.setArg(idx++, static_cast<int>(height));
diff --git a/mace/ops/opencl/image/depthwise_deconv2d.h b/mace/ops/opencl/image/depthwise_deconv2d.h
index 4643a9c1f46bc50b7d3cafa3e93649854113617f..e58e154bd5b1eedc788d23f32aad62ac9971b269 100644
--- a/mace/ops/opencl/image/depthwise_deconv2d.h
+++ b/mace/ops/opencl/image/depthwise_deconv2d.h
@@ -42,7 +42,7 @@ class DepthwiseDeconv2dKernel : public OpenCLDepthwiseDeconv2dKernel {
      const int group,
      const ActivationType activation,
      const float relux_max_limit,
-     const float leakyrelu_coefficient,
+     const float activation_coefficient,
      const std::vector<index_t> &output_shape,
      Tensor *output) override;
diff --git a/mace/ops/opencl/image/fully_connected.cc b/mace/ops/opencl/image/fully_connected.cc
index 9ec83e91b771d49abc379d4aa312dd5caa90ac18..4fc7d71917e613c9f7f766cbe92204c9b7007fc8 100644
--- a/mace/ops/opencl/image/fully_connected.cc
+++ b/mace/ops/opencl/image/fully_connected.cc
@@ -27,7 +27,7 @@ MaceStatus FullyConnectedKernel::Compute(
     const Tensor *bias,
     const ActivationType activation,
     const float relux_max_limit,
-    const float leakyrelu_coefficient,
+    const float activation_coefficient,
     Tensor *output) {
   std::vector<index_t> output_shape = {input->dim(0), 1, 1, weight->dim(0)};
   std::vector<size_t> output_image_shape;
@@ -71,6 +71,9 @@ MaceStatus FullyConnectedKernel::Compute(
       case LEAKYRELU:
         built_options.emplace("-DUSE_LEAKYRELU");
         break;
+      case ELU:
+        built_options.emplace("-DUSE_ELU");
+        break;
       default:
         LOG(FATAL) << "Unknown activation type: " << activation;
     }
@@ -121,7 +124,7 @@ MaceStatus FullyConnectedKernel::Compute(
     kernel_.setArg(idx++, static_cast<int>(RoundUpDiv4(input->dim(3))));
     kernel_.setArg(idx++, static_cast<int>(output_blocks));
     kernel_.setArg(idx++, relux_max_limit);
-    kernel_.setArg(idx++, leakyrelu_coefficient);
+    kernel_.setArg(idx++, activation_coefficient);
 
     input_shape_ = input->shape();
   }
diff --git a/mace/ops/opencl/image/fully_connected.h b/mace/ops/opencl/image/fully_connected.h
index 46a93a6173a90e926a316a8f299df6b5e7f118ee..cd063cb5d2cef520f1c7419c4ce9590118001b76 100644
--- a/mace/ops/opencl/image/fully_connected.h
+++ b/mace/ops/opencl/image/fully_connected.h
@@ -40,7 +40,7 @@ class FullyConnectedKernel : public OpenCLFullyConnectedKernel {
      const Tensor *bias,
      const ActivationType activation,
      const float relux_max_limit,
-     const float leakyrelu_coefficient,
+     const float activation_coefficient,
      Tensor *output) override;
 
  private:
diff --git a/mace/ops/opencl/image/winograd_conv2d.cc b/mace/ops/opencl/image/winograd_conv2d.cc
index e5c2be9756f19aa70e1cdcfca7bb4c79faf4617b..0ae87013aaec01187ce3d14de1ee6d3302c54f92 100644
--- a/mace/ops/opencl/image/winograd_conv2d.cc
+++ b/mace/ops/opencl/image/winograd_conv2d.cc
@@ -113,7 +113,7 @@ MaceStatus WinogradOutputTransform(OpContext *context,
                                    const int wino_blk_size,
                                    const ActivationType activation,
                                    const float relux_max_limit,
-                                   const float leakyrelu_coefficient,
+                                   const float activation_coefficient,
                                    const bool input_changed,
                                    Tensor *output_tensor,
                                    uint32_t *kwg_size,
@@ -213,7 +213,7 @@ MaceStatus WinogradOutputTransform(OpContext *context,
     kernel->setArg(idx++, static_cast<int>(round_h * round_w));
     kernel->setArg(idx++, static_cast<int>(round_w));
     kernel->setArg(idx++, relux_max_limit);
-    kernel->setArg(idx++, leakyrelu_coefficient);
+    kernel->setArg(idx++, activation_coefficient);
   }
   const std::vector<uint32_t> lws = {*kwg_size / 8, 8, 0};
   std::string tuning_key =
@@ -237,7 +237,7 @@ extern MaceStatus WinogradConv2dK3x3S1(OpContext *context,
                                        const int *paddings,
                                        const ActivationType activation,
                                        const float relux_max_limit,
-                                       const float leakyrelu_coefficient,
+                                       const float activation_coefficient,
                                        const int wino_blk_size,
                                        std::vector<index_t> *prev_input_shape,
                                        Tensor *output,
@@ -355,7 +355,7 @@ extern MaceStatus WinogradConv2dK3x3S1(OpContext *context,
   MACE_RETURN_IF_ERROR(WinogradOutputTransform(
       context, kernels[2], mm_output.get(), bias,
       round_h, round_w, wino_blk_size, activation, relux_max_limit,
-      leakyrelu_coefficient, input_changed, output, kwg_size[2],
+      activation_coefficient, input_changed, output, kwg_size[2],
      &t_output_future))
 
   MergeMultipleFutureWaitFn({t_input_future, mm_future, t_output_future},
diff --git a/mace/ops/ref/activation.cc b/mace/ops/ref/activation.cc
index bb79853108c27ae233fce225ac2bd3172b97ae4d..c940270949c7c455ce5fc989e7c6ae9cba013e7e 100644
--- a/mace/ops/ref/activation.cc
+++ b/mace/ops/ref/activation.cc
@@ -81,7 +81,7 @@ void Activation::DoActivation(const OpContext *context,
       for (index_t i = 0; i < size; ++i) {
         *output_ptr = std::max(*input_ptr, 0.f)
-            + std::min(*input_ptr, 0.f) * leakyrelu_coefficient_;
+            + std::min(*input_ptr, 0.f) * activation_coefficient_;
         ++input_ptr;
         ++output_ptr;
       }
@@ -104,6 +104,19 @@ void Activation::DoActivation(const OpContext *context,
       break;
     }
 
+    case ELU: {
+      for (index_t i = 0; i < input->size(); ++i) {
+        const auto in_val = *input_ptr++;
+        if (in_val < 0) {
+          *output_ptr = (std::exp(in_val) - 1) * activation_coefficient_;
+        } else {
+          *output_ptr = in_val;
+        }
+        output_ptr++;
+      }
+      break;
+    }
+
     case NOOP:break;
 
     default:MACE_NOT_IMPLEMENTED;
diff --git a/mace/ops/reverse.cc b/mace/ops/reverse.cc
index d49f14a146775b25d68414e2034fe28c8186f993..6f6e02478a970a330ec3febf49f38078b8f61394 100644
--- a/mace/ops/reverse.cc
+++ b/mace/ops/reverse.cc
@@ -32,6 +32,8 @@ class ReverseOp : public Operation {
     const Tensor *input = this->Input(INPUT);
     const Tensor *axis = this->Input(AXIS);
     Tensor *output = this->Output(OUTPUT);
+    Tensor::MappingGuard input_guard(input);
+    Tensor::MappingGuard axis_guard(axis);
 
     MACE_CHECK(axis->dim_size() == 1,
                "Only support reverse in one axis now");
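
The reverse.cc hunk is an independent correctness fix: tensor storage may be device-backed, and Tensor::MappingGuard is the RAII wrapper that maps it into host-visible memory for the duration of a CPU access. A hedged sketch of the pattern (the guard's internals are not shown in this patch):

// Map on construction, unmap at scope exit, so the raw pointers from
// data() remain valid for the whole CPU computation.
{
  Tensor::MappingGuard input_guard(input);  // maps input's buffer
  Tensor::MappingGuard axis_guard(axis);    // maps axis's buffer
  const float *in = input->data<float>();
  const int32_t *ax = axis->data<int32_t>();
  // ... CPU-side reverse over in/ax ...
}  // guards release the mappings here
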
diff --git a/micro/ops/utils/activation.cc b/micro/ops/utils/activation.cc
index 29f34d2f9ec7f0ce9dfe3c1ba079397895b67ab3..4df6832827bf048fa0064b51ac8d39c3fea3e71a 100644
--- a/micro/ops/utils/activation.cc
+++ b/micro/ops/utils/activation.cc
@@ -31,17 +31,17 @@ MaceStatus Activation::Init(const framework::Operator *op) {
     atcivation_type = "NOOP";
   }
   const float max_limit = op->GetArgByName("max_limit", 0.0f);
-  const float leakyrelu_coefficient =
-      op->GetArgByName("leakyrelu_coefficient", 0.0f);
+  const float activation_coefficient =
+      op->GetArgByName("activation_coefficient", 0.0f);
 
-  return Init(atcivation_type, max_limit, leakyrelu_coefficient);
+  return Init(atcivation_type, max_limit, activation_coefficient);
 }
 
 MaceStatus Activation::Init(const char *type, const float limit,
-                            const float leakyrelu_coefficient) {
+                            const float activation_coefficient) {
   type_ = StringToActivationType(type);
   limit_ = limit;
-  leakyrelu_coefficient_ = leakyrelu_coefficient;
+  activation_coefficient_ = activation_coefficient;
 
   return MACE_SUCCESS;
 }
@@ -71,7 +71,7 @@ MaceStatus Activation::Compute(const mifloat *input_ptr,
       for (int32_t i = 0; i < size; ++i) {
         float input = *input_ptr;
         *output_ptr = base::max(input, 0.f) +
-            base::min(input, 0.f) * leakyrelu_coefficient_;  // NOLINT
+            base::min(input, 0.f) * activation_coefficient_;  // NOLINT
         ++input_ptr;
         ++output_ptr;
       }
diff --git a/micro/ops/utils/activation.h b/micro/ops/utils/activation.h
index f27861574c785ee722f311ae5ae37adeb8558538..2a4d5c677831114ded8fe192883fc9bccff67988 100644
--- a/micro/ops/utils/activation.h
+++ b/micro/ops/utils/activation.h
@@ -44,7 +44,7 @@ class Activation {
   MaceStatus Init(const framework::Operator *op);
   MaceStatus Init(const char *type, const float limit,
-                  const float leakyrelu_coefficient);
+                  const float activation_coefficient);
   MaceStatus Compute(const mifloat *input_ptr,
                      const int32_t size, mifloat *output_ptr);
   ActivationType GetActivationType();
@@ -55,7 +55,7 @@ class Activation {
  private:
   ActivationType type_;
   float limit_;
-  float leakyrelu_coefficient_;
+  float activation_coefficient_;
 };
 
 }  // namespace ops
diff --git a/micro/test/ccunit/micro/ops/activation_test.cc b/micro/test/ccunit/micro/ops/activation_test.cc
index 693f13cd6ffbaa9e300207b5bd5f82f1d7091055..b38f5053b8a8cf7697b8da7ddd10e036a2ef79f3 100644
--- a/micro/test/ccunit/micro/ops/activation_test.cc
+++ b/micro/test/ccunit/micro/ops/activation_test.cc
@@ -67,7 +67,7 @@ void TestSimpleLeakyRelu() {
   framework::SubstituteOp substitude_op;
   substitude_op.AddInput(input, input_dims, 4)
       .AddRepeatArg("activation", activation_type, arg_type_len)
-      .AddArg("leakyrelu_coefficient", 0.1f)
+      .AddArg("activation_coefficient", 0.1f)
       .AddOutput(output, output_dims, 4);
 
   activation_op.Init(NULL, reinterpret_cast(
diff --git a/test/ccbenchmark/mace/ops/activation_benchmark.cc b/test/ccbenchmark/mace/ops/activation_benchmark.cc
index a991e6d0e3c4d321042fb9983bfe6a03fff37d7a..18fe6ffece1e1a78386d5bfb828d896e653f4bc5 100644
--- a/test/ccbenchmark/mace/ops/activation_benchmark.cc
+++ b/test/ccbenchmark/mace/ops/activation_benchmark.cc
@@ -250,14 +250,14 @@ void EluBenchmark(int iters, int batch, int channels, int height, int width) {
   } else {
     MACE_NOT_IMPLEMENTED;
   }
-  net.AddRandomInput<D, T>("Alpha", {channels}, true);
 
   OpDefBuilder("Activation", "EluBM")
       .Input("Input")
-      .Input("Alpha")
      .Output("Output")
       .AddStringArg("activation", "ELU")
+      .AddFloatArg("activation_coefficient", 1.0)
       .AddIntArg("T", static_cast<int>(DataTypeToEnum<T>::value))
+
       .Finalize(net.NewOperatorDef());
 
   // Warm-up
.Input("Alpha") .Output("Output") .AddStringArg("activation", "ELU") + .AddFloatArg("activation_coefficient", 2.0) .Finalize(net.NewOperatorDef()); // Run @@ -261,9 +260,9 @@ void TestSimpleElu() { "Input", DataFormat::NHWC, "InputNCHW", DataFormat::NCHW); OpDefBuilder("Activation", "EluTest") .Input("InputNCHW") - .Input("Alpha") .Output("OutputNCHW") .AddStringArg("activation", "ELU") + .AddFloatArg("activation_coefficient", 2.0) .Finalize(net.NewOperatorDef()); // Run @@ -275,9 +274,9 @@ void TestSimpleElu() { auto expected = net.CreateTensor( {2, 2, 2, 2}, {-1.998176236068891, 7, -1.9950424956466672, 6, -1.986524106001829, - -2.9797861590027437, -1.9633687222225316, -2.9450530833337973, + 5, -1.9633687222225316, 4, -1.900425863264272, 3, -1.7293294335267746, 2, -1.2642411176571153, - -1.896361676485673, 0, 0}); + 1, 0, 0}); ExpectTensorNear(*expected, *net.GetOutput("Output"), 1e-5); } } // namespace @@ -439,7 +438,7 @@ void TestBFloat16(const char *activation) { .Input("Alpha") .Output("Output") .AddStringArg("activation", activation) - .AddFloatArg("leakyrelu_coefficient", 0.1) + .AddFloatArg("activation_coefficient", 0.1) .AddFloatArg("max_limit", 6) .AddIntArg("T", static_cast(DT_FLOAT)) .Finalize(net.NewOperatorDef()); @@ -450,7 +449,7 @@ void TestBFloat16(const char *activation) { .Input("BF16Alpha") .Output("BF16Output") .AddStringArg("activation", activation) - .AddFloatArg("leakyrelu_coefficient", 0.1) + .AddFloatArg("activation_coefficient", 0.1) .AddFloatArg("max_limit", 6) .AddIntArg("T", static_cast(DT_BFLOAT16)) .Finalize(net.NewOperatorDef()); diff --git a/test/ccunit/mace/ops/batch_norm_test.cc b/test/ccunit/mace/ops/batch_norm_test.cc index 0a07fc64cf60ef44513fc9d6d547c1b26d105edb..25edcaf5c37dcec1fdf7a517f40018ae579db4eb 100644 --- a/test/ccunit/mace/ops/batch_norm_test.cc +++ b/test/ccunit/mace/ops/batch_norm_test.cc @@ -108,7 +108,7 @@ TEST_F(BatchNormOpTest, SimpleRandomOPENCL) { .AddFloatArg("epsilon", 1e-3) .Output("OutputNCHW") .AddStringArg("activation", "LEAKYRELU") - .AddFloatArg("leakyrelu_coefficient", 0.1) + .AddFloatArg("activation_coefficient", 0.1) .Finalize(net.NewOperatorDef()); // run cpu @@ -131,7 +131,7 @@ TEST_F(BatchNormOpTest, SimpleRandomOPENCL) { .AddFloatArg("epsilon", 1e-3) .Output("Output") .AddStringArg("activation", "LEAKYRELU") - .AddFloatArg("leakyrelu_coefficient", 0.1) + .AddFloatArg("activation_coefficient", 0.1) .Finalize(net.NewOperatorDef()); net.Setup(DeviceType::GPU); diff --git a/test/ccunit/mace/ops/conv_2d_test.cc b/test/ccunit/mace/ops/conv_2d_test.cc index 8205c59064dfbae7e12b3fc4a0a211d3b8848722..6d04d05e682f53e18f8f6710777c23e46a007dce 100644 --- a/test/ccunit/mace/ops/conv_2d_test.cc +++ b/test/ccunit/mace/ops/conv_2d_test.cc @@ -684,7 +684,7 @@ void TestComplexConvNxN(const std::vector &shape, .AddIntArg("padding", type) .AddIntsArg("dilations", {1, 1}) .AddStringArg("activation", "LEAKYRELU") - .AddFloatArg("leakyrelu_coefficient", 0.1) + .AddFloatArg("activation_coefficient", 0.1) .AddIntArg("T", static_cast(DataTypeToEnum::value)) .Finalize(net.NewOperatorDef()); @@ -709,7 +709,7 @@ void TestComplexConvNxN(const std::vector &shape, .AddIntArg("padding", type) .AddIntsArg("dilations", {1, 1}) .AddStringArg("activation", "LEAKYRELU") - .AddFloatArg("leakyrelu_coefficient", 0.1) + .AddFloatArg("activation_coefficient", 0.1) .AddIntArg("T", static_cast(DataTypeToEnum::value)) .AddIntArg("wino_block_size", wino_blk_size) .Finalize(net.NewOperatorDef()); diff --git a/test/ccunit/mace/ops/deconv_2d_test.cc 
diff --git a/test/ccunit/mace/ops/batch_norm_test.cc b/test/ccunit/mace/ops/batch_norm_test.cc
index 0a07fc64cf60ef44513fc9d6d547c1b26d105edb..25edcaf5c37dcec1fdf7a517f40018ae579db4eb 100644
--- a/test/ccunit/mace/ops/batch_norm_test.cc
+++ b/test/ccunit/mace/ops/batch_norm_test.cc
@@ -108,7 +108,7 @@ TEST_F(BatchNormOpTest, SimpleRandomOPENCL) {
       .AddFloatArg("epsilon", 1e-3)
       .Output("OutputNCHW")
       .AddStringArg("activation", "LEAKYRELU")
-      .AddFloatArg("leakyrelu_coefficient", 0.1)
+      .AddFloatArg("activation_coefficient", 0.1)
       .Finalize(net.NewOperatorDef())
 
   // run cpu
@@ -131,7 +131,7 @@ TEST_F(BatchNormOpTest, SimpleRandomOPENCL) {
       .AddFloatArg("epsilon", 1e-3)
       .Output("Output")
       .AddStringArg("activation", "LEAKYRELU")
-      .AddFloatArg("leakyrelu_coefficient", 0.1)
+      .AddFloatArg("activation_coefficient", 0.1)
       .Finalize(net.NewOperatorDef());
 
   net.Setup(DeviceType::GPU);
diff --git a/test/ccunit/mace/ops/conv_2d_test.cc b/test/ccunit/mace/ops/conv_2d_test.cc
index 8205c59064dfbae7e12b3fc4a0a211d3b8848722..6d04d05e682f53e18f8f6710777c23e46a007dce 100644
--- a/test/ccunit/mace/ops/conv_2d_test.cc
+++ b/test/ccunit/mace/ops/conv_2d_test.cc
@@ -684,7 +684,7 @@ void TestComplexConvNxN(const std::vector<index_t> &shape,
         .AddIntArg("padding", type)
         .AddIntsArg("dilations", {1, 1})
         .AddStringArg("activation", "LEAKYRELU")
-        .AddFloatArg("leakyrelu_coefficient", 0.1)
+        .AddFloatArg("activation_coefficient", 0.1)
         .AddIntArg("T", static_cast<int>(DataTypeToEnum<T>::value))
         .Finalize(net.NewOperatorDef());
 
@@ -709,7 +709,7 @@ void TestComplexConvNxN(const std::vector<index_t> &shape,
         .AddIntArg("padding", type)
         .AddIntsArg("dilations", {1, 1})
         .AddStringArg("activation", "LEAKYRELU")
-        .AddFloatArg("leakyrelu_coefficient", 0.1)
+        .AddFloatArg("activation_coefficient", 0.1)
         .AddIntArg("T", static_cast<int>(DataTypeToEnum<T>::value))
         .AddIntArg("wino_block_size", wino_blk_size)
         .Finalize(net.NewOperatorDef());
diff --git a/test/ccunit/mace/ops/deconv_2d_test.cc b/test/ccunit/mace/ops/deconv_2d_test.cc
index 9ea8161ef47de3e40e4f1260e00ead158e48d740..83c92efac8eb6ea9345396b3f59d7f06dc0ac150 100644
--- a/test/ccunit/mace/ops/deconv_2d_test.cc
+++ b/test/ccunit/mace/ops/deconv_2d_test.cc
@@ -421,7 +421,7 @@ void TestComplexDeconvNxN(const int batch,
           .AddIntsArg("padding_values", paddings)
           .AddIntArg("framework_type", model_type)
           .AddStringArg("activation", "LEAKYRELU")
-          .AddFloatArg("leakyrelu_coefficient", 0.1)
+          .AddFloatArg("activation_coefficient", 0.1)
           .AddIntArg("T", static_cast<int>(DataTypeToEnum<T>::value))
           .Finalize(net.NewOperatorDef());
     } else {
@@ -459,7 +459,7 @@ void TestComplexDeconvNxN(const int batch,
           .AddIntsArg("padding_values", paddings)
           .AddIntArg("framework_type", model_type)
           .AddStringArg("activation", "LEAKYRELU")
-          .AddFloatArg("leakyrelu_coefficient", 0.1)
+          .AddFloatArg("activation_coefficient", 0.1)
           .AddIntArg("T", static_cast<int>(DataTypeToEnum<T>::value))
           .Finalize(net.NewOperatorDef());
     } else {
diff --git a/test/ccunit/mace/ops/depthwise_conv2d_test.cc b/test/ccunit/mace/ops/depthwise_conv2d_test.cc
index b0db1b3deb1f6344956e1d879b88ad6ae5b1e403..f3585fcdec64601cea63ccd3a0f6eb1249e6ca17 100644
--- a/test/ccunit/mace/ops/depthwise_conv2d_test.cc
+++ b/test/ccunit/mace/ops/depthwise_conv2d_test.cc
@@ -261,7 +261,7 @@ void TestNxNS12(const index_t height, const index_t width) {
         .AddIntsArg("dilations", {1, 1})
         .AddIntArg("T", static_cast<int>(DataTypeToEnum<T>::value))
         .AddStringArg("activation", "LEAKYRELU")
-        .AddFloatArg("leakyrelu_coefficient", 0.1)
+        .AddFloatArg("activation_coefficient", 0.1)
         .Finalize(net.NewOperatorDef());
 
     // Run on cpu
@@ -284,7 +284,7 @@ void TestNxNS12(const index_t height, const index_t width) {
         .AddIntsArg("dilations", {1, 1})
         .AddIntArg("T", static_cast<int>(DataTypeToEnum<T>::value))
         .AddStringArg("activation", "LEAKYRELU")
-        .AddFloatArg("leakyrelu_coefficient", 0.1)
+        .AddFloatArg("activation_coefficient", 0.1)
         .Finalize(net.NewOperatorDef());
 
     net.RunOp(DeviceType::GPU);
diff --git a/test/ccunit/mace/ops/depthwise_deconv2d_test.cc b/test/ccunit/mace/ops/depthwise_deconv2d_test.cc
index fda0cf59b8d7182c896ee55b6290e1af02211ca3..0189939bee78faa7552d5f2bc544105610ccd25c 100644
--- a/test/ccunit/mace/ops/depthwise_deconv2d_test.cc
+++ b/test/ccunit/mace/ops/depthwise_deconv2d_test.cc
@@ -206,7 +206,7 @@ void RandomTest(index_t batch,
         .AddIntArg("group", channel)
         .AddIntsArg("dilations", {1, 1})
         .AddStringArg("activation", "LEAKYRELU")
-        .AddFloatArg("leakyrelu_coefficient", 0.1f)
+        .AddFloatArg("activation_coefficient", 0.1f)
         .AddIntArg("T", static_cast<int>(DataTypeToEnum<T>::value))
         .Finalize(net.NewOperatorDef());
 
     // Run
@@ -229,7 +229,7 @@ void RandomTest(index_t batch,
         .AddIntsArg("padding_values", {padding, padding})
         .AddIntArg("group", channel)
         .AddStringArg("activation", "LEAKYRELU")
-        .AddFloatArg("leakyrelu_coefficient", 0.1f)
+        .AddFloatArg("activation_coefficient", 0.1f)
         .AddIntArg("T", static_cast<int>(DataTypeToEnum<T>::value))
         .Finalize(net.NewOperatorDef());
diff --git a/test/ccunit/mace/ops/fully_connected_test.cc b/test/ccunit/mace/ops/fully_connected_test.cc
index 25dcbebbad9477c873da5ab8c943e3040da7cf00..7de4ab9fb030bab16e334f5f638c0b1f6db2aa7d 100644
--- a/test/ccunit/mace/ops/fully_connected_test.cc
+++ b/test/ccunit/mace/ops/fully_connected_test.cc
@@ -138,7 +138,7 @@ void Random(const index_t batch,
         .Input("Bias")
         .Output("OutputNCHW")
         .AddStringArg("activation", "LEAKYRELU")
-        .AddFloatArg("leakyrelu_coefficient", 0.1f)
+        .AddFloatArg("activation_coefficient", 0.1f)
         .Finalize(net.NewOperatorDef());
 
     // run cpu
@@ -158,7 +158,7 @@ void Random(const index_t batch,
         .Input("Bias")
         .Output("Output")
         .AddStringArg("activation", "LEAKYRELU")
-        .AddFloatArg("leakyrelu_coefficient", 0.1f)
+        .AddFloatArg("activation_coefficient", 0.1f)
         .AddIntArg("T", static_cast<int>(DataTypeToEnum<T>::value))
         .Finalize(net.NewOperatorDef());
diff --git a/tools/python/transform/base_converter.py b/tools/python/transform/base_converter.py
index 3b6279ff411a9bd7fc2832a40706b1dbe4f6ad7f..2862b785e9eed78ea68bc81d74c778833f94c22a 100644
--- a/tools/python/transform/base_converter.py
+++ b/tools/python/transform/base_converter.py
@@ -223,7 +223,7 @@ class MaceKeyword(object):
     mace_element_type_str = 'type'
     mace_activation_type_str = 'activation'
     mace_activation_max_limit_str = 'max_limit'
-    mace_activation_leakyrelu_coefficient_str = 'leakyrelu_coefficient'
+    mace_activation_coefficient_str = 'activation_coefficient'
     mace_resize_size_str = 'size'
    mace_batch_to_space_crops_str = 'crops'
     mace_paddings_str = 'paddings'
diff --git a/tools/python/transform/caffe_converter.py b/tools/python/transform/caffe_converter.py
index 09fd9c4bc99e022ae16da2bc5c6b427cc5f6d3ec..0eddb5f958871004aff28bc279a95fbb6dc36529 100644
--- a/tools/python/transform/caffe_converter.py
+++ b/tools/python/transform/caffe_converter.py
@@ -166,6 +166,7 @@ class CaffeConverter(base_converter.ConverterInterface):
         'TanH': ActivationType.TANH,
         'Sigmoid': ActivationType.SIGMOID,
         'Clip': ActivationType.RELUX,
+        'ELU': ActivationType.ELU,
     }
 
     def __init__(self, option, src_model_file, src_weight_file):
@@ -181,6 +182,7 @@ class CaffeConverter(base_converter.ConverterInterface):
             'Sigmoid': self.convert_activation,
             'PReLU': self.convert_activation,
             'Clip': self.convert_activation,
+            'ELU': self.convert_activation,
             'Pooling': self.convert_pooling,
             'Concat': self.convert_concat,
             'Slice': self.convert_slice,
@@ -509,7 +511,7 @@ class CaffeConverter(base_converter.ConverterInterface):
             negative_slope = caffe_op.layer.relu_param.negative_slope
             if negative_slope != 0:
                 param_arg = op.arg.add()
-                param_arg.name = MaceKeyword.mace_activation_leakyrelu_coefficient_str  # noqa
+                param_arg.name = MaceKeyword.mace_activation_coefficient_str
                 param_arg.f = caffe_op.layer.relu_param.negative_slope
                 type_arg.s = six.b(ActivationType.LEAKYRELU.name)
         elif caffe_op.type == 'ReLU6':
@@ -522,6 +524,11 @@ class CaffeConverter(base_converter.ConverterInterface):
             limit_arg = op.arg.add()
             limit_arg.name = MaceKeyword.mace_activation_max_limit_str
             limit_arg.f = caffe_op.layer.clip_param.max
+        elif caffe_op.type == 'ELU':
+            # TODO(luxuhui): we have not verified ELU for Caffe
+            param_arg = op.arg.add()
+            param_arg.name = MaceKeyword.mace_activation_coefficient_str
+            param_arg.f = caffe_op.layer.elu_param.alpha
 
     def convert_folded_batchnorm(self, caffe_op):
         op = self.convert_general_op(caffe_op)
diff --git a/tools/python/transform/onnx_converter.py b/tools/python/transform/onnx_converter.py
index 8dd0dbd1756b64930da52ea31b9da02135168753..041b625c822a54d58459af38dcb78e488682154b 100644
--- a/tools/python/transform/onnx_converter.py
+++ b/tools/python/transform/onnx_converter.py
@@ -629,18 +629,16 @@ class OnnxConverter(base_converter.ConverterInterface):
         type_arg.s = six.b(self.activation_type[node.op_type].name)
 
         if "alpha" in node.attrs:
-            alpha_tensor_name = node.name + '_alpha'
-            alpha_value = np.array([node.attrs["alpha"]])
-            self.add_tensor(alpha_tensor_name, alpha_value.reshape(-1).shape,
-                            mace_pb2.DT_FLOAT, alpha_value)
-            op.input.extend([alpha_tensor_name])
+            alpha_value = node.attrs["alpha"]
         else:
             if node.op_type == OnnxOpType.LeakyRelu.name:
                 alpha_value = 0.01
+            elif node.op_type == OnnxOpType.Elu.name:
+                alpha_value = 1.0
             else:
                 alpha_value = 0
         alpha_arg = op.arg.add()
-        alpha_arg.name = MaceKeyword.mace_activation_leakyrelu_coefficient_str
+        alpha_arg.name = MaceKeyword.mace_activation_coefficient_str
         alpha_arg.f = alpha_value
 
     def convert_affine(self, node):
diff --git a/tools/python/transform/tensorflow_converter.py b/tools/python/transform/tensorflow_converter.py
index 90ab048fcb83c8a84ec8bbb8cb48b503921ed864..1ee69b0dc3889a63e77cdf8c8c8bd567e94cb5ff 100644
--- a/tools/python/transform/tensorflow_converter.py
+++ b/tools/python/transform/tensorflow_converter.py
@@ -70,6 +70,7 @@ TFSupportedOps = [
     'DepthwiseConv2dNative',
     'DepthToSpace',
     'Div',
+    'Elu',
     'Equal',
     'ExpandDims',
     'ExtractImagePatches',
@@ -190,6 +191,7 @@ class TensorflowConverter(base_converter.ConverterInterface):
     }
 
     activation_type = {
+        TFOpType.Elu.name: ActivationType.ELU,
         TFOpType.Relu.name: ActivationType.RELU,
         TFOpType.Relu6.name: ActivationType.RELUX,
         TFOpType.Tanh.name: ActivationType.TANH,
@@ -232,6 +234,7 @@ class TensorflowConverter(base_converter.ConverterInterface):
         TFOpType.DepthwiseConv2dNative.name: self.convert_conv2d,
         TFOpType.DepthToSpace.name: self.convert_space_depth,
         TFOpType.Div.name: self.convert_elementwise,
+        TFOpType.Elu.name: self.convert_activation,
         TFOpType.Equal.name: self.convert_elementwise,
         TFOpType.ExpandDims.name: self.convert_expand_dims,
         TFOpType.ExtractImagePatches.name:
@@ -668,11 +671,18 @@ class TensorflowConverter(base_converter.ConverterInterface):
             limit_arg = op.arg.add()
             limit_arg.name = MaceKeyword.mace_activation_max_limit_str
             limit_arg.f = 6.0
-        elif tf_op.type == TFOpType.LeakyRelu.name:
+        elif tf_op.type == TFOpType.LeakyRelu.name or \
+                tf_op.type == TFOpType.Elu.name:
             alpha_arg = op.arg.add()
             alpha_arg.name = \
-                MaceKeyword.mace_activation_leakyrelu_coefficient_str
-            alpha_arg.f = tf_op.get_attr(tf_alpha_str)
+                MaceKeyword.mace_activation_coefficient_str
+            try:
+                alpha_arg.f = tf_op.get_attr(tf_alpha_str)
+            except ValueError:
+                if tf_op.type == TFOpType.LeakyRelu.name:
+                    alpha_arg.f = 0.0
+                else:
+                    alpha_arg.f = 1.0
 
     def convert_fill(self, tf_op):
         op = self.convert_general_op(tf_op)
diff --git a/tools/python/transform/transformer.py b/tools/python/transform/transformer.py
index c26f275350718dab74ad65ac68106f65c3859f11..fbaff40654b8f8638b134b32b0e8044f431fa6f2 100644
--- a/tools/python/transform/transformer.py
+++ b/tools/python/transform/transformer.py
@@ -980,10 +980,7 @@ class Transformer(base_converter.ConverterInterface):
                     [ActivationType.RELU.name,
                      ActivationType.RELUX.name])
             else:
-                fold_consumer = (
-                    act_type != ActivationType.PRELU.name
-                    and act_type != ActivationType.ELU.name
-                )
+                fold_consumer = (act_type != ActivationType.PRELU.name)
                 # during quantization, only fold relu/relux
                 if (self._option.quantize_stat or self._option.quantize) \
                         and act_type not in [ActivationType.RELU.name,
@@ -997,7 +994,7 @@ class Transformer(base_converter.ConverterInterface):
                         if arg.name == MaceKeyword.mace_activation_type_str \
                                 or arg.name == \
                                 MaceKeyword.mace_activation_max_limit_str \
-                                or arg.name == MaceKeyword.mace_activation_leakyrelu_coefficient_str:  # noqa
+                                or arg.name == MaceKeyword.mace_activation_coefficient_str:  # noqa
                             op.arg.extend([arg])
                     self.replace_quantize_info(op, consumer_op)
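
Taken together, the converters now resolve a missing coefficient per framework: ONNX falls back to 0.01 for LeakyRelu and 1.0 for Elu when no alpha attribute is present, while the TensorFlow path uses 0.0 and 1.0 respectively when get_attr raises. A hypothetical C++ condensation of that policy, purely for illustration (no such helper exists in the patch):

#include <string>

// Hypothetical summary of the fallback defaults above; not part of MACE.
inline float DefaultActivationCoefficient(const std::string &framework,
                                          const std::string &op_type) {
  if (op_type == "Elu") return 1.0f;             // ONNX and TensorFlow
  if (op_type == "LeakyRelu")
    return framework == "onnx" ? 0.01f : 0.0f;   // ONNX spec default vs. TF
  return 0.0f;                                   // other activations
}
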