未验证 提交 be1bdcde 编写于 作者: Z zjd1988 提交者: GitHub

add mish arm implementation (ref. ncnn mish arm implementation), and fix winograd bug (updated) (#408)

* modify src and test codes for yolov4

* recover crop source code

* add yolov4 example code

* add mish ops and yolov4 example code

* modify example/CMakeList.txt and tm_yolov4.cpp

* delete yolov4 demo codes

* rm yolov4 cmakelist.txt

* Update mish.c

register mish op with OP_MISH_NAME

* add sys_free to avoid memory leak

* rm test.jpg for yolov4

* fix coding problems

* add mish arm implementation and fix winograd conv bug

* add div_ps arm32 implement

* add author description
上级 9848204f
......@@ -1438,7 +1438,10 @@ int wino_conv_hcl_run_1(struct ir_tensor* input_tensor, struct ir_tensor* filter
}
}
sys_free(trans_inp);
trans_output_1(trans_out, output, biases_buf, 0, block_h, block_w, 0, out_c, out_hw, out_w, resi_h,
int bias_term = 0;
if(biases_buf != NULL)
bias_term = 1;
trans_output_1(trans_out, output, biases_buf, bias_term, block_h, block_w, 0, out_c, out_hw, out_w, resi_h,
resi_w, act_type);
sys_free(trans_out);
......
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: 942002795@qq.com
*/
#include "math.h"
#include <arm_neon.h>
#include "mish_kernel_arm.h"
#include "mish_math_func.h"
/* Mish activation over one contiguous slice: out = in * tanh(ln(1 + exp(in))).
 *
 * data   : points to an int holding the per-slice element count (step)
 * id     : index of the slice to process within the flat input/output buffers
 * i      : unused; kept so the signature matches the threaded-kernel convention
 *
 * The main loop processes 4 floats per iteration with NEON; the scalar tail
 * handles the remaining (step % 4) elements.
 */
static void mish_kernel(int i, int id, void* data, const float* input, float* output)
{
    int step = (( int* )data)[0];
    const float* cur_input = input + id * step;
    float* cur_output = output + id * step;

    /* vectorized body: floor(step / 4) * 4 elements */
    for (int j = 0; j < (step & -4); j += 4)
    {
        float32x4_t _input = vld1q_f32(cur_input);
        float32x4_t out = vmulq_f32(_input, tanh_ps(log_ps(vaddq_f32(exp_ps(_input), vdupq_n_f32(1.f)))));
        vst1q_f32(cur_output, out);
        cur_input += 4;
        cur_output += 4;
    }

    /* scalar tail.
     * BUG FIX: the original read `*input++` (the slice base pointer, not the
     * tail position) and computed tanh(softplus(x)) WITHOUT the leading x*
     * factor, so tail elements were neither read from the right place nor
     * given the mish value.  Also use the float variants expf/logf/tanhf. */
    for (int j = step & ~3; j < step; j++)
    {
        float v = *cur_input++;
        *cur_output++ = v * tanhf(logf(expf(v) + 1.f));
    }
}
/* Run the Mish activation over a whole NCHW tensor.
 *
 * output_tensor : destination (may alias input_tensor->data; the kernel only
 *                 reads each element before writing it back)
 * input_tensor  : source, laid out as dims[0..3] = N,C,H,W
 * num_thread    : number of OpenMP threads for the per-channel loop
 *
 * Returns 0 on success.
 *
 * FIX: removed a leftover debug loop that printf'd the first 10 output
 * values on every call (stdout spam on each inference; printf was also used
 * without including <stdio.h>).
 */
int mish_run(struct ir_tensor* output_tensor, struct ir_tensor* input_tensor, int num_thread)
{
    float* data = ( float* )input_tensor->data;
    float* out_data = ( float* )output_tensor->data;

    /* one parallel work item per (batch, channel) plane */
    int chan_num = (input_tensor->dims[0]) * (input_tensor->dims[1]);
    int chan_size = (input_tensor->dims[2]) * (input_tensor->dims[3]);

#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < chan_num; i++)
    {
        int offset = i * chan_size;
        mish_kernel(0, 0, &chan_size, data + offset, out_data + offset);
    }

    return 0;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: 942002795@qq.com
*/
#ifndef _MISH_KERNEL_ARM_H_
#define _MISH_KERNEL_ARM_H_
#include "tengine_ir.h"
/* Apply the Mish activation (x * tanh(softplus(x))) element-wise from
 * input_tensor to output_tensor using num_thread OpenMP threads.
 * Returns 0 on success. */
int mish_run(struct ir_tensor* output_tensor, struct ir_tensor* input_tensor, int num_thread);
#endif
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: 942002795@qq.com
*/
/*
refer to ncnn
https://github.com/Tencent/ncnn/blob/master/src/layer/arm/neon_mathfun_tanh.h
https://github.com/Tencent/ncnn/blob/master/src/layer/arm/neon_mathfun.h
*/
#include <arm_neon.h>
/* Element-wise a / b for 4 floats.
 * aarch64 has a hardware divide; armv7 approximates via a reciprocal estimate
 * refined by one Newton-Raphson step (vrecpsq_f32).  A second refinement step
 * is left commented out — enabling it would trade a multiply for extra
 * precision on armv7. */
static inline float32x4_t div_ps(float32x4_t a, float32x4_t b)
{
#if __aarch64__
    return vdivq_f32(a, b);
#else
    float32x4_t reciprocal = vrecpeq_f32(b);
    reciprocal = vmulq_f32(vrecpsq_f32(b, reciprocal), reciprocal);
    // reciprocal = vmulq_f32(vrecpsq_f32(b, reciprocal), reciprocal);
    return vmulq_f32(a, reciprocal);
#endif
}
#define c_exp_hi 88.3762626647949f
#define c_exp_lo -88.3762626647949f
#define c_cephes_LOG2EF 1.44269504088896341
#define c_cephes_exp_C1 0.693359375
#define c_cephes_exp_C2 -2.12194440e-4
#define c_cephes_exp_p0 1.9875691500E-4
#define c_cephes_exp_p1 1.3981999507E-3
#define c_cephes_exp_p2 8.3334519073E-3
#define c_cephes_exp_p3 4.1665795894E-2
#define c_cephes_exp_p4 1.6666665459E-1
#define c_cephes_exp_p5 5.0000001201E-1
/* exp() computed for 4 float at once */
/* exp() computed for 4 floats at once (Cephes-style range reduction).
 * Decomposes x = g + n*ln(2), evaluates a degree-5 polynomial for exp(g),
 * then scales by 2^n built directly in the float exponent bits.
 * Input is clamped to +/-88.37, the limits of finite float exp. */
static inline float32x4_t exp_ps(float32x4_t x)
{
    float32x4_t tmp, fx;
    float32x4_t one = vdupq_n_f32(1);

    /* clamp to the representable range of expf */
    x = vminq_f32(x, vdupq_n_f32(c_exp_hi));
    x = vmaxq_f32(x, vdupq_n_f32(c_exp_lo));

    /* express exp(x) as exp(g + n*log(2)): fx = x * log2(e) + 0.5 */
    fx = vmlaq_f32(vdupq_n_f32(0.5f), x, vdupq_n_f32(c_cephes_LOG2EF));

    /* perform a floorf: truncate, then subtract 1 where truncation rounded up */
    tmp = vcvtq_f32_s32(vcvtq_s32_f32(fx));

    /* if greater, substract 1 */
    uint32x4_t mask = vcgtq_f32(tmp, fx);
    mask = vandq_u32(mask, vreinterpretq_u32_f32(one));

    fx = vsubq_f32(tmp, vreinterpretq_f32_u32(mask));

    /* remove n*ln(2) from x in two parts (C1 + C2 = ln(2) split for accuracy) */
    tmp = vmulq_f32(fx, vdupq_n_f32(c_cephes_exp_C1));
    float32x4_t z = vmulq_f32(fx, vdupq_n_f32(c_cephes_exp_C2));
    x = vsubq_f32(x, tmp);
    x = vsubq_f32(x, z);

    /* degree-5 polynomial approximation of exp on the reduced argument */
    static const float cephes_exp_p[6] = {c_cephes_exp_p0, c_cephes_exp_p1, c_cephes_exp_p2, c_cephes_exp_p3, c_cephes_exp_p4, c_cephes_exp_p5};
    float32x4_t y = vld1q_dup_f32(cephes_exp_p + 0);
    float32x4_t c1 = vld1q_dup_f32(cephes_exp_p + 1);
    float32x4_t c2 = vld1q_dup_f32(cephes_exp_p + 2);
    float32x4_t c3 = vld1q_dup_f32(cephes_exp_p + 3);
    float32x4_t c4 = vld1q_dup_f32(cephes_exp_p + 4);
    float32x4_t c5 = vld1q_dup_f32(cephes_exp_p + 5);

    y = vmulq_f32(y, x);
    z = vmulq_f32(x, x);

    y = vaddq_f32(y, c1);
    y = vmulq_f32(y, x);
    y = vaddq_f32(y, c2);
    y = vmulq_f32(y, x);
    y = vaddq_f32(y, c3);
    y = vmulq_f32(y, x);
    y = vaddq_f32(y, c4);
    y = vmulq_f32(y, x);
    y = vaddq_f32(y, c5);

    y = vmulq_f32(y, z);
    y = vaddq_f32(y, x);
    y = vaddq_f32(y, one);

    /* build 2^n by writing n+127 into the float exponent field */
    int32x4_t mm;
    mm = vcvtq_s32_f32(fx);
    mm = vaddq_s32(mm, vdupq_n_s32(0x7f));
    mm = vshlq_n_s32(mm, 23);
    float32x4_t pow2n = vreinterpretq_f32_s32(mm);

    y = vmulq_f32(y, pow2n);
    return y;
}
// tanh neon vector version
// refer the scalar version from Cephes Math Library
#define c_cephes_HALFMAXLOGF 44.014845935754205f
#define c_cephes_tanh_C1 0.625f
#define c_cephes_tanh_p0 -5.70498872745E-3
#define c_cephes_tanh_p1 +2.06390887954E-2
#define c_cephes_tanh_p2 -5.37397155531E-2
#define c_cephes_tanh_p3 +1.33314422036E-1
#define c_cephes_tanh_p4 -3.33332819422E-1
/* Single precision hyperbolic tangent computed for 4 simultaneous float */
static inline float32x4_t tanh_ps(float32x4_t x)
{
float32x4_t x2 = vabsq_f32(x);
uint32x4_t mask_l = vcgeq_f32(x2, vdupq_n_f32(c_cephes_tanh_C1));
uint32x4_t mask_l2 = vcgtq_f32(x2, vdupq_n_f32(c_cephes_HALFMAXLOGF));
// abs(x) >= 0.625
// tanh(x) = 1 − 2 / (exp(2x) + 1)
float32x4_t _one = vdupq_n_f32(1.f);
float32x4_t _two = vdupq_n_f32(2.f);
float32x4_t exp_x_x = exp_ps(vaddq_f32(x, x));
#if __aarch64__
float32x4_t y0 = vsubq_f32(_one, vdivq_f32(_two, vaddq_f32(exp_x_x, _one)));
#else
float32x4_t y0 = vsubq_f32(_one, div_ps(_two, vaddq_f32(exp_x_x, _one)));
#endif
// abs(x) < 0.625
/*
z = x2 * x2;
z =
(((( -5.70498872745E-3 * z
+ 2.06390887954E-2) * z
- 5.37397155531E-2) * z
+ 1.33314422036E-1) * z
- 3.33332819422E-1) * z * x
+ x;
*/
static const float cephes_tanh_p[5] = {c_cephes_tanh_p0, c_cephes_tanh_p1, c_cephes_tanh_p2, c_cephes_tanh_p3, c_cephes_tanh_p4};
float32x4_t y = vld1q_dup_f32(cephes_tanh_p + 0);
float32x4_t c1 = vld1q_dup_f32(cephes_tanh_p + 1);
float32x4_t c2 = vld1q_dup_f32(cephes_tanh_p + 2);
float32x4_t c3 = vld1q_dup_f32(cephes_tanh_p + 3);
float32x4_t c4 = vld1q_dup_f32(cephes_tanh_p + 4);
float32x4_t z = vmulq_f32(x, x);
y = vmulq_f32(y, z);
y = vaddq_f32(y, c1);
y = vmulq_f32(y, z);
y = vaddq_f32(y, c2);
y = vmulq_f32(y, z);
y = vaddq_f32(y, c3);
y = vmulq_f32(y, z);
y = vaddq_f32(y, c4);
y = vmulq_f32(y, z);
y = vmulq_f32(y, x);
y = vaddq_f32(y, x);
// abs(x) > HALFMAXLOGF
// return 1.0 or -1.0
uint32x4_t mask_pos = vcgtq_f32(x2, vdupq_n_f32(0.f));
float32x4_t y1 = vreinterpretq_f32_u32(vbslq_u32(mask_pos, vreinterpretq_u32_f32(vdupq_n_f32(1.f)), vreinterpretq_u32_f32(vdupq_n_f32(-1.f))));
y = vreinterpretq_f32_u32(vbslq_u32(mask_l, vreinterpretq_u32_f32(y0), vreinterpretq_u32_f32(y)));
y = vreinterpretq_f32_u32(vbslq_u32(mask_l2, vreinterpretq_u32_f32(y1), vreinterpretq_u32_f32(y)));
return y;
}
#define c_inv_mant_mask ~0x7f800000u
#define c_cephes_SQRTHF 0.707106781186547524
#define c_cephes_log_p0 7.0376836292E-2
#define c_cephes_log_p1 -1.1514610310E-1
#define c_cephes_log_p2 1.1676998740E-1
#define c_cephes_log_p3 -1.2420140846E-1
#define c_cephes_log_p4 +1.4249322787E-1
#define c_cephes_log_p5 -1.6668057665E-1
#define c_cephes_log_p6 +2.0000714765E-1
#define c_cephes_log_p7 -2.4999993993E-1
#define c_cephes_log_p8 +3.3333331174E-1
#define c_cephes_log_q1 -2.12194440e-4
#define c_cephes_log_q2 0.693359375
/* natural logarithm computed for 4 simultaneous float
* return NaN for x <= 0
*/
/* natural logarithm computed for 4 simultaneous float
 * return NaN for x <= 0
 *
 * Cephes-style: split x into mantissa m in [0.5, 1) and exponent e via the
 * IEEE-754 bit layout, then ln(x) = ln(m) + e*ln(2) with ln(m) from a
 * degree-9 polynomial.  Statement order and the bit manipulation are
 * precision-critical — do not reorder.
 */
static inline float32x4_t log_ps(float32x4_t x)
{
    float32x4_t one = vdupq_n_f32(1);

    x = vmaxq_f32(x, vdupq_n_f32(0)); /* force flush to zero on denormal values */
    uint32x4_t invalid_mask = vcleq_f32(x, vdupq_n_f32(0));

    int32x4_t ux = vreinterpretq_s32_f32(x);

    /* extract the biased exponent */
    int32x4_t emm0 = vshrq_n_s32(ux, 23);

    /* keep only the fractional part: clear the exponent bits, then OR in the
     * exponent of 0.5 so the mantissa lands in [0.5, 1) */
    ux = vandq_s32(ux, vdupq_n_s32(c_inv_mant_mask));
    ux = vorrq_s32(ux, vreinterpretq_s32_f32(vdupq_n_f32(0.5f)));
    x = vreinterpretq_f32_s32(ux);

    emm0 = vsubq_s32(emm0, vdupq_n_s32(0x7f));
    float32x4_t e = vcvtq_f32_s32(emm0);

    e = vaddq_f32(e, one);

    /* part2:
     * if( x < SQRTHF ) {
     * e -= 1;
     * x = x + x - 1.0;
     * } else { x = x - 1.0; }
     */
    uint32x4_t mask = vcltq_f32(x, vdupq_n_f32(c_cephes_SQRTHF));
    float32x4_t tmp = vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(x), mask));
    x = vsubq_f32(x, one);
    e = vsubq_f32(e, vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(one), mask)));
    x = vaddq_f32(x, tmp);

    /* degree-9 polynomial for ln(1 + x) on the reduced argument */
    float32x4_t z = vmulq_f32(x, x);

    float32x4_t y = vdupq_n_f32(c_cephes_log_p0);
    y = vmulq_f32(y, x);
    y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p1));
    y = vmulq_f32(y, x);
    y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p2));
    y = vmulq_f32(y, x);
    y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p3));
    y = vmulq_f32(y, x);
    y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p4));
    y = vmulq_f32(y, x);
    y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p5));
    y = vmulq_f32(y, x);
    y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p6));
    y = vmulq_f32(y, x);
    y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p7));
    y = vmulq_f32(y, x);
    y = vaddq_f32(y, vdupq_n_f32(c_cephes_log_p8));
    y = vmulq_f32(y, x);

    y = vmulq_f32(y, z);

    /* add e*ln(2) in two parts (q1 + q2 = ln(2) split for accuracy) */
    tmp = vmulq_f32(e, vdupq_n_f32(c_cephes_log_q1));
    y = vaddq_f32(y, tmp);

    tmp = vmulq_f32(z, vdupq_n_f32(0.5f));
    y = vsubq_f32(y, tmp);

    tmp = vmulq_f32(e, vdupq_n_f32(c_cephes_log_q2));
    x = vaddq_f32(x, y);
    x = vaddq_f32(x, tmp);
    x = vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(x), invalid_mask)); // negative arg will be NAN
    return x;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: 942002795@qq.com
*/
#include "sys_port.h"
#include "module.h"
#include "tengine_errno.h"
#include "tengine_log.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"
#include "./cortex-a/mish_kernel_arm.h"
/* Register this node as in-place capable: output tensor slot 0 aliases
 * input tensor slot 0, so the executor may hand both the same buffer. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    exec_node->inplace_map_num = 1;
    exec_node->inplace_map[0] = 0; /* output index */
    exec_node->inplace_map[1] = 0; /* input index it reuses */
    return 0;
}
/* Undo init_node: clear the in-place mapping before the node is destroyed. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    exec_node->inplace_map_num = 0;
    return 0;
}
/* Nothing to prepare for mish; present only to satisfy the node_ops table. */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* Execute the mish node: fetch its single input/output tensor pair and
 * dispatch to the ARM kernel.  Returns 0 on success, -1 (errno = EFAULT)
 * when the executor did not honor the in-place mapping. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* ir_node = exec_node->ir_node;
    struct ir_graph* ir_graph = ir_node->graph;
    struct ir_tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct ir_tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    /* init_node declared this op in-place, so input and output must share
     * one buffer; anything else means the executor mis-wired the node. */
    if (( float* )input_tensor->data != ( float* )output_tensor->data)
    {
        TLOG_ERR("input and output are not the same mem\n");
        set_tengine_errno(EFAULT);
        return -1;
    }

    mish_run(output_tensor, input_tensor, exec_graph->num_thread);
    return 0;
}
/* Hand-tuned ARM implementation: claim the highest selection priority. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
    return OPS_SCORE_BEST;
}
/* Callback table for the mish op; reshape/postrun are unneeded and left NULL. */
static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = NULL,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};
/* Module hook: register the ARM mish implementation for OP_MISH at load time. */
static int reg_mish_hcl_ops(void* arg)
{
    return register_builtin_node_ops(OP_MISH, &hcl_node_ops);
}
/* Module hook: remove the registration installed by reg_mish_hcl_ops. */
static int unreg_mish_hcl_ops(void* arg)
{
    return unregister_builtin_node_ops(OP_MISH, &hcl_node_ops);
}
AUTO_REGISTER_OPS(reg_mish_hcl_ops);
AUTO_UNREGISTER_OPS(unreg_mish_hcl_ops);
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册