/**
 * \file dnn/src/x86/matrix_mul/int8/avx2_strategy_4x16x2.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
 */

#include "src/common/utils.h"
#include "src/x86/matrix_mul/int8/kernel_avx2_4x16x2.h"
#include "src/x86/matrix_mul/int8/strategy.h"
#include "src/x86/utils.h"

using namespace megdnn;
using namespace x86;
using namespace x86::matmul;

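// Pack matrix A for the 4x16x2 kernel. The int8 input is widened to int16
// during packing (the micro-kernel consumes int16 A data); the transposed or
// non-transposed packing routine is chosen according to `transpose`.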
static inline void gemm_packa(dt_int16* out, const dt_int8* in, int ldin,
                              int y0, int ymax, int k0, int kmax,
                              bool transpose) {
    if (transpose) {
        matmul_avx2_4x16x2::gemm_s8s8s32_avx2_4x16x2_pack_at(out, in, ldin, y0,
                                                             ymax, k0, kmax);
    } else {
        matmul_avx2_4x16x2::gemm_s8s8s32_avx2_4x16x2_pack_an(out, in, ldin, y0,
                                                             ymax, k0, kmax);
    }
}
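// Pack matrix B, kept as int8; again dispatch on `transpose` to the
// transposed / non-transposed packing routine.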
static inline void gemm_packb(dt_int8* out, const dt_int8* in, int ldin, int x0,
                              int xmax, int k0, int kmax, bool transpose) {
    if (transpose) {
        matmul_avx2_4x16x2::gemm_s8s8s32_avx2_4x16x2_pack_bt(out, in, ldin, x0,
                                                             xmax, k0, kmax);
    } else {
        matmul_avx2_4x16x2::gemm_s8s8s32_avx2_4x16x2_pack_bn(out, in, ldin, x0,
                                                             xmax, k0, kmax);
    }
}
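// Shared inner kernel for both output types (int32 and int16). The packed
// operands are walked in 4(M) x 16(N) x 2(K) tiles: full tiles go through the
// main micro-kernel, while trailing rows/columns fall back to dedicated
// remainder kernels.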
template <typename CType>
static inline void gemm_kern(const dt_int16* pack_a_ptr,
                             const dt_int8* pack_b_ptr, size_t m, size_t n,
                             size_t k, CType* c_ptr, size_t ldc,
                             bool is_first_k) {
    constexpr size_t m_tile = 4;
    constexpr size_t n_tile = 16;
    constexpr size_t k_tile = 2;
    const size_t roundup_k = round_up(k, k_tile);

    const size_t m_end = m / m_tile * m_tile;
    const size_t n_end = n / n_tile * n_tile;
    const size_t m_remain = m - m_end;
    const size_t n_remain = n - n_end;

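    // Iterate over full 4-row blocks of A. The packed layouts place roundup_k
    // elements per row of A and per column of B, so tile base offsets are
    // simply m_offset * roundup_k and n_offset * roundup_k.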
    for (size_t m_offset = 0; m_offset < m_end; m_offset += m_tile) {
        auto iter_a_ptr = pack_a_ptr + m_offset * roundup_k;
        for (size_t n_offset = 0; n_offset < n_end; n_offset += n_tile) {
            auto iter_b_ptr = pack_b_ptr + n_offset * roundup_k;
            auto iter_c_ptr = c_ptr + m_offset * ldc + n_offset;
            matmul_avx2_4x16x2::kern_gemm_s8s8s32_avx2_4x16x2(
                    iter_a_ptr, iter_b_ptr, iter_c_ptr, ldc, k);
        }
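        // Trailing columns (n_remain < 16): use the narrower kernel when at
        // most 8 columns remain, otherwise the generic n-remainder kernel.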
        if (n_remain > 0) {
            auto iter_b_ptr = pack_b_ptr + n_end * roundup_k;
            auto iter_c_ptr = c_ptr + m_offset * ldc + n_end;
            if (n_remain <= 8) {
                matmul_avx2_4x16x2::kern_gemm_s8s8s32_avx2_4x16x2_n8_remain_n(
                        iter_a_ptr, iter_b_ptr, iter_c_ptr, ldc, k, n_remain);
            } else {
                matmul_avx2_4x16x2::kern_gemm_s8s8s32_avx2_4x16x2_remain_n(
                        iter_a_ptr, iter_b_ptr, iter_c_ptr, ldc, k, n_remain);
            }
        }
    }
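    // Trailing rows (m_remain < 4): same column loop, but dispatched to the
    // m-remainder micro-kernels.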
    if (m_remain > 0) {
        auto iter_a_ptr = pack_a_ptr + m_end * roundup_k;
        for (size_t n_offset = 0; n_offset < n_end; n_offset += n_tile) {
            auto iter_b_ptr = pack_b_ptr + n_offset * roundup_k;
            auto iter_c_ptr = c_ptr + m_end * ldc + n_offset;
            matmul_avx2_4x16x2::kern_gemm_s8s8s32_avx2_4x16x2_remain_m(
                    iter_a_ptr, iter_b_ptr, iter_c_ptr, ldc, k, m_remain);
        }
        if (n_remain > 0) {
            auto iter_b_ptr = pack_b_ptr + n_end * roundup_k;
            auto iter_c_ptr = c_ptr + m_end * ldc + n_end;
            if (n_remain <= 8) {
                matmul_avx2_4x16x2::kern_gemm_s8s8s32_avx2_4x16x2_n8_remain_m_n(
                        iter_a_ptr, iter_b_ptr, iter_c_ptr, ldc, k, m_remain,
                        n_remain);
            } else {
                matmul_avx2_4x16x2::kern_gemm_s8s8s32_avx2_4x16x2_remain_m_n(
                        iter_a_ptr, iter_b_ptr, iter_c_ptr, ldc, k, m_remain,
                        n_remain);
            }
        }
    }
}

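// int32-output strategy: pack_A / pack_B / kern forward to the helpers above
// after checking that the dtypes match (Int8 -> Int32 or
// QuantizedS8 -> QuantizedS32).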
MEGDNN_REG_GEMM_STRATEGY_IMPL(gemm_avx2_s8s8s32_4x16x2);
void gemm_avx2_s8s8s32_4x16x2::pack_A(dt_int16* out, const dt_int8* in,
                                      int ldin, int y0, int ymax, int k0,
                                      int kmax, bool transpose) const {
    gemm_packa(out, in, ldin, y0, ymax, k0, kmax, transpose);
}

void gemm_avx2_s8s8s32_4x16x2::pack_B(dt_int8* out, const dt_int8* in, int ldin,
                                      int x0, int xmax, int k0, int kmax,
                                      bool transpose) const {
    gemm_packb(out, in, ldin, x0, xmax, k0, kmax, transpose);
}

void gemm_avx2_s8s8s32_4x16x2::kern(const dt_int16* pack_a_ptr,
                                    const dt_int8* pack_b_ptr, size_t m,
                                    size_t n, size_t k, dt_int32* c_ptr,
                                    size_t ldc, bool is_first_k,
                                    const dt_int32*, dt_int32*) const {
    megdnn_assert(A_dtype.enumv() == B_dtype.enumv() &&
                          ((A_dtype.enumv() == DTypeEnum::Int8 &&
                            C_dtype.enumv() == DTypeEnum::Int32) ||
                           (A_dtype.enumv() == DTypeEnum::QuantizedS8 &&
                            C_dtype.enumv() == DTypeEnum::QuantizedS32)),
                  "A: %s B: %s C: %s", A_dtype.name(), B_dtype.name(),
                  C_dtype.name());
    megdnn_assert(is_first_k == true);
    gemm_kern(pack_a_ptr, pack_b_ptr, m, n, k, c_ptr, ldc, is_first_k);
}

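// int16-output strategy: identical structure, instantiating gemm_kern with an
// int16 output type (Int8 -> Int16 or QuantizedS8 -> QuantizedS16).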
MEGDNN_REG_GEMM_STRATEGY_IMPL(gemm_avx2_s8s8s16_4x16x2);
void gemm_avx2_s8s8s16_4x16x2::pack_A(dt_int16* out, const dt_int8* in,
                                      int ldin, int y0, int ymax, int k0,
                                      int kmax, bool transpose) const {
    gemm_packa(out, in, ldin, y0, ymax, k0, kmax, transpose);
}

void gemm_avx2_s8s8s16_4x16x2::pack_B(dt_int8* out, const dt_int8* in, int ldin,
                                      int x0, int xmax, int k0, int kmax,
                                      bool transpose) const {
    gemm_packb(out, in, ldin, x0, xmax, k0, kmax, transpose);
}

void gemm_avx2_s8s8s16_4x16x2::kern(const dt_int16* pack_a_ptr,
                                    const dt_int8* pack_b_ptr, size_t m,
                                    size_t n, size_t k, dt_int16* c_ptr,
                                    size_t ldc, bool is_first_k,
                                    const dt_int32*, dt_int32*) const {
    megdnn_assert(A_dtype.enumv() == B_dtype.enumv() &&
                          ((A_dtype.enumv() == DTypeEnum::Int8 &&
                            C_dtype.enumv() == DTypeEnum::Int16) ||
                           (A_dtype.enumv() == DTypeEnum::QuantizedS8 &&
                            C_dtype.enumv() == DTypeEnum::QuantizedS16)),
                  "A: %s B: %s C: %s", A_dtype.name(), B_dtype.name(),
                  C_dtype.name());
    megdnn_assert(is_first_k == true);
    gemm_kern(pack_a_ptr, pack_b_ptr, m, n, k, c_ptr, ldc, is_first_k);
}
// vim: syntax=cpp.doxygen