/**
 * \file dnn/src/cuda/tqt/kern.cuh
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
 */

#pragma once

#include "src/cuda/elemwise_helper.cuh"
#include "src/cuda/utils.cuh"

#if MEGDNN_CC_HOST
#include "megdnn/oprs.h"
#endif

namespace megdnn {
namespace cuda {

template <typename ctype>
struct TQTKernOp {
    ctype* input;   //!< source tensor (device pointer)
    ctype* output;  //!< destination tensor (device pointer)
    ctype qmin, qmax;  //!< inclusive quantized-value clamp range

    //! TQT forward: fake-quantize \p input[idx] with step size t = 2^scale,
    //! i.e. output = clamp(round(input / t), qmin, qmax) * t.
    //! \p scale is the log2 of the quantization step (broadcast per element
    //! by the elemwise helper).
    __device__ void operator()(uint32_t idx, ctype scale) {
        // exp2f/roundf keep the whole computation in single precision;
        // the original powf(2, scale)/round() silently promoted to double.
        ctype t = exp2f(scale);
        ctype x = roundf(input[idx] / t);
        x = fmaxf(fminf(x, qmax), qmin);
        output[idx] = x * t;
    }

#if MEGDNN_CC_HOST
    TQTKernOp(const TensorND& input, const TensorND& output,
              const TQT::Param& param)
            : input{input.ptr<ctype>()},
              output{output.ptr<ctype>()},
              qmin(param.qmin),
              qmax(param.qmax) {}
#endif
};

template <typename ctype>
struct TQTBwdKernOp {
    ctype* diff;    //!< incoming gradient w.r.t. the forward output
    ctype* input;   //!< forward input (device pointer)
    ctype* grad_x;  //!< out: gradient w.r.t. input
    ctype* grad_s;  //!< out: per-element gradient w.r.t. scale (log2 step)
    ctype qmin, qmax;  //!< inclusive quantized-value clamp range

    //! TQT backward: straight-through estimator for grad_x (zeroed where the
    //! value was clipped) and the scale gradient from the TQT paper:
    //!   quantized region: diff * (round(x/t) - x/t) * t * ln(2)
    //!   clipped region:   diff * clamp(round(x/t)) * t * ln(2)
    __device__ void operator()(uint32_t idx, ctype scale) {
        ctype t = exp2f(scale);
        ctype scaled = input[idx] / t;
        ctype rounded = roundf(scaled);
        rounded = fmaxf(fminf(rounded, qmax), qmin);
        // A value is clipped when it lies outside EITHER side of the
        // representable range. The original used `&&`, which can never be
        // true (a value cannot be below qmin-0.5 and above qmax+0.5 at
        // once), so grad_clip was always zero and grad_x was never masked.
        bool mask_clip = scaled < -0.5f + qmin || scaled > 0.5f + qmax;
        bool mask_quant = !mask_clip;

        grad_x[idx] = diff[idx] * mask_quant;
        // logf(2.0f) instead of log(2.0): avoid double-precision math in a
        // float kernel.
        ctype grad_quant =
                diff[idx] * mask_quant * (rounded - scaled) * t * logf(2.0f);
        ctype grad_clip = diff[idx] * mask_clip * rounded * t * logf(2.0f);
        grad_s[idx] = grad_quant + grad_clip;
    }

#if MEGDNN_CC_HOST
    TQTBwdKernOp(const TensorND& diff, const TensorND& input,
                 const TensorND& grad_x, const TensorND& grad_s,
                 const TQT::Param& param)
            : diff{diff.ptr<ctype>()},
              input{input.ptr<ctype>()},
              grad_x{grad_x.ptr<ctype>()},
              grad_s{grad_s.ptr<ctype>()},
              qmin(param.qmin),
              qmax(param.qmax) {}
#endif
};

template <typename ctype>
struct TQTKernOpNonContig {
    ctype qmin;  //!< inclusive lower clamp bound (quantized domain)
    ctype qmax;  //!< inclusive upper clamp bound (quantized domain)

    //! Non-contiguous variant of the TQT forward: the elemwise helper hands
    //! each element by reference instead of a flat index.
    //! output = clamp(round(input / 2^scale), qmin, qmax) * 2^scale.
    __device__ void operator()(uint32_t, ctype& input, ctype& scale,
                               ctype& output) {
        // exp2f/roundf keep the computation in single precision; the
        // original powf(2, scale)/round() silently promoted to double.
        ctype t = exp2f(scale);
        ctype x = roundf(input / t);
        x = fmaxf(fminf(x, qmax), qmin);
        output = x * t;
    }
#if MEGDNN_CC_HOST
    TQTKernOpNonContig(const TQT::Param& param)
            : qmin(param.qmin), qmax(param.qmax) {}
#endif
};

template <typename ctype>
struct TQTBwdKernOpNonContig {
    ctype qmin;  //!< inclusive lower clamp bound (quantized domain)
    ctype qmax;  //!< inclusive upper clamp bound (quantized domain)

    //! Non-contiguous variant of the TQT backward; same math as
    //! TQTBwdKernOp but elements are passed by reference.
    __device__ void operator()(uint32_t, ctype& diff, ctype& input,
                               ctype& scale, ctype& grad_x, ctype& grad_s) {
        ctype t = exp2f(scale);
        ctype scaled = input / t;
        ctype rounded = roundf(scaled);
        rounded = fmaxf(fminf(rounded, qmax), qmin);
        // A value is clipped when it lies outside EITHER side of the
        // representable range. The original used `&&`, which can never be
        // true, so grad_clip was always zero and grad_x was never masked.
        bool mask_clip = scaled < -0.5f + qmin || scaled > 0.5f + qmax;
        bool mask_quant = !mask_clip;

        grad_x = diff * mask_quant;
        // logf(2.0f) instead of log(2.0): stay in single precision.
        ctype grad_quant =
                diff * mask_quant * (rounded - scaled) * t * logf(2.0f);
        ctype grad_clip = diff * mask_clip * rounded * t * logf(2.0f);
        grad_s = grad_quant + grad_clip;
    }
#if MEGDNN_CC_HOST
    TQTBwdKernOpNonContig(const TQT::Param& param)
            : qmin(param.qmin), qmax(param.qmax) {}
#endif
};

}  // namespace cuda
}  // namespace megdnn