/**
 * \file dnn/src/common/lsq.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
 */

#include "megdnn/oprs.h"
#include "src/common/utils.h"

namespace megdnn {

void LSQBase::deduce_layout_fwd(const TensorLayout& input,
                                TensorLayout& output) {
    output = TensorLayout(input, input.dtype);
}

void LSQBase::check_layout_fwd(const TensorLayout& input,
                               const TensorLayout& scale,
                               const TensorLayout& zero_point,
                               const TensorLayout& grad_scale,
                               const TensorLayout& output) {
    megdnn_assert(input.dtype == dtype::Float32());
    megdnn_assert(scale.dtype == dtype::Float32());
    megdnn_assert(zero_point.dtype == dtype::Float32());
    megdnn_assert(grad_scale.dtype == dtype::Float32());
    TensorLayout expected;
    deduce_layout_fwd(input, expected);
    megdnn_assert_eq_layout(expected, output);
}

/*!
 * \brief deduce the output layout; only \p input matters
 *
 * scale / zero_point / grad_scale do not influence the output layout,
 * so they are intentionally unnamed here.
 */
void LSQForward::deduce_layout(const TensorLayout& input,
                               const TensorLayout& /* scale */,
                               const TensorLayout& /* zero_point */,
                               const TensorLayout& /* grad_scale */,
                               TensorLayout& output) {
    // delegate to the shared base-class deduction
    deduce_layout_fwd(input, output);
}

void LSQForward::check_exec(const TensorLayout& input,
                            const TensorLayout& scale,
                            const TensorLayout& zero_point,
                            const TensorLayout& grad_scale,
                            const TensorLayout& output,
                            size_t workspace_in_bytes) {
    check_layout_fwd(input, scale, zero_point, grad_scale, output);
    auto required_workspace_space = get_workspace_in_bytes(
            input, scale, zero_point, grad_scale, output);
    megdnn_assert(workspace_in_bytes >= required_workspace_space);
}

/*!
 * \brief pre-execution validation for the backward kernel
 *
 * \p diff (incoming gradient) and \p grad_x (gradient w.r.t. input) must
 * both match the shape of \p input, and the caller-provided workspace
 * must be large enough.
 */
void LSQBackward::check_exec(
        const TensorLayout& diff, const TensorLayout& input,
        const TensorLayout& scale, const TensorLayout& zero_point,
        const TensorLayout& grad_scale, const TensorLayout& grad_x,
        const TensorLayout& grad_s, size_t workspace_in_bytes) {
    megdnn_assert_eq_shape(diff, input);
    megdnn_assert_eq_shape(grad_x, input);
    // fix typo (`worspace`) and keep naming consistent with LSQForward::check_exec
    auto required_workspace_space = get_workspace_in_bytes(
            diff, input, scale, zero_point, grad_scale, grad_x, grad_s);
    megdnn_assert(workspace_in_bytes >= required_workspace_space);
}

}  // namespace megdnn