/**
 * \file dnn/src/cuda/deformable_conv/bwd_data/algo.h
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */

#pragma once

#include "megdnn/oprs.h"

#include "src/common/algo_base.h"
#include "src/common/metahelper.h"
#include "src/common/utils.h"
#include "src/cuda/handle.h"

#include "src/cuda/deformable_conv/opr_impl.h"

#include <unordered_map>

namespace megdnn {
namespace cuda {

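//! base class of all CUDA deformable-conv backward-data algorithms; concrete
//! algorithms implement the availability check, workspace query and execution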
class DeformableConvBackwardDataImpl::AlgoBase : public Algorithm {
protected:
    ~AlgoBase() = default;

public:
    enum class AlgoType : uint32_t {
        CUDA_MATMUL,
    };
    using Mapper = std::unordered_map<AlgorithmDesc, AlgoBase*>;
    AlgoBase() : Algorithm() { m_handle_type = Handle::HandleType::CUDA; }
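    //! problem-size descriptor: holds the operator, handle and all tensor
    //! layouts needed to check availability and compute workspace sizes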
    struct SizeArgs {
        DeformableConvBackwardDataImpl* opr;
        HandleImpl* handle;
        const TensorLayout& im_layout;
        CanonizedFilterMeta filter_meta;
        const TensorLayout& offset_layout;
        const TensorLayout& mask_layout;
        const TensorLayout& out_grad_layout;
        const TensorLayout& im_grad_layout;
        const TensorLayout& offset_grad_layout;
        const TensorLayout& mask_grad_layout;

        std::string to_string() const;

        SizeArgs(DeformableConvBackwardDataImpl* opr, const TensorLayout& im,
                 const TensorLayout& filter, const TensorLayout& offset,
                 const TensorLayout& mask, const TensorLayout& out_grad,
                 const TensorLayout& im_grad, const TensorLayout& offset_grad,
                 const TensorLayout& mask_grad);

        SizeArgs(DeformableConvBackwardDataImpl* opr, const TensorLayout& im,
                 const CanonizedFilterMeta& filter, const TensorLayout& offset,
                 const TensorLayout& mask, const TensorLayout& out_grad,
                 const TensorLayout& im_grad, const TensorLayout& offset_grad,
                 const TensorLayout& mask_grad);
    };
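    //! execution arguments: SizeArgs plus the concrete tensors and the
    //! workspace used when the algorithm actually runs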
    struct ExecArgs : public SizeArgs {
        const TensorND im_tensor, filter_tensor, offset_tensor, mask_tensor,
                out_grad_tensor;
        TensorND im_grad_tensor, offset_grad_tensor, mask_grad_tensor;
        Workspace workspace;

        ExecArgs(DeformableConvBackwardDataImpl* opr, _megdnn_tensor_in im,
                 _megdnn_tensor_in filter, _megdnn_tensor_in offset,
                 _megdnn_tensor_in mask, _megdnn_tensor_in out_grad,
                 _megdnn_tensor_out im_grad, _megdnn_tensor_out offset_grad,
                 _megdnn_tensor_out mask_grad, _megdnn_workspace workspace);
    };
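    //! interface every concrete algorithm must implement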
    virtual bool is_available(const SizeArgs& args) const = 0;
    virtual size_t get_workspace_in_bytes(const SizeArgs& args) const = 0;
    virtual void exec(const ExecArgs& args) const = 0;

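    //! availability check constrained by a workspace-size limit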
    bool is_available_wk(const SizeArgs& args, size_t limit) {
        return is_available(args) && get_workspace_in_bytes(args) <= limit;
    }
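    //! availability check that additionally requires the algorithm to carry
    //! all bits of positive_attr and none of negative_attr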
    bool is_available_attribute(
            const SizeArgs& args,
            const AlgoAttribute& positive_attr = AlgoAttribute::REPRODUCIBLE,
            const AlgoAttribute& negative_attr = AlgoAttribute::DEFAULT,
            size_t limit = std::numeric_limits<size_t>::max()) {
        return contain_attribute_all(positive_attr) &&
               !contain_attribute_any(negative_attr) &&
               is_available_wk(args, limit);
    }
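    //! assert that the given workspace is large enough for this algorithm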
    AlgoBase& check_workspace(const SizeArgs& args,
                              const Workspace& workspace) {
        auto req = get_workspace_in_bytes(args);
        megdnn_assert(
                req <= workspace.size,
                "deformable_conv bwd_data algo %s: required workspace %zu "
                "bytes, got %zu",
                name(), req, workspace.size);
        return *this;
    }
};

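//! backward-data algorithm implemented through matrix multiplication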
class DeformableConvBackwardDataImpl::AlgoMatmul final : public AlgoBase {
private:
    static WorkspaceBundle get_bundle(const SizeArgs& args);

public:
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    void exec(const ExecArgs& args) const override;

    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE;
    }

    std::vector<SearchItem> get_subopr_list(
            const TensorLayoutArray& layouts,
            const OperatorBase* opr) const override;

    const char* name() const override { return "MATMUL"; }
    MEGDNN_DECL_ALGO_TYPE(CUDA_MATMUL)
};

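//! container that owns every algorithm instance and maps algorithm
//! descriptors back to them.
//!
//! A minimal usage sketch (assuming the pack is exposed as a static
//! `sm_algo_pack` member of the operator, as is the convention elsewhere
//! in MegDNN):
//!
//!     AlgoBase::SizeArgs args(opr, im, filter, offset, mask, out_grad,
//!                             im_grad, offset_grad, mask_grad);
//!     for (auto algo : sm_algo_pack.all_algos)
//!         if (algo->is_available_wk(args, workspace_limit))
//!             return algo;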
class DeformableConvBackwardDataImpl::AlgoPack : NonCopyableObj {
    AlgoBase::Mapper m_all_algos_map;

public:
    AlgoPack();
    AlgoMatmul algo_matmul;
    //! all algorithms
    std::vector<AlgoBase*> all_algos;

    const AlgoBase::Mapper& all_algos_map() const { return m_all_algos_map; }
};

}  // namespace cuda
}  // namespace megdnn

// vim: syntax=cpp.doxygen