/**
 * \file imperative/src/impl/op_def.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */

#include "megbrain/imperative/op_def.h"
#include "megbrain/imperative/ops/opr_attr.h"

#include "./op_trait.h"

namespace mgb {
namespace imperative {

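// Rebuild an OpDef from a computing-graph operator node. If no OpTrait is
// registered for the node's concrete type, fall back to the generic OprAttr
// trait (see the TODO below).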
std::shared_ptr<OpDef> OpDef::make_from_op_node(
    cg::OperatorNodeBase* node) {
    OpTrait* trait = OpTrait::find_by_typeinfo(node->dyn_typeinfo());
    if (!trait) {
        // TODO: register `make_from_op_node` for each OperatorNode
        // instead of forwarding to OprAttr
        trait = OpTrait::find_by_typeinfo(OprAttr::typeinfo());
    }
    mgb_assert(trait);
    return trait->make_from_op_node(node);
}

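// Decide how the op should be dispatched for the given input descriptors;
// the choice is delegated to the op's registered trait.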
DispatchMode OpDef::decide_dispatch_mode(
    const OpDef& def,
    const SmallVector<LogicalTensorDesc>& inputs) {
    return def.trait()->decide_dispatch_mode(def, inputs);
}

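// Execute the op eagerly on concrete (physical) tensors and return the
// resulting output tensors.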
SmallVector<TensorPtr> OpDef::apply_on_physical_tensor(
    const OpDef& def,
    SmallVector<TensorPtr> inputs) {
    return def.trait()->apply_on_physical_tensor(def, std::move(inputs));
}

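// Execute the op directly on DeviceTensorND values, writing the results
// into *outputs.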
void OpDef::apply_on_device_tensornd(
    const OpDef& def,
    const SmallVector<DeviceTensorND>& inputs,
    SmallVector<DeviceTensorND>* outputs) {
    def.trait()->apply_on_device_tensornd(def, inputs, outputs);
}

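// Apply the op to symbolic VarNodes, inserting the corresponding operator
// into the computing graph and returning its output VarNodes.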
VarNodeArray OpDef::apply_on_var_node(
    const OpDef& def,
    const VarNodeArray& inputs) {
    return def.trait()->apply_on_var_node(def, inputs);
}

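// Infer output tensor descriptors from the input descriptors without running
// the op; the returned flag indicates whether the inference was fully
// successful.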
std::tuple<SmallVector<LogicalTensorDesc>, bool> OpDef::infer_output_attrs_fallible(
    const OpDef& def,
    const SmallVector<LogicalTensorDesc>& inputs) {
    return def.trait()->infer_output_attrs_fallible(def, inputs);
}

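// Build the backward graph of the op, given which inputs require gradients
// and which outputs will receive gradients.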
BackwardGraphResult OpDef::make_backward_graph(
    const OpDef& def,
    const SmallVector<LogicalTensorDesc>& inputs,
    const SmallVector<bool>& input_requires_grad,
    const SmallVector<bool>& output_has_grad) {
    return def.trait()->make_backward_graph(def, inputs, input_requires_grad, output_has_grad);
}

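// Hashing and equality comparison are likewise delegated to the trait.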
size_t OpDef::hash() const {
    return trait()->hash(*this);
}

bool OpDef::is_same_st(const Hashable& rhs) const {
    return trait()->is_same_st(*this, static_cast<const OpDef&>(rhs));
}

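// Lazily look up and cache the OpTrait registered for this op's concrete
// type; throws MegBrainError if none is registered.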
const OpTrait* OpDef::trait() const {
    if (!m_trait) {
        m_trait = OpTrait::find_by_typeinfo(dyn_typeinfo());
        mgb_throw_if(!m_trait, MegBrainError,
            "can not find op_trait by %s", dyn_typeinfo()->name);
    }
    return m_trait;
}

} // namespace imperative
} // namespace mgb

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}