// basic_arith.cpp
#include "megbrain/opr/basic_arith.h"
#include "megbrain/gopt/basic_arith.h"
#include "megbrain/gopt/gtrans.h"
#include "megbrain/graph/grad_impl.h"
#include "megbrain/opr/basic_arith_wrapper.h"
#include "megbrain/opr/cond.h"
#include "megbrain/opr/io.h"
#include "megbrain/opr/tensor_manip.h"
#include "megbrain/opr/utility.h"
#include "megbrain/utils/arith_helper.h"

#include "./internal/megdnn_opr_wrapper.inl"

#include <cmath>

using namespace mgb;
using namespace opr;

/* ========================= BatchedDTypePromotion ========================= */
intl::BatchedDTypePromotion::BatchedDTypePromotion(const VarNodeArrayView& vars)
        : m_orig_vars{vars} {
    mgb_assert(!vars.empty());
    DType final_dtype;
    bool changed = false;
    for (size_t i = 0; i < vars.size(); ++i) {
        auto cur = vars[i]->dtype();
        if (!i) {
            final_dtype = cur;
        } else {
            auto promoted = dtype_promotion(final_dtype, cur);
            changed |= promoted != final_dtype || promoted != cur;
            final_dtype = promoted;
        }
    }
    m_changed = changed;
    m_final_dtype = final_dtype;
}

void intl::BatchedDTypePromotion::set_dtype(DType dtype) {
    mgb_assert(!m_finalized);
    if (m_final_dtype != dtype) {
        m_final_dtype = dtype;
        m_changed = true;
    }
}

const VarNodeArrayView& intl::BatchedDTypePromotion::get_vars() {
    m_finalized = true;
    if (!m_changed) {
        return m_orig_vars;
    }
    if (!m_cvt_vars_view.valid()) {
        m_cvt_vars.resize(m_orig_vars.size());
        auto dtype = m_final_dtype;
        for (size_t i = 0; i < m_cvt_vars.size(); ++i) {
            m_cvt_vars[i] = TypeCvt::make(m_orig_vars[i], dtype).node();
        }
        m_cvt_vars_view.emplace(m_cvt_vars);
    }
    return m_cvt_vars_view.val();
}

/* =========================== Elemwise =========================== */

MGB_DYN_TYPE_OBJ_FINAL_IMPL(Elemwise);
Elemwise::Elemwise(
        const ModeTrait& mode_trait, const VarNodeArrayView& inputs, Param param,
        const OperatorNodeConfig& config)
        : Super{inputs.at(0)->owner_graph(), config, mode_trait.name, inputs} {
    init_megdnn_opr(*this, param);
    output(0)->add_flag(VarNode::Flag::ALLOW_EMPTY_SHAPE);
    if (mode_trait.commutable) {
        mgb_assert(inputs.size() == 2);
        add_input({inputs[0], inputs[1]}, AddInputSortType::CUR_ADDED);
    } else {
        if (param.mode == Mode::FUSE_MUL_ADD3) {
            add_input({inputs[0], inputs[1]}, AddInputSortType::CUR_ADDED);
            add_input({inputs[2]});
        } else if (param.mode == Mode::FUSE_MUL_ADD4) {
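            // canonicalize the commutative (a * b + c * d) form: order each
            // product pair by var id and put the pair with the smaller leading
            // id first, so equivalent expressions end up with identical inputs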
            auto i0 = inputs[0], i1 = inputs[1], i2 = inputs[2], i3 = inputs[3];
            if (i0->id() > i1->id())
                std::swap(i0, i1);
            if (i2->id() > i3->id())
                std::swap(i2, i3);
            if (i0->id() > i2->id()) {
                std::swap(i0, i2);
                std::swap(i1, i3);
            }
            add_input({i0, i1, i2, i3});
        } else {
            for (auto i : inputs)
                add_input({i});
        }
    }

    mgb_assert(m_input_broadcastable.size() >= inputs.size());
    for (size_t i = 0; i < inputs.size(); ++i) {
        if (input()[i]->owner_opr()->same_type<opr::MarkNoBroadcastElemwise>()) {
            m_input_broadcastable[i] = false;
        } else {
            m_input_broadcastable[i] = true;
        }
    }
    if (inputs.size() == 1) {
        m_input_broadcastable[0] = false;
    } else {
        Maybe<size_t> non_scalar;
        using namespace cg::static_infer;
        auto&& mgr = owner_graph()->static_infer_manager();
        for (size_t i = 0; i < input().size(); ++i) {
            auto it = mgr.get_infer_type(input(i));
            if (!((it.shape & InferType::CONST) &&
                  mgr.infer_shape(input(i)).is_scalar())) {
                if (non_scalar.valid()) {
                    non_scalar.invalidate();
                    break;
                }
                non_scalar = i;
            }
        }
        if (non_scalar.valid()) {
            // exactly one input is non-scalar
            m_input_broadcastable[non_scalar.val()] = false;
        }
    }

    if (inputs.size() && inputs[0]->dtype().category() == DTypeCategory::QUANTIZED) {
        mgb_assert(
                param.mode == Param::Mode::ADD || param.mode == Param::Mode::SUB ||
                        param.mode == Param::Mode::NEGATE ||
                        param.mode == Param::Mode::RELU ||
                        param.mode == Param::Mode::MAX ||
                        param.mode == Param::Mode::MIN,
                "Only ADD, SUB, NEGATE, RELU, MAX and MIN are guaranteed "
                "to be supported on Elemwise for quantized DType; got unsupported mode %d",
                (int)param.mode);
    }
}

SymbolVar Elemwise::make(
        const VarNodeArrayView& inputs, Param param, const OperatorNodeConfig& config) {
    auto trait = ModeTrait::from_mode(param.mode);
    mgb_assert(
            inputs.size() == trait.arity, "%s expects %u inputs; got %zu actually",
            trait.name, trait.arity, inputs.size());
    intl::BatchedDTypePromotion dtp{inputs};
    if (dtp.get_dtype().category() == DTypeCategory::INT && !trait.allow_int) {
        dtp.set_dtype(dtype::Float32());
    }

    mgb_throw_if(
            dtp.get_dtype().category() == DTypeCategory::FLOAT && !trait.allow_float,
            ConversionError,
            "elemwise mode %s does not allow float input; "
            "got inputs: %s",
            trait.name, cg::dump_var_info(inputs).c_str());

#if !MGB_BUILD_SLIM_SERVING
    auto&& options = inputs[0]->owner_graph()->options();
    if (options.graph_opt_level && !(options.disable_inplace_arith_opt)) {
        auto repl = gopt::optimize_elemwise_expr_inplace(dtp.get_vars(), param, config);
        if (repl)
            return repl;
    }
#endif

    return SymbolVar{inputs[0]}.insert_single_output_opr<Elemwise>(
            trait, dtp.get_vars(), param, config);
}
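
// A minimal usage sketch (the SymbolVars `a` and `b` are hypothetical vars
// from the same graph):
//     auto c = Elemwise::make({a, b}, Elemwise::Mode::ADD);
// dtype promotion and, when enabled, the inplace expression optimization above
// are applied inside make() before the operator node is inserted.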

TensorShape Elemwise::get_output_var_shape(
        Mode mode, const TensorShapeArray& input_shapes) {
    mgb_assert(input_shapes.size() == ModeTrait::from_mode(mode).arity);
    TensorShape ret;
    megdnn::Elemwise::deduce_shape(input_shapes, ret);
    return ret;
}

void Elemwise::perform(
        Mode mode, DeviceTensorND& dest, const SmallVector<DeviceTensorND>& inputs,
        intl::UniqPtrWithCN<megdnn::Elemwise>& opr) {
    megdnn::TensorNDArray dnn_inputs(inputs.size());
    TensorShapeArray inp_shapes(inputs.size());
    DType out_dt;
    CompNode out_cn;
    for (size_t i = 0; i < inputs.size(); ++i) {
        auto&& t = inputs[i];
        if (!i) {
            out_cn = t.comp_node();
            out_dt = t.dtype();
        } else {
            mgb_assert(t.comp_node() == out_cn);
            mgb_assert(t.dtype() == out_dt);
        }
        if (t.shape().is_empty()) {
            mgb_assert(dest.empty());
            return;
        }
        inp_shapes[i] = t.shape();
    }
    if (!opr) {
        opr = intl::create_megdnn_opr<megdnn::Elemwise>(out_cn);
    } else {
        mgb_assert(out_cn == opr.comp_node());
    }
    out_cn.activate();
    for (size_t i = 0; i < inputs.size(); ++i)
        dnn_inputs[i] = inputs[i].as_megdnn();
    dest.comp_node(out_cn).dtype(out_dt).resize(get_output_var_shape(mode, inp_shapes));
    opr->param() = {mode};
    call_megdnn_opr_exec(out_cn, dnn_inputs, dest.as_megdnn(), opr.get(), nullptr);
}

void Elemwise::perform_dnn(
        CompNode cn, const megdnn::TensorND& dest, megdnn::TensorNDArray& inputs,
        intl::UniqPtrWithCN<megdnn::Elemwise>& opr) {
    call_megdnn_opr_exec(cn, inputs, dest, opr.get(), nullptr);
}

TensorLayoutArray Elemwise::collective_collapse(const TensorLayoutArray& layouts) {
    TensorLayoutPtrArray inp(layouts.size());
    TensorLayoutArray result(inp.size());
    for (size_t i = 0; i < layouts.size(); ++i) {
        result[i] = layouts[i];
        inp[i] = &result[i];
    }
    collective_collapse_inplace(inp);
    return result;
}

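// Collapse adjacent axis pairs that are mergeable in *every* layout: the pair
// (i, i+1) is merged when all layouts share the same shape pair and satisfy
// stride[i] == stride[i+1] * shape[i+1], reducing ndim for the kernels below.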
void Elemwise::collective_collapse_inplace(const TensorLayoutPtrArray& layouts) {
    mgb_assert(layouts.size());
    size_t ndim = layouts[0]->ndim;
    for (auto i : layouts) {
        if (i->ndim != ndim)
            mgb_throw(MegBrainError, "ndims must be same");
    }

    auto update_all = [&layouts](size_t axis) {
        for (auto i : layouts) {
            i->shape[axis] *= i->shape[axis + 1];
            i->stride[axis] = i->stride[axis + 1];
            i->remove_axis_inplace(axis + 1);
        }
    };

    auto check = [&layouts](size_t axis) -> bool {
        auto std_p =
                std::make_pair(layouts[0]->shape[axis], layouts[0]->shape[axis + 1]);
        for (auto i : layouts) {
            auto cur_p = std::make_pair(i->shape[axis], i->shape[axis + 1]);
            if (std_p != cur_p)
                return false;
            if (i->stride[axis] !=
                i->stride[axis + 1] * static_cast<ptrdiff_t>(i->shape[axis + 1]))
                return false;
        }
        return true;
    };

    for (int i = static_cast<int>(ndim) - 2; i >= 0; i--) {
        if (check(i)) {
            update_all(i);
        }
    }
}

void Elemwise::broadcast_collective_collapse(
        const TensorLayoutPtrArray& inp_layouts, TensorLayout* target_layout) {
    for (auto&& p : inp_layouts) {
        *p = p->broadcast(*target_layout);
    }
    TensorLayoutPtrArray buf(inp_layouts.size() + 1);
    buf[0] = target_layout;
    for (size_t i = 0; i < inp_layouts.size(); i++) {
        buf[i + 1] = inp_layouts[i];
    }
    collective_collapse_inplace(buf);
}

void Elemwise::mem_plan_fwd_in2out_writable() {
    mixin_mem_plan_fwd_in2out_writable(*this);
}

void Elemwise::scn_do_execute() {
    auto&& inp = input();
    megdnn::TensorNDArray dnn_inp;
    mgb_assert(dnn_inp.capacity() >= inp.size(), "heap allocation in elemwise exec");
    dnn_inp.resize(inp.size());
    for (size_t i = 0; i < inp.size(); ++i) {
        if (inp[i]->dev_tensor().empty()) {
            mgb_assert(output(0)->dev_tensor().empty());
            return;
        }
        dnn_inp[i] = (inp[i]->dev_tensor().as_megdnn());
    }
    mgb_assert(!output(0)->dev_tensor().empty());

    megdnn_opr()->param() = param();
    call_megdnn_opr_exec(
            comp_node(), dnn_inp, output(0)->dev_tensor().as_megdnn(), megdnn_opr(),
            this);
}

void Elemwise::init_output_static_infer_desc() {
    Super::init_output_static_infer_desc();
    static StaticInferOpr<megdnn::Elemwise> static_infer_opr;

    using namespace cg::static_infer;

    auto infer_value = [this](DeviceTensorND& dest, const InpVal& inp) {
        SmallVector<DeviceTensorND> inp_vals(inp.val.size());
        for (size_t i = 0; i < inp_vals.size(); ++i)
            inp_vals[i] = inp.val[i].value();
        auto sopr = static_infer_opr.lock();
        perform(param().mode, dest, inp_vals, sopr());
        return true;
    };

    DepVal deps(input().size());
    for (size_t i = 0; i < input().size(); ++i)
        deps[i] = {input(i), DepType::VALUE};
    owner_graph()->static_infer_manager().register_value_infer(
            output(0), {SourceType::DEP, deps, infer_value});
}

void Elemwise::get_output_var_shape(
        const TensorShapeArray& inp_shape, TensorShapeArray& out_shape) const {
    out_shape.at(0) = get_output_var_shape(param().mode, inp_shape);
    for (size_t i = 0; i < input().size(); ++i) {
        mgb_throw_if(
                !m_input_broadcastable[i] && !out_shape[0].eq_shape(inp_shape[i]),
                GraphError,
                "input %zu declared to be non-broadcastable but broadcast "
                "actually happened",
                i);
    }
}

void Elemwise::add_input_layout_constraint() {
    for (auto i : input()) {
        i->add_layout_constraint_monotone();
    }
}

void Elemwise::call_megdnn_opr_exec(
        CompNode comp_node, megdnn::TensorNDArray& inp, const megdnn::TensorND& out,
        megdnn::Elemwise* opr, Elemwise* caller) {
    if (opr->param().mode == Mode::FUSE_MUL_ADD3 &&
        !(inp[2].layout.eq_layout(inp[0].layout) ||
          inp[2].layout.eq_layout(inp[1].layout) || inp[2].layout.is_scalar())) {
        if (caller && !caller->fuse_badlayout_warn_printed()) {
            mgb_log_debug(
                    "%s: FUSE_MUL_ADD3 input layouts mismatch: %s %s %s; "
                    "fallback to normal computing",
                    caller->cname(), inp[0].layout.to_string().c_str(),
                    inp[1].layout.to_string().c_str(),
                    inp[2].layout.to_string().c_str());
            caller->m_fuse_badlayout_warn_printed = true;
        }

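        // fallback: broadcast every input to the output layout, then compute
        // tmp = a * b followed by out = c + tmp with two separate kernels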
        for (auto&& i : inp) {
            i.layout = i.layout.broadcast(out.layout);
        }

        megdnn::TensorNDArray run_inp(2);
        auto run = [&](Mode mode, const megdnn::TensorND& i0,
                       const megdnn::TensorND& i1, const megdnn::TensorND& out) {
            run_inp[0] = i0;
            run_inp[1] = i1;
            opr->param() = {mode};
            opr->exec(run_inp, out);
        };

        auto tmp = intl::get_temp_tensor(
                caller ? caller->owner_graph() : nullptr, comp_node, out.layout);
        auto tmpv = tmp.as_megdnn();

        MGB_TRY {
            run(Mode::MUL, inp[0], inp[1], tmpv);
            run(Mode::ADD, inp[2], tmpv, out);
        }
        MGB_FINALLY(opr->param() = {Mode::FUSE_MUL_ADD3});
        return;
    }

    if (opr->param().mode == Mode::FUSE_MUL_ADD4 &&
        !(inp[0].layout.eq_layout(inp[2].layout) &&
          inp[1].layout.eq_layout(inp[3].layout)) &&
        !(inp[0].layout.eq_layout(inp[3].layout) &&
          inp[1].layout.eq_layout(inp[2].layout))) {
        if (caller && !caller->fuse_badlayout_warn_printed()) {
            mgb_log_debug(
                    "%s: FUSE_MUL_ADD4 input layouts mismatch: %s %s %s %s; "
                    "fallback to normal computing",
                    caller->cname(), inp[0].layout.to_string().c_str(),
                    inp[1].layout.to_string().c_str(),
                    inp[2].layout.to_string().c_str(),
                    inp[3].layout.to_string().c_str());
            caller->m_fuse_badlayout_warn_printed = true;
        }

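        // fallback: broadcast every input to the output layout, then compute
        // tmp = a * b, out = c * d and finally out = out + tmp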
        for (auto&& i : inp) {
            i.layout = i.layout.broadcast(out.layout);
        }

        megdnn::TensorNDArray run_inp(2);
        auto run = [&](Mode mode, const megdnn::TensorND& i0,
                       const megdnn::TensorND& i1, const megdnn::TensorND& out) {
            run_inp[0] = i0;
            run_inp[1] = i1;
            opr->param() = {mode};
            opr->exec(run_inp, out);
        };

        auto tmp = intl::get_temp_tensor(
                caller ? caller->owner_graph() : nullptr, comp_node, out.layout);
        auto tmpv = tmp.as_megdnn();

        MGB_TRY {
            run(Mode::MUL, inp[0], inp[1], tmpv);
            run(Mode::MUL, inp[2], inp[3], out);
            run(Mode::ADD, out, tmpv, out);
        }
        MGB_FINALLY(opr->param() = {Mode::FUSE_MUL_ADD4});
        return;
    }

    // All Elemwise operations on QuantizedS32/QuantizedS8 are independent of
    // the quantization scale. Since MegDNN does not support computing Elemwise
    // on QuantizedS32/QuantizedS8, we translate the data type to Int32/Int8
    // before passing the tensors to MegDNN.
    if (inp.size() && inp[0].layout.dtype.category() == DTypeCategory::QUANTIZED) {
        auto inp_dtype = inp[0].layout.dtype;
        DType compute_dtype;
        if (inp_dtype.enumv() == DTypeEnum::QuantizedS32) {
            compute_dtype = dtype::Int32();
        } else if (inp_dtype.enumv() == DTypeEnum::QuantizedS8) {
            compute_dtype = dtype::Int8();
        } else {
            mgb_throw(
                    MegBrainError, "Unsupported Quantized Elemwise Mode %s: %d on %s",
                    inp[0].layout.dtype.name(), int(opr->param().mode),
                    comp_node.to_string().c_str());
        }

        megdnn::TensorNDArray run_inp(inp);
        for (size_t i = 0; i < inp.size(); i++) {
            run_inp[i].layout.dtype = compute_dtype;
        }
        megdnn::TensorND run_out = out;
        run_out.layout.dtype = compute_dtype;
        opr->exec(run_inp, run_out);
        return;
    }

    opr->exec(inp, out);
}

#if MGB_ENABLE_GRAD
MGB_IMPL_OPR_GRAD(Elemwise) {
    SymbolVar i[5];
    SymbolVar i0(opr.input(0)), i1, i2, out(opr.output(0)), og{out_grad.at(0)}, result;
    for (size_t t = 0; t < opr.input().size(); ++t)
        i[t] = opr.input()[t];
    if (opr.input().size() >= 2)
        i1 = opr.input(1);
    if (opr.input().size() >= 3)
        i2 = opr.input(2);

    // negate after reduce, for better performance
    bool negate_result = false;
#define RET(_v)    \
    result = (_v); \
    break
#define EL1(_mode, _a)         Elemwise::make({_a}, Mode::_mode)
#define EL2(_mode, _a, _b)     Elemwise::make({_a, _b}, Mode::_mode)
#define EL3(_mode, _a, _b, _c) Elemwise::make({_a, _b, _c}, Mode::_mode)
#define RET_INVALID()          return InvalidGrad::make(opr, wrt_idx)

    using Mode = Elemwise::Mode;

    switch (opr.param().mode) {
        // unary
        case Mode::RELU:
        case Mode::FUSE_ADD_RELU:
            RET(EL2(SWITCH_GT0, out, og));
        case Mode::ABS:
            RET(EL2(ABS_GRAD, i0, og));
        case Mode::ACOS:
            negate_result = true;
            RET(og / EL1(SIN, out));
        case Mode::ASIN:
            RET(og / EL1(COS, out));
        case Mode::ATAN2:
            if (wrt_idx) {
                negate_result = true;
            }
            RET(og * i[!wrt_idx] / (i0 * i0 + i1 * i1));
        case Mode::CEIL:
            return nullptr;
        case Mode::COS:
            negate_result = true;
            RET(EL1(SIN, i0) * og);
        case Mode::EXP:
            RET(og * out);
        case Mode::EXPM1:
            RET(og * EL1(EXP, i0));
        case Mode::FLOOR:
            return nullptr;
        case Mode::LOG:
            RET(og / i0);
        case Mode::LOG1P:
            RET(og / (i0 + 1));
        case Mode::NEGATE:
            negate_result = true;
            RET(og);
        case Mode::SIGMOID:
        case Mode::FUSE_ADD_SIGMOID:
            RET(EL2(SIGMOID_GRAD, out, og));
        case Mode::SIN:
            RET(EL1(COS, i0) * og);
        case Mode::TANH:
        case Mode::FUSE_ADD_TANH:
            RET(EL2(TANH_GRAD, out, og));
        case Mode::FAST_TANH:
            RET(EL2(FAST_TANH_GRAD, i0, og));
        case Mode::ROUND:
            return nullptr;
        case Mode::ERF:
            RET(EL1(EXP, -i0 * i0) * 2 / static_cast<float>(sqrt(M_PI)) * og);
        case Mode::ERFINV:
            RET(EL1(EXP, out * out) * static_cast<float>(sqrt(M_PI)) / 2 * og);
        case Mode::ERFC:
            RET(-EL1(EXP, -i0 * i0) * 2 / static_cast<float>(sqrt(M_PI)) * og);
        case Mode::H_SWISH:
            RET(EL2(H_SWISH_GRAD, i0, og));
        case Mode::FUSE_ADD_H_SWISH:
            RET(EL2(H_SWISH_GRAD, (i0 + i1), og));
        case Mode::NOT:
            return nullptr;
        case Mode::SILU:
            RET(EL2(SILU_GRAD, i0, og));
        case Mode::GELU:
            RET(EL2(GELU_GRAD, i0, og));
        case Mode::SINH:
            RET(EL1(COSH, i0) * og);
        case Mode::COSH:
            RET(EL1(SINH, i0) * og);
        case Mode::ASINH:
            RET(EL2(ASINH_GRAD, i0, og));
        case Mode::ACOSH:
            RET(EL2(ACOSH_GRAD, i0, og));
        case Mode::ATANH:
            RET(EL2(ATANH_GRAD, i0, og));
        case Mode::TAN: {
            auto two = i0.make_scalar_dt(2);
            RET(og / (EL2(POW, EL1(COS, i0), two)));
        }
        case Mode::RELU6:
            RET(EL2(RELU6_GRAD, i0, og));
        case Mode::SOFTPLUS:
            RET(EL2(SOFTPLUS_GRAD, i0, og));
        case Mode::HSIGMOID:
            RET(EL2(HSIGMOID_GRAD, i0, og));
        case Mode::LOGSIGMOID:
            RET(EL2(SOFTPLUS_GRAD, EL1(NEGATE, i0), og));
        case Mode::SQRT:
            RET(og / EL1(SQRT, i0) / 2);
        case Mode::SQUARE:
            RET(og * 2 * i0);
        case Mode::SIGN:
            RET(i0.make_scalar_dt(0).broadcast(i0.symshape()));

        // binary
        case Mode::ABS_GRAD:
            if (wrt_idx == 0) {
                return nullptr;
            }
            RET(EL2(ABS_GRAD, i0, og));
        case Mode::ADD:
            RET(og);
        case Mode::SAFE_DIV:
            RET_INVALID();
        case Mode::FLOOR_DIV:
            return nullptr;
        case Mode::MAX:
            if (wrt_idx) {
                RET(EL3(COND_LT_MOV, i[0], i[1], og));
            } else {
                RET(EL3(COND_LEQ_MOV, i[1], i[0], og));
            }
        case Mode::MIN:
            if (wrt_idx) {
                RET(EL3(COND_LT_MOV, i[1], i[0], og));
            } else {
                RET(EL3(COND_LEQ_MOV, i[0], i[1], og));
            }
        case Mode::MOD:
            if (wrt_idx == 0) {
                RET(og);
            }
            RET_INVALID();
        case Mode::MUL:
            RET(og * i[!wrt_idx]);
        case Mode::POW:
            if (wrt_idx) {
                RET(out * EL1(LOG, i0) * og);
            }
            RET(og * i1 * EL2(POW, i0, i1 - 1));
        case Mode::SIGMOID_GRAD:
            if (wrt_idx == 0) {
                auto one = i0.make_scalar_dt(1), two = i0.make_scalar_dt(2);
                RET((one - i0 * two) * i1 * og);
            }
            RET(EL2(SIGMOID_GRAD, i0, og));
        case Mode::SUB:
            negate_result = wrt_idx;
            RET(og);
        case Mode::SWITCH_GT0:
            if (!wrt_idx)
                return nullptr;
            RET(EL2(SWITCH_GT0, i0, og));
        case Mode::TANH_GRAD:
            if (wrt_idx == 0) {
                auto mtwo = i0.make_scalar_dt(-2);
                RET(mtwo * i0 * i1 * og);
            }
            RET(EL2(TANH_GRAD, i0, og));
        case Mode::TRUE_DIV:
            if (wrt_idx == 0) {
                RET(og / i1);
            }
            negate_result = true;
            RET((og * i0) * EL2(POW, i1, i1.make_scalar(-2)));
        case Mode::LOG_SUM_EXP:
            if (wrt_idx == 0) {
                RET(og * EL1(SIGMOID, i0 - i1));
            }
            RET(og * EL1(SIGMOID, i1 - i0));
        case Mode::LT:
        case Mode::LEQ:
            return nullptr;
        case Mode::EQ:
            RET_INVALID();
        case Mode::OR:
        case Mode::XOR:
        case Mode::AND:
            return nullptr;
        case Mode::PRELU:
            if (wrt_idx == 0) {
                RET(EL3(PRELU_GRAD, i0, og, i1));
            }
            RET(EL2(SWITCH_GT0, -i0, og * i0));

        // ternary
        case Mode::COND_LEQ_MOV:
            if (wrt_idx <= 1)
                return nullptr;
            RET(EL3(COND_LEQ_MOV, i0, i1, og));
        case Mode::COND_LT_MOV:
            if (wrt_idx <= 1)
                return nullptr;
            RET(EL3(COND_LT_MOV, i0, i1, og));
        case Mode::CLIP:
            if (wrt_idx == 0) {
                RET(EL3(COND_LEQ_MOV, i1, i0, EL3(COND_LEQ_MOV, i0, i2, og)));
            }
            if (wrt_idx == 1) {
                RET(EL3(COND_LEQ_MOV, i0, i1, og));
            }
            RET(EL3(COND_LEQ_MOV, i2, i0, og));

        // fuse oprs
        case Mode::FUSE_MUL_ADD3:
            if (wrt_idx < 2) {
                RET(og * i[wrt_idx ^ 1]);
            } else {
                RET(og);
            }
        case Mode::FUSE_MUL_ADD4:
            RET(og * i[wrt_idx ^ 1]);
        default:
            mgb_throw(
                    GraphError, "grad for elemwise mode %s unimplemented",
                    megdnn::Elemwise::ModeTrait::from_mode(opr.param().mode).name);
    }
#undef EL3
#undef EL2
#undef EL1
#undef RET

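    // if the input could have been broadcast in the forward pass, reduce the
    // gradient back to the input shape; otherwise forward an existing
    // Broadcast so the optimizer can see through it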
    if (opr.input_broadcastable()[wrt_idx]) {
        result = reduce_sum(result, opr::GetVarShape::make(opr.input(wrt_idx)));
    } else if (result.node()->owner_opr()->same_type<Broadcast>()) {
        // forward broadcast for optimizer to work
        result = opr::Broadcast::make(
                result.node()->owner_opr()->input(0),
                opr::GetVarShape::make(i[wrt_idx]));
    }
    if (negate_result)
        result = -result;
    return result.node();
}
#endif

VarNode* Elemwise::sum_grad_list(VarNode* wrt, VarNodeArray& grads) {
    mgb_assert(!grads.empty());
    if (grads.size() == 1)
        return grads[0];
#if MGB_ENABLE_COND_EXEC
    CondExecMerge::modify_grad_sum_list(wrt, grads);
#endif
    VarNodeArray mid_results;
    VarNode* ret;
    if (wrt->owner_graph()->options().graph_opt_level) {
        ret = gopt::GradSumListOptimizer{wrt, grads, mid_results}.get_sum();
    } else {
        ret = gopt::elemwise_reduce_var_list(grads, Elemwise::Mode::ADD, &mid_results);
    }
    mid_results.swap(grads);
    return ret;
}

void Elemwise::record_execute_deps(ExecDependencyArray& deps) {
    record_megdnn_opr(deps);
}

Elemwise::NodeProp* Elemwise::do_make_node_prop() const {
    auto ret = Super::do_make_node_prop();
    for (auto& inp : input()) {
        ret->add_dep_type_existing_var(inp, NodeProp::DepType::VALUE_ALLOW_EMPTY);
    }
    return ret;
}

/* =========================== TypeCvt =========================== */

MGB_DYN_TYPE_OBJ_FINAL_IMPL(TypeCvt);

TypeCvt::TypeCvt(VarNode* inp, DType dest_type, const OperatorNodeConfig& config)
        : Super{inp->owner_graph(),
                config,
                std::string("as") + dest_type.name(),
                {inp}} {
    init_megdnn_opr(*this, {});
    mgb_assert(dest_type.valid());
    add_input({inp});
    add_equivalence_component<ScalarHash<const void*>>(dest_type.handle());
    output(0)->dtype(dest_type).add_flag(VarNode::Flag::ALLOW_EMPTY_SHAPE);
}

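// Usage sketch (with a hypothetical SymbolVar `x`): TypeCvt::make(x,
// dtype::Float16()) inserts a cast operator, and simply returns `x` unchanged
// when the dtype already matches.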
SymbolVar TypeCvt::make(
        SymbolVar input, DType dest_type, const OperatorNodeConfig& config) {
    if (input.dtype() == dest_type)
        return input;
    return input.insert_single_output_opr<TypeCvt>(input.node(), dest_type, config);
}

void TypeCvt::perform(
        DeviceTensorND& dest, DType dest_type, const DeviceTensorND& src,
        intl::UniqPtrWithCN<megdnn::TypeCvt>& opr) {
    mgb_assert(src.comp_node() == opr.comp_node());
    mgb_assert(dest_type.valid());
    if (src.empty()) {
        mgb_assert(dest.empty());
        return;
    }
    if (src.dtype() == dest_type) {
        dest.copy_from(src);
        return;
    }
    src.comp_node().activate();
    dest.comp_node(src.comp_node()).dtype(dest_type).resize(src.shape());
    opr->exec(src.as_megdnn(), dest.as_megdnn());
}

void TypeCvt::add_input_layout_constraint() {
    //! The typecvt implementations on arm/x86/cuda/opencl support
    //! non-contiguous memory, so the layout constraint of typecvt is relaxed
    //! to monotone
    for (auto i : input()) {
        i->add_layout_constraint_monotone();
    }
}

TypeCvt::NodeProp* TypeCvt::do_make_node_prop() const {
    auto ret = Super::do_make_node_prop();
    ret->add_dep_type_existing_var(input(0), NodeProp::DepType::VALUE_ALLOW_EMPTY);
    return ret;
}

#if MGB_ENABLE_GRAD
MGB_IMPL_OPR_GRAD(TypeCvt) {
    MGB_MARK_USED_VAR(wrt_idx);
    auto itype = opr.input(0)->dtype(), otype = opr.output(0)->dtype();
    if (itype.category() == DTypeCategory::FLOAT &&
        otype.category() == DTypeCategory::INT) {
        return nullptr;
    }
    if (itype.category() != DTypeCategory::FLOAT) {
        return InvalidGrad::make(opr, 0);
    }
    return TypeCvt::make(out_grad[0], opr.input(0)->dtype()).node();
}
#endif

void TypeCvt::mem_plan_fwd_in2out_writable() {
    bool cond_low_bit = input(0)->dtype().is_low_bit() &&
                        output(0)->dtype().is_low_bit() &&
                        input(0)->dtype().low_bit() == output(0)->dtype().low_bit();
    bool cond_normal = !input(0)->dtype().is_low_bit() &&
                       !output(0)->dtype().is_low_bit() &&
                       input(0)->dtype().size() == output(0)->dtype().size();
    if ((cond_low_bit || cond_normal) && input(0)->layout().is_contiguous()) {
        output(0)->set_fwd_in2out_writable(input(0));
    }
}

void TypeCvt::scn_do_execute() {
    auto ovar = output(0)->dev_tensor().as_megdnn();
    for (size_t i = 0; i < ovar.layout.ndim; ++i) {
        if (!ovar.layout[i]) {
            // skip execution for empty var
            return;
        }
    }
    megdnn_opr()->exec(input(0)->dev_tensor().as_megdnn(), ovar);
}

void TypeCvt::init_output_static_infer_desc() {
    static StaticInferOpr<megdnn::TypeCvt> static_infer_opr;
    Super::init_output_static_infer_desc();

    using namespace cg::static_infer;

    auto infer_value = [this](DeviceTensorND& dest, const InpVal& inp) {
        auto sopr = static_infer_opr.lock();
        perform(dest, output(0)->dtype(), inp.val.at(0).value(), sopr());
        return true;
    };
    owner_graph()->static_infer_manager().register_value_infer(
            output(0), {SourceType::DEP, {{input(0), DepType::VALUE}}, infer_value});
}

void TypeCvt::record_execute_deps(ExecDependencyArray& deps) {
    record_megdnn_opr(deps);
}

/* =========================== AddUpdate =========================== */

MGB_DYN_TYPE_OBJ_FINAL_IMPL(AddUpdate);

AddUpdate::AddUpdate(
        VarNode* dest, VarNode* delta, const Param& param,
        const OperatorNodeConfig& config)
        : Super{dest->owner_graph(), config, "inplace_add", {dest, delta}},
          m_param{param} {
    auto dest_opr = dest->owner_opr();
    mgb_throw_if(
            dest_opr->same_type<ImmutableTensor>(), GraphError,
            "AddUpdate cannot be applied on ImmutableTensor; ");
    add_input({dest, delta});

    /*
     * here we tell the system that output(0) would force-update input(0); the
     * topo-sorting system would ensure that all the readers finish before
     * executing this AddUpdate operation
     */
    add_output(None)->set_fwd_in2out_writable_force(input(0)).add_flag(
            VarNode::Flag::NO_MEM_RECLAIM);

    mgb_assert(
            m_param.disable->dtype() == dtype::Int32{},
            "dtype of disable flag on AddUpdate must be Int32, got %s actually.",
            m_param.disable->dtype().name());

    add_equivalence_component<ScalarHash<void*>>(m_param.alpha.get());
    add_equivalence_component<ScalarHash<void*>>(m_param.beta.get());
    add_equivalence_component<ScalarHash<void*>>(m_param.bias.get());
    add_equivalence_component<ScalarHash<void*>>(m_param.disable.get());
}

SymbolVar AddUpdate::make(
        SymbolVar dest, SymbolVar delta, const Param& param,
        const OperatorNodeConfig& config) {
    delta = opr::TypeCvt::make(delta, dest.dtype());
    return dest.insert_single_output_opr<AddUpdate>(
            dest.node(), delta.node(), param, config);
}

cg::OperatorNodeBase::NodeProp* AddUpdate::do_make_node_prop() const {
    auto ret = Super::do_make_node_prop();
    ret->add_flag(NodeProp::Flag::FORCE_UPDATE_INPUT_VAR);
    return ret;
}

void AddUpdate::create_megdnn_opr() {
    set_megdnn_opr(
            intl::get_megdnn_handle(comp_node())->create_operator<megdnn::AddUpdate>());
}

void AddUpdate::scn_do_execute() {
    mgb_assert(
            m_param.disable->dtype() == dtype::Int32{},
            "dtype of disable flag on AddUpdate must be Int32, got %s actually.",
            m_param.disable->dtype().name());
    auto disable = m_param.disable->get_cast<int>();
    if (disable == 1)
        return;
    mgb_assert(
            disable == 0,
            "disable flag on AddUpdate can only be 0 or 1,"
            " got %d actually.",
            disable);

    auto&& dest = output(0)->dev_tensor();
    auto&& delta_nobrd = input(1)->dev_tensor();
    auto delta = delta_nobrd.sub(SubTensorSpec::make_from_offset_elem(
            delta_nobrd.layout().broadcast(dest.shape()), 0));
    mgb_assert(input(0)->dev_tensor().raw_ptr() == dest.raw_ptr());
    auto beta = m_param.beta->get_cast<float>();
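    // fast path: when alpha == 0, beta == 1 and bias == 0 the update reduces
    // to a plain copy of delta; otherwise run the megdnn AddUpdate kernel,
    // which computes dest = alpha * dest + beta * delta + bias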
    if (!m_param.alpha->get_cast<bool>() && beta == 1 &&
        !m_param.bias->get_cast<bool>()) {
        dest.copy_from_fixlayout(delta);
    } else {
        auto opr = static_cast<megdnn::AddUpdate*>(megdnn_opr());
        opr->param() = {
                m_param.alpha->get_cast<float>(), beta,
                m_param.bias->get_cast<float>()};
        opr->exec(dest.as_megdnn(), delta.as_megdnn());
    }
}

void AddUpdate::init_output_static_infer_desc() {
    using namespace cg::static_infer;

    owner_graph()->static_infer_manager().register_shape_infer(
            output(0), ShapeInferDesc::make_identity(input(0)));
}

void AddUpdate::record_execute_deps(ExecDependencyArray& deps) {
    record_megdnn_opr(deps);
}

#if MGB_ENABLE_GRAD
MGB_IMPL_OPR_GRAD(AddUpdate) {
    // actually valid, just not implemented
    return InvalidGrad::make(opr, wrt_idx);
}
#endif

/* =========================== Reduce =========================== */

class Reduce::KernScheduler {
    class ValueDep final : public ExecDependency {
        DeviceTensorStorage m_val;

    public:
        explicit ValueDep(DeviceTensorStorage val) : m_val(std::move(val)) {}
    };

public:
    bool has_actual_computing() const {
        mgb_assert(m_shape_computed);
        return !m_kern_param.empty() || m_apply_side_effect;
    }

    size_t workspace_size() const { return m_workspace_spec[2].end(); }

    bool shape_computed() const { return m_shape_computed; }

    //! init shapes in kern param
    void init_shapes(
            megdnn::Reduce* opr, CompNode comp_node, DType dtype, Mode mode,
            TensorShape ishp, TensorShape oshp, const Param::DataType data_type);

    void setup_kern_params_layout_and_mode(
            Mode mode, DType inp_dtype, TensorShape& inp_shp, const Param::DataType);

    void check_shapes(const TensorShape& ishp, const TensorShape& oshp) {
        mgb_assert(m_prev_ishp.eq_shape(ishp) && m_prev_oshp.eq_shape(oshp));
    }

    //! update pointers in kern param; the tensors must have been allocated
    void update_ptr(
            const DeviceTensorND& input, const DeviceTensorND& dest,
            const DeviceTensorND& workspace);

    void execute(
            megdnn::Reduce* opr, const DeviceTensorND& input,
            const DeviceTensorND& dest);

    void record_execute_deps(ExecDependencyArray& deps) {
        if (m_elemwise_trans_opr) {
            deps.emplace_back(std::make_unique<intl::MegDNNGraphDep>(
                    std::move(m_elemwise_trans_opr)));
        }
        if (m_typecvt_opr) {
            deps.emplace_back(
                    std::make_unique<intl::MegDNNGraphDep>(std::move(m_typecvt_opr)));
        }
        deps.emplace_back(std::make_unique<ValueDep>(m_side_affect_wkspc.storage()));
    }

private:
    struct KernParam {
        megdnn::TensorND input, output;

        //! param passed to megdnn
        megdnn::param::Reduce kparam;

        megdnn::Workspace workspace;

        KernParam(Mode mode, int32_t ra) : kparam{mode, ra} {}
    };

    struct SubWorkspace {
        size_t size, offset;
        size_t end() const { return size + offset; }
    };

    void update_kparam_for_elemwise_side_effect(
            CompNode comp_node, Mode mode, const Param::DataType data_type);

    bool m_shape_computed = false;
    std::vector<KernParam> m_kern_param;
    TensorShape m_prev_ishp, m_prev_oshp;
    SubWorkspace m_workspace_spec[3];  //! two tmp outputs, kern workspace

    /*!
     * Some reduce modes (like SUM_SQR) carry an element-wise transform as a
     * side effect. If that is the case and there are no kernel params,
     * m_apply_side_effect would be non-null.
     */
    thin_function<void(const DeviceTensorND& in, const DeviceTensorND& out)>
            m_apply_side_effect;
    std::unique_ptr<megdnn::Elemwise> m_elemwise_trans_opr;
    std::unique_ptr<megdnn::TypeCvt> m_typecvt_opr;
    std::unique_ptr<megdnn::Fill> m_fill_opr;
    DeviceTensorND m_side_affect_wkspc;
};

void Reduce::KernScheduler::setup_kern_params_layout_and_mode(
        Mode mode, DType inp_dtype, TensorShape& ishp,
        const Param::DataType data_type) {
    auto prev_dtype = inp_dtype;
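    // For FLOAT_O16xC32 / FLOAT_O32xC32 the intermediate reductions are kept
    // in Float32; only the last kernel of an O16 chain converts its output to
    // Float16.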
    for (size_t idx = 0; idx < m_kern_param.size(); ++idx) {
        auto&& i = m_kern_param[idx];

#if !MEGDNN_DISABLE_FLOAT16
        if (idx == 0 && data_type == Param::DataType::FLOAT_O32xC32) {
            i.input.layout.dtype = inp_dtype;
            i.output.layout.dtype = dtype::Float32();
            i.kparam.data_type = data_type;
        } else if (data_type == Param::DataType::FLOAT_O16xC32) {
            i.input.layout.dtype = prev_dtype;
            if (idx + 1 == m_kern_param.size()) {
                i.output.layout.dtype = dtype::Float16();
                i.kparam.data_type = data_type;
            } else {
                i.output.layout.dtype = dtype::Float32();
                i.kparam.data_type = Param::DataType::FLOAT_O32xC32;
            }
        } else
#endif
        {
            mgb_assert(
                    data_type == Param::DataType::DEFAULT ||
                    (data_type == Param::DataType::FLOAT_O32xC32 && idx));
            i.input.layout.dtype = prev_dtype;
            i.output.layout.dtype = prev_dtype;
            i.kparam.data_type = Param::DataType::DEFAULT;
        }
        prev_dtype = i.output.layout.dtype;

        i.input.layout.init_contiguous_stride(ishp);
        ishp.shape[i.kparam.axis] = 1;
        i.output.layout.init_contiguous_stride(ishp);
    }
    if (mode == Mode::SUM_SQR) {
        for (size_t i = 1; i < m_kern_param.size(); ++i)
            m_kern_param[i].kparam.mode = Mode::SUM;
    }
}

void Reduce::KernScheduler::init_shapes(
        megdnn::Reduce* opr, CompNode comp_node, DType inp_dtype, Mode mode,
        TensorShape ishp, TensorShape oshp, const Param::DataType data_type) {
    mgb_assert(ishp.ndim && oshp.ndim);

    if (ishp.eq_shape(m_prev_ishp) && oshp.eq_shape(m_prev_oshp))
        return;

    m_prev_ishp = ishp;
    m_prev_oshp = oshp;

    m_kern_param.clear();

    if (oshp.is_scalar()) {
        // if ishp is non-contiguous, add_layout_constraint_contiguous would be
        // added; so we do not have to worry about this
        ishp.shape[0] = ishp.total_nr_elems();
        ishp.ndim = 1;
    }

    mgb_assert(
            oshp.ndim == ishp.ndim,
            "input and output ndim mismatch for reduction: ishp=%s oshp=%s",
            ishp.to_string().c_str(), oshp.to_string().c_str());

    for (size_t i = 0; i < ishp.ndim; ++i) {
        if (ishp.shape[i] != oshp.shape[i]) {
            mgb_assert(
                    oshp.shape[i] == 1,
                    "input and output shape mismatch for reduction: "
                    "ishp=%s oshp=%s",
                    ishp.to_string().c_str(), oshp.to_string().c_str());
        }
    }

    auto remove_axis = [](TensorShape& shp, size_t ax) {
        mgb_assert(shp.ndim > 1);
        for (auto i = ax + 1; i < shp.ndim; ++i)
            shp.shape[i - 1] = shp.shape[i];
        --shp.ndim;
    };

    // collapse consecutive shape-1 axes in oshp
    for (size_t i = 0; i < oshp.ndim; ++i) {
        auto start = i;
        while (i < oshp.ndim && oshp.shape[i] == 1)
            ++i;

        if (start + 1 < i) {
            for (auto j = start + 1; j < i; ++j)
                ishp.shape[start] *= ishp.shape[j];

            for (auto j = start + 1; j < i; ++j) {
                remove_axis(ishp, start + 1);
                remove_axis(oshp, start + 1);
            }

            i = start;
        }
    }

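    // one reduce kernel is scheduled per remaining axis whose size differs
    // between ishp and oshp, i.e. per axis that is actually being reduced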
    for (uint32_t i = 0; i < ishp.ndim; ++i) {
        if (ishp.shape[i] != oshp.shape[i]) {
            mgb_assert(oshp.shape[i] == 1);
            m_kern_param.push_back({mode, static_cast<int32_t>(i)});
        }
    }
    // sort according to reduction size, so workspace can be smaller
    small_sort(
            m_kern_param.begin(), m_kern_param.end(),
            [&](const KernParam& a, const KernParam& b) {
                return ishp.shape[a.kparam.axis] > ishp.shape[b.kparam.axis];
            });

    // init kparam input/output layout
    setup_kern_params_layout_and_mode(mode, inp_dtype, ishp, data_type);

    // init workspace size
    memset(m_workspace_spec, 0, sizeof(m_workspace_spec));

    for (auto&& i : m_kern_param) {
        opr->param() = i.kparam;
        i.workspace.size = opr->get_workspace_in_bytes(i.input.layout, i.output.layout);
        update_max(m_workspace_spec[2].size, i.workspace.size);
    }

    mgb_assert(ishp.eq_shape(oshp));

    if (m_kern_param.size() >= 2) {
        m_workspace_spec[0].size = m_kern_param[1].input.layout.span().high_byte;
    }
    if (m_kern_param.size() >= 3) {
        m_workspace_spec[1].size = m_kern_param[2].input.layout.span().high_byte;
    }

    auto align = comp_node.get_mem_addr_alignment();
    for (int i = 0; i < 2; ++i) {
        m_workspace_spec[i + 1].offset =
                get_aligned_power2(m_workspace_spec[i].end(), align);
    }

    update_kparam_for_elemwise_side_effect(comp_node, mode, data_type);

    m_shape_computed = true;
}

void Reduce::KernScheduler::update_kparam_for_elemwise_side_effect(
        CompNode comp_node, Mode mode, const Param::DataType data_type) {
    m_apply_side_effect = nullptr;
    m_elemwise_trans_opr.reset();
    m_typecvt_opr.reset();
    if (!m_kern_param.empty()) {
        // no need to set m_apply_side_effect
        return;
    } /* else */
    // case A: input.layout == output.layout
    // case B: input.total_nr_elems == 1 and output is a scalar

    if (mode == Mode::SUM_SQR) {
        m_elemwise_trans_opr =
                intl::get_megdnn_handle(comp_node)->create_operator<megdnn::Elemwise>();
        m_elemwise_trans_opr->param() = {Elemwise::Mode::MUL};
    }
    if (data_type != Param::DataType::DEFAULT) {
        m_side_affect_wkspc = DeviceTensorND{comp_node, dtype::Float32()};
        m_typecvt_opr =
                intl::get_megdnn_handle(comp_node)->create_operator<megdnn::TypeCvt>();
    }
    if (!m_typecvt_opr && !m_elemwise_trans_opr)
        return;

    m_apply_side_effect = [this](const DeviceTensorND& in, const DeviceTensorND& out) {
        if (m_typecvt_opr) {
            m_side_affect_wkspc.resize(in.shape());
        }
        if (!m_elemwise_trans_opr) {
            mgb_assert(m_typecvt_opr);
            m_typecvt_opr->exec(in.as_megdnn(), out.as_megdnn());
            return;
        }
        auto im = in.as_megdnn();
        megdnn::TensorND wm;
        if (m_typecvt_opr && in.dtype() != m_side_affect_wkspc.dtype()) {
            m_side_affect_wkspc.resize(in.shape());
            wm = m_side_affect_wkspc.as_megdnn();
            m_typecvt_opr->exec(im, wm);
        } else {
            wm = im;
        }
        if (m_typecvt_opr && wm.layout.dtype != out.dtype()) {
            m_elemwise_trans_opr->exec({wm, wm}, wm);
            m_typecvt_opr->exec(wm, out.as_megdnn());
        } else {
            auto&& wshp = wm.layout;
            if (wshp.ndim != out.layout().ndim) {
                // ensure that wkspc.ndim equals out.ndim in the case
                // wkspc.shape=(1, 1, ..., 1) and out.shape=(1); otherwise the
                // 'TensorShape Dimension' assertion in the following broadcast
                // operator may fail
                mgb_assert(wshp.total_nr_elems() == 1 && out.layout().ndim == 1);
                wshp.ndim = 1;
            }
            m_elemwise_trans_opr->exec({wm, wm}, out.as_megdnn());
        }
    };
}

void Reduce::KernScheduler::update_ptr(
        const DeviceTensorND& input, const DeviceTensorND& dest,
        const DeviceTensorND& workspace) {
    auto dtype = dest.layout().dtype;
    mgb_assert(dtype.valid());
    mgb_assert(m_shape_computed);

    if (workspace_size()) {
        mgb_assert(
                workspace.layout().dtype == dtype::Byte() &&
                workspace.layout().ndim == 1 &&
                workspace.shape()[0] >= workspace_size());
    }

    if (m_kern_param.empty())
        return;

    mgb_assert(
            input.layout().total_nr_elems() ==
            m_kern_param[0].input.layout.total_nr_elems());
    mgb_assert(
            dest.shape().total_nr_elems() ==
            m_kern_param.back().output.layout.total_nr_elems());
    auto in_tensor = input.as_megdnn();
    in_tensor.layout = m_kern_param[0].input.layout;
    m_kern_param[0].input = in_tensor;

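    // chain the kernels: intermediate results ping-pong between the two
    // temporary buffers carved out of the workspace, and the last kernel
    // writes directly into dest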
    dt_byte *workspace_begin = workspace_size()
                                     ? const_cast<dt_byte*>(workspace.raw_ptr())
                                     : nullptr,
            *tmp_reduce_ptr[2] =
                    {workspace_begin + m_workspace_spec[0].offset,
                     workspace_begin + m_workspace_spec[1].offset},
            *kern_workspace = workspace_begin + m_workspace_spec[2].offset;
    for (size_t i = 0; i < m_kern_param.size() - 1; ++i) {
        auto optr = tmp_reduce_ptr[i % 2];
        m_kern_param[i].output.reset_ptr(optr);
        m_kern_param[i + 1].input.reset_ptr(optr);
    }
    for (auto&& i : m_kern_param)
        i.workspace.raw_ptr = kern_workspace;
    auto out_tensor = dest.as_megdnn();
    out_tensor.layout = m_kern_param.back().output.layout;
    m_kern_param.back().output = out_tensor;
}

void Reduce::KernScheduler::execute(
        megdnn::Reduce* opr, const DeviceTensorND& input, const DeviceTensorND& dest) {
    if (m_apply_side_effect) {
        mgb_assert(m_kern_param.empty());
        m_apply_side_effect(input, dest);
        return;
    }

    mgb_assert(!m_kern_param.empty());

    // empty input
    if (input.shape_valid() && input.empty()) {
        auto mode = m_kern_param[0].kparam.mode;
        if (!m_fill_opr) {
            m_fill_opr = intl::get_megdnn_handle(dest.comp_node())
                                 ->create_operator<megdnn::Fill>();
        }
        std::string err_msg;
        switch (mode) {
            case Reduce::Mode::SUM:
                if (!dest.empty()) {
                    m_fill_opr->param() = 0;
                    m_fill_opr->exec(dest.as_megdnn(), {});
                }
                break;
            case Reduce::Mode::PRODUCT:
                if (!dest.empty()) {
                    m_fill_opr->param() = 1;
                    m_fill_opr->exec(dest.as_megdnn(), {});
                }
                break;
            case Reduce::Mode::MEAN:
                err_msg = "mean";
                break;
            case Reduce::Mode::MIN:
                err_msg = "min";
                break;
            case Reduce::Mode::MAX:
                err_msg = "max";
                break;
            case Reduce::Mode::SUM_SQR:
                err_msg = "sum_sqr";
                break;
            default:
                mgb_throw(MegBrainError, "bad reduce mode");
        }
        if (!err_msg.empty()) {
            mgb_throw(
                    MegBrainError, "empty input is not allowed for reduce mode: %s",
                    err_msg.c_str());
        }
        return;
    }
    mgb_assert(
            input.layout().is_contiguous() &&
            input.raw_ptr() == m_kern_param[0].input.raw_ptr() &&
            dest.raw_ptr() == m_kern_param.back().output.raw_ptr());
    for (auto&& i : m_kern_param) {
        opr->param() = i.KernParam::kparam;
        opr->exec(i.input, i.output, i.workspace);
    }
}

class Reduce::OutTensorShapeExtender {
public:
    OutTensorShapeExtender(const TensorShape& ishp, const TensorShape& oshp)
            : m_oshp(oshp) {
        mgb_assert(
                oshp.ndim <= ishp.ndim,
                "output ndim should be less and equal than input ndim for "
                "reduction: "
                "ishp=%s oshp=%s",
                ishp.to_string().c_str(), oshp.to_string().c_str());
        // Ex. ishp = (a, b, c, d), oshp = (c, d) -> canonized oshp = (1, 1, c, d)
        if (!oshp.is_scalar() && ishp.ndim != oshp.ndim) {
            size_t ndim_diff = ishp.ndim - oshp.ndim;
            auto&& canonized_oshp = m_canonized_oshp_storage.emplace(oshp);
            for (size_t i = 0; i < ishp.ndim; ++i)
                if (i < ndim_diff)
                    canonized_oshp[i] = 1;
                else
                    canonized_oshp[i] = oshp[i - ndim_diff];
            canonized_oshp.ndim = ishp.ndim;
        }
    }

    const TensorShape& get() const {
        return m_canonized_oshp_storage.valid() ? m_canonized_oshp_storage.val()
                                                : m_oshp;
    }

private:
    Maybe<TensorShape> m_canonized_oshp_storage;
    const TensorShape& m_oshp;
};

MGB_DYN_TYPE_OBJ_FINAL_IMPL(Reduce);
Reduce::Reduce(
        VarNode* inp, VarNode* target_shape, const Param& param,
        const OperatorNodeConfig& config)
        : Super{inp->owner_graph(),
                config,
                ssprintf("reduce%d", static_cast<int>(param.mode)),
                {inp}},
          m_param{param},
          m_kern_scheduler{std::make_unique<KernScheduler>()} {
    add_input({inp});

    if (inp->dtype().enumv() == DTypeEnum::Quantized8Asymm &&
        inp->dtype().category() == DTypeCategory::QUANTIZED) {
        mgb_assert(
                param.mode != Param::Mode::PRODUCT,
                "Reduce does not support PRODUCT mode on quantized input");
        mgb_assert(
                param.mode != Param::Mode::SUM_SQR,
                "Reduce does not support SUM_SQR mode on quantized input");
        mgb_assert(
                param.mode != Param::Mode::SUM,
                "Reduce does not support SUM mode on quantized input");
    }

    DType out_dtype;
    switch (param.data_type) {
        case Param::DataType::DEFAULT:
            out_dtype = inp->dtype();
            break;
#if !MEGDNN_DISABLE_FLOAT16
        case Param::DataType::FLOAT_O16xC32:
            out_dtype = dtype::Float16();
            break;
        case Param::DataType::FLOAT_IO16xC32:
            mgb_assert(false);
#endif
        case Param::DataType::FLOAT_O32xC32:
            out_dtype = dtype::Float32();
            break;
        case Param::DataType::QUINT_I8xO32:
            out_dtype = dtype::QuantizedS32(
                    inp->dtype().param<dtype::Quantized8Asymm>().scale);
            break;
        case Param::DataType::QINT_I8xO32:
            out_dtype =
                    dtype::QuantizedS32(inp->dtype().param<dtype::QuantizedS8>().scale);
            break;
        default:
            mgb_throw(GraphError, "invalid param data_type: %d", int(param.data_type));
    }
    add_output(None)->add_flag(VarNode::Flag::ALLOW_EMPTY_SHAPE).dtype(out_dtype);
    cg::add_workspace_output(this);

    add_equivalence_component<PODHash<Param>>(&m_param);

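    // an axis outside [-MEGDNN_MAX_NDIM, MEGDNN_MAX_NDIM) acts as a sentinel:
    // the reduction target is then given by the target_shape input instead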
    if (param.axis >= -MEGDNN_MAX_NDIM && param.axis < MEGDNN_MAX_NDIM) {
        mgb_throw_if(
                target_shape, GraphError,
                "could not specify both axis and target shape");
        m_is_symtshp = false;
    } else {
        mgb_throw_if(
                !target_shape, GraphError, "neither axis nor target_shape specified");
        add_input({target_shape});
        m_is_symtshp = true;

        outshape_by_symvar_enable(0, 1);
    }
}

Reduce::~Reduce() = default;

SymbolVar Reduce::make(
        SymbolVar src, Param param, SymbolVar target_shape,
        const OperatorNodeConfig& config) {
    if (param.data_type == Param::DataType::FLOAT_IO16xC32) {
        mgb_log_warn(
                "DataType FLOAT_IO16xC32 has been deprecated "
                "use FLOAT_O16xC32 instead");
        param.data_type = Param::DataType::FLOAT_O16xC32;
    }

    if (param.mode == Mode::SUM && src.node()->owner_opr()->same_type<Elemwise>()) {
        // replace sum(x^2) by sum_sqr(x)
        auto&& opr = src.node()->owner_opr()->cast_final<Elemwise>();
        if (opr.param().mode == Elemwise::Mode::POW) {
            mgb_assert(opr.input().size() == 2);
            auto pow = SymbolVar{opr.input(1)}.as_immutable_scalar();
            if (pow.valid() && pow->get_cast<float>() == 2) {
                src = opr.input(0);
                param.mode = Mode::SUM_SQR;
            }
        }
    }
    return src.insert_single_output_opr<Reduce>(
            src.node(), target_shape.node(), param, config);
}

void Reduce::outshape_by_symvar_do_get_output_shape(
        TensorShape& dest, const ShapeInferInfo& shpinfo) {
    cg::copy_tensor_value_to_shape(dest, *shpinfo.shpval_inp_val.at(0));
}

void Reduce::init_output_static_infer_desc() {
    using namespace cg::static_infer;
    auto&& mgr = owner_graph()->static_infer_manager();

    // infer output shape
    if (m_is_symtshp) {
        // reduce to target shape
        Super::init_output_static_infer_desc();
    } else {
        // reduce along axis
        auto infer_shape = [this](TensorShape& dest, const InpVal& inp) {
            dest = inp.val.at(0).shape();
            mgb_assert(
                    m_param.axis < static_cast<int>(dest.ndim) &&
                            m_param.axis >= -static_cast<int>(dest.ndim),
                    "invalid axis for reduction: shape=%s axis=%d",
                    dest.to_string().c_str(), m_param.axis);
            int real_axis = m_param.axis;
            if (real_axis < 0)
                real_axis += dest.ndim;
            dest.shape[real_axis] = 1;
            return true;
        };
        mgr.register_shape_infer(
                output(0),
                {SourceType::DEP, {{input(0), DepType::SHAPE}}, infer_shape});
    }

    // infer workspace
    auto infer_workspace = [this](TensorShape& dest, const InpVal& inp) {
        init_kern_sched_shape(inp.val[0].shape(), inp.val[1].shape());
        dest.ndim = 1;
        dest.shape[0] = m_kern_scheduler->workspace_size();
        return true;
    };
    mgr.register_shape_infer(
            output(1), {SourceType::DEP,
                        {{input(0), DepType::SHAPE}, {output(0), DepType::SHAPE}},
                        infer_workspace});

    // infer value

    static StaticInferOpr<megdnn::Reduce> static_infer_opr;
    auto infer_value = [this](DeviceTensorND& dest, const InpVal& inp) {
        DeviceTensorND workspace;
        auto sopr = static_infer_opr.lock();
        perform(m_param.mode, dest, workspace, inp.val[0].value(), output(0)->dtype(),
                inp.val.at(1).shape(), sopr(), m_param.data_type);
        return true;
    };

    mgr.register_value_infer(
            output(0), {SourceType::DEP,
                        {{input(0), DepType::VALUE}, {output(0), DepType::SHAPE}},
                        infer_value});
}

void Reduce::init_kern_sched_shape(const TensorShape& ishp, const TensorShape& oshp) {
    OutTensorShapeExtender extender(ishp, oshp);
    auto&& canonized_oshp = extender.get();
    m_kern_scheduler->init_shapes(
            static_cast<megdnn::Reduce*>(megdnn_opr()), comp_node(), input(0)->dtype(),
            m_param.mode, ishp, canonized_oshp, m_param.data_type);
}

cg::OperatorNodeBase::OprEventCallback Reduce::get_opr_event_callback() {
    auto on_mem_status_changed = [this]() {
        auto&& ishp = input(0)->shape();
        auto&& oshp = output(0)->shape();
        OutTensorShapeExtender extender(ishp, oshp);
        auto&& canonized_oshp = extender.get();
        m_kern_scheduler->check_shapes(input(0)->shape(), canonized_oshp);
        m_kern_scheduler->update_ptr(
                input(0)->dev_tensor(), output(0)->dev_tensor(),
                output(1)->shape()[0] ? output(1)->dev_tensor() : DeviceTensorND{});
    };
    return {on_mem_status_changed};
}

void Reduce::mem_plan_fwd_in2out_readonly() {
    init_kern_sched_shape(input(0)->shape(), output(0)->shape());

    if (!m_kern_scheduler->has_actual_computing()) {
        // forward memory if no actual computing needed

        if (!output(0)->mem_plan().valid()) {
            // output(0) is dynamic but we are currently in the static alloc
            // phase (for workspace)
            return;
        }
        auto&& ily = input(0)->layout();
        auto&& oly = output(0)->layout();
        const TensorLayout* fwd_spec = nullptr;
        Maybe<TensorLayout> ily_modified_storage;

        if (!ily.eq_shape(oly)) {
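            // input only differs by extra leading 1-dims; reshape its layout
            // to the output shape so the buffer can be forwarded read-only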
            auto&& ily_modified = ily_modified_storage.emplace(ily);
            mgb_assert(ily.ndim > oly.ndim);
            for (size_t i = 0; i < ily.ndim - oly.ndim; ++i)
                mgb_assert(ily.shape[i] == 1);
            ily_modified = ily_modified.reshape(oly);
            fwd_spec = &ily_modified;
        } else {
            fwd_spec = &ily;
        }
        m_mem_fwd_success = output(0)->set_fwd_in2out_readonly(
                input(0), SubTensorSpec::make_from_layout(*fwd_spec));
    }
}

void Reduce::add_input_layout_constraint() {
    if (!cg::is_static_var_shape(output(0))) {
        // output shape cannot be inferred; require contiguous input to be safe
        input(0)->add_layout_constraint_contiguous();
    } else {
        auto check = [this](const TensorLayout& ily) {
            auto&& mgr = owner_graph()->static_infer_manager();
            auto oshp = mgr.infer_shape(output(0));
            init_kern_sched_shape(ily, oshp);
            if (m_kern_scheduler->has_actual_computing())
                return ily.is_contiguous();
            return true;
        };
        input(0)->add_layout_constraint(check);
    }
}

void Reduce::scn_do_execute() {
    auto&& inp = input(0)->dev_tensor();
    auto&& out = output(0)->dev_tensor();
    auto&& ishp = input(0)->shape();
    auto&& oshp = output(0)->shape();
    const DeviceTensorND* out_ptr;
    Maybe<DeviceTensorND> canonized_storage;
    OutTensorShapeExtender extender(ishp, oshp);
    auto&& canonized_oshp = extender.get();
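    // if the stored output has fewer dims than the canonical shape, build a
    // reshaped view so it matches what the kern scheduler was set up with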
    if (canonized_oshp.ndim != out.shape().ndim) {
        auto&& canonized_out = canonized_storage.emplace(out);
        canonized_out.reset(
                canonized_out.storage(),
                canonized_out.layout().reshape(canonized_oshp));
        out_ptr = &canonized_out;
    } else {
        out_ptr = &out;
    }
    // shapes were initialized earlier, either while deducing the workspace, in
    // mem_plan_fwd_in2out_readonly, or while checking the input layout
    m_kern_scheduler->check_shapes(inp.shape(), out_ptr->shape());

    if (m_kern_scheduler->has_actual_computing()) {
        m_kern_scheduler->update_ptr(
                inp, *out_ptr,
                output(1)->shape()[0] ? output(1)->dev_tensor() : DeviceTensorND{});
        m_kern_scheduler->execute(
                static_cast<megdnn::Reduce*>(megdnn_opr()), inp, *out_ptr);
    } else {
        // no reduction needed, just forward
        if (m_mem_fwd_success) {
            mgb_assert(
                    inp.raw_ptr() == out_ptr->raw_ptr() &&
                    out_ptr->layout().total_nr_elems() ==
                            inp.layout().total_nr_elems());
        } else {
            if (!out_ptr->shape().eq_shape(inp.shape())) {
                mgb_assert(
                        out_ptr->shape().is_scalar() &&
                        inp.shape().total_nr_elems() == 1);
                out_ptr->sub(SubTensorSpec::make_from_layout(inp.layout()))
                        .copy_from_fixlayout(inp);
            } else {
                out_ptr->copy_from_fixlayout(inp);
            }
        }
    }
}

void Reduce::perform(
        Mode mode, DeviceTensorND& dest, DeviceTensorND& workspace,
        const DeviceTensorND& input, const DType& target_dtype,
        const TensorShape& target_shape, intl::UniqPtrWithCN<megdnn::Reduce>& opr,
        const Param::DataType data_type) {
    mgb_assert(
            !dest.storage().comp_node_valid() || opr.comp_node() == dest.comp_node());
    KernScheduler ksched;
    OutTensorShapeExtender extender(input.shape(), target_shape);
    auto&& canonized_oshp = extender.get();
    ksched.init_shapes(
            opr.get(), opr.comp_node(), input.layout().dtype, mode, input.shape(),
            canonized_oshp, data_type);

    if (!ksched.has_actual_computing()) {
        mgb_assert(target_shape.total_nr_elems() == input.layout().total_nr_elems());
        dest.copy_from(input);
        dest.reset(dest.storage(), {target_shape, dest.dtype()});
        return;
    }

    workspace.comp_node(opr.comp_node()).dtype(dtype::Byte());
    size_t workspace_size = ksched.workspace_size();
    DeviceTensorND input_contig_storage;
    const DeviceTensorND* input_contig = &input;
    if (!input.layout().is_contiguous()) {
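        // non-contiguous input: enlarge the workspace and place a contiguous
        // copy of the input at an aligned offset past the kern workspace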
        auto offset = get_aligned_power2(
                workspace_size, opr.comp_node().get_mem_addr_alignment());
        workspace_size = offset + input.dtype().size(input.shape().total_nr_elems());

        workspace.resize({workspace_size});
        input_contig_storage
                .reset(workspace.storage().sub(offset), {input.shape(), input.dtype()})
                .copy_from(input);
        input_contig = &input_contig_storage;
    } else {
        workspace.resize({workspace_size});
    }

    opr.comp_node().activate();
    dest.comp_node(opr.comp_node()).dtype(target_dtype).resize(target_shape);
    ksched.update_ptr(*input_contig, dest, workspace);
    ksched.execute(opr.get(), *input_contig, dest);
}

Reduce::NodeProp* Reduce::do_make_node_prop() const {
    auto ret = Super::do_make_node_prop();
    ret->add_dep_type_existing_var(input(0), NodeProp::DepType::VALUE_ALLOW_EMPTY);
    return ret;
}

void Reduce::create_megdnn_opr() {
    set_megdnn_opr(
            intl::get_megdnn_handle(comp_node())->create_operator<megdnn::Reduce>());
}

#if MGB_ENABLE_GRAD
MGB_IMPL_OPR_GRAD(Reduce) {
    for (size_t i = 1; i < opr.output().size(); ++i)
        mgb_assert(!out_grad[i]);
    if (wrt_idx || opr.input(0)->dtype().category() != DTypeCategory::FLOAT)
        return InvalidGrad::make(opr, wrt_idx);
    SymbolVar og{out_grad[0]}, iv{opr.input(0)}, ov{opr.output(0)};
    constexpr auto cmv = Elemwise::Mode::COND_LEQ_MOV;
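    // COND_LEQ_MOV(a, b, c) forwards c where a <= b, so the MIN/MAX gradients
    // below flow only to the input elements that attained the reduced value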
    using Mode = Reduce::Mode;
    SymbolVar grad = [&]() {
        switch (opr.param().mode) {
            case Mode::SUM:
                return Broadcast::make(og, GetVarShape::make(iv));
            case Mode::SUM_SQR:
                return (og * og.make_scalar_dt(2) * iv);
            case Mode::PRODUCT:
                return ((og * ov) / iv);
            case Mode::MIN:
                return Elemwise::make({iv, ov, og}, cmv);
            case Mode::MAX:
                return Elemwise::make({ov, iv, og}, cmv);
            case Mode::MEAN: {
                auto og_shape = opr::GetVarShape::make(og),
                     iv_shape = opr::GetVarShape::make(iv),
                     scale =
                             div(opr::reduce_prod(og_shape, og_shape.make_scalar(1)),
                                 opr::reduce_prod(iv_shape, iv_shape.make_scalar(1)));
                return scale * Broadcast::make(og, GetVarShape::make(iv));
            }
            default:
                mgb_throw(MegBrainError, "bad reduce mode");
        }
    }();
    grad = TypeCvt::make(grad, iv.dtype());
    return grad.node();
}
#endif

void Reduce::record_execute_deps(ExecDependencyArray& deps) {
    record_megdnn_opr(deps);
    m_kern_scheduler->record_execute_deps(deps);
}

/* =========================== PowC =========================== */

MGB_DYN_TYPE_OBJ_FINAL_IMPL(PowC);

PowC::PowC(VarNode* i0, const Param& param, const OperatorNodeConfig& config)
        : Super(OperatorNodeBaseCtorParam{
                  i0->owner_graph(), config, ssprintf("powc_%g", param.exp), {i0}}) {
    init_megdnn_opr(*this, param);
    add_input({i0});
    output(0)->add_flag(VarNode::Flag::ALLOW_EMPTY_SHAPE);
    intl::MegDNNOprInitPostCtor<PowC>::apply(*this);
}

SymbolVar PowC::make(
        SymbolVar x, const Param& param, const OperatorNodeConfig& config) {
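    // fold trivial exponents at graph construction time: x^1 -> x and
    // x^0 -> a broadcast constant 1 with the same shape as x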
    if (almost_equal(param.exp, 1.f)) {
        return x;
    }
    if (almost_equal(param.exp, 0.f)) {
        return x.make_scalar_dt(1).broadcast(x.symshape());
    }
    return x.insert_single_output_opr<PowC>(x.node(), param, config);
}

void PowC::add_input_layout_constraint() {
    input(0)->add_layout_constraint_monotone();
}

void PowC::mem_plan_fwd_in2out_writable() {
    output(0)->set_fwd_in2out_writable(input(0));
}

void PowC::init_output_static_infer_desc() {
    Super::init_output_static_infer_desc();
    static StaticInferOpr<megdnn::PowC> static_infer_opr;
    using namespace cg::static_infer;

    auto infer_value = [this](DeviceTensorND& dest, const InpVal& inp) {
        auto infer_opr_lock = static_infer_opr.lock();
        auto&& infer_opr = infer_opr_lock();
        infer_opr->param() = this->param();
        auto&& ival = inp.val[0].value().as_megdnn();
        infer_opr->exec(ival, dest.resize(ival.layout).as_megdnn());
        return true;
    };
    owner_graph()->static_infer_manager().register_value_infer(
            output(0), {SourceType::DEP, {{input(0), DepType::VALUE}}, infer_value});
}

void PowC::scn_do_execute() {
    if (input(0)->dev_tensor().empty()) {
        mgb_assert(output(0)->dev_tensor().empty());
        return;
    }
    mgb_assert(!output(0)->dev_tensor().empty());
    Super::scn_do_execute();
}

PowC::NodeProp* PowC::do_make_node_prop() const {
    auto ret = Super::do_make_node_prop();
    ret->add_dep_type_existing_var(input(0), NodeProp::DepType::VALUE_ALLOW_EMPTY);
    return ret;
}

#if MGB_ENABLE_GRAD
MGB_IMPL_OPR_GRAD(PowC) {
    auto exp = opr.param().exp;
    return (exp * SymbolVar{out_grad[0]} *
            PowC::make(opr.input(0), exp - 1, opr.config()))
            .node();
}
#endif

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}