/**
 * \file src/gopt/impl/tensor_reformat.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
 */

#include "megbrain/gopt/basic_arith.h"
#include "megbrain/gopt/gtrans.h"
#include "megbrain/gopt/inference.h"
#include "megbrain/graph/event.h"
#include "megbrain/opr/basic_arith.h"
#include "megbrain/opr/blas.h"
#include "megbrain/opr/dnn/batch_norm.h"
#include "megbrain/opr/dnn/convolution.h"
#include "megbrain/opr/dnn/local.h"
#include "megbrain/opr/dnn/pooling.h"
#include "megbrain/opr/imgproc.h"
#include "megbrain/opr/misc.h"
#include "megbrain/opr/nn_int.h"
#include "megbrain/opr/tensor_manip.h"
#include "megbrain/opr/utility.h"
#include "megbrain/serialization/opr_shallow_copy.h"
#include "megbrain/utils/shared_set.h"

#include "megdnn/opr_param_defs.h"
#include "megdnn/tensor_format.h"

#include "megbrain/opr/internal/megdnn_opr_wrapper.h"

#if MGB_ENABLE_TENSOR_RT
#include "megbrain/tensorrt/tensorrt_opr.h"
#endif

#include "megbrain/gopt/misc.h"
#include "megbrain/utils/hash_ct.h"

#include "midout.h"

#include "megbrain/gopt/reformat_manager.h"

#include "./global_layout_transform/utils.h"

MIDOUT_DECL(megbrain_tensor_reformat)
#define MIDOUT_B(tag) \
    MIDOUT_BEGIN(megbrain_tensor_reformat, midout_iv(MGB_HASH_STR(tag))) {
#define MIDOUT_E \
    }            \
    MIDOUT_END();

using namespace mgb;
using namespace gopt;
using ReformatKey = ReformatManager::ReformatKey;

/* ================ TensorReformatPass =============== */
/*!
 * \brief relayout placeholder opr
 *
 * RelayoutPlaceholder oprs act as the placeholders of the ComputingGraph
 * during graph opt pass `TensorReformatPass`. These oprs are introduced
 * into a ComputingGraph for conveniently discovering further optimize
 * opportunities (such as fuse consecutive relayouts, translate into
 * optimized implementations). They are canonized to have a shape infer, so
 * the output's shape can be correctly deduced during the opt pass.
 *
 * Note that the oprs in the ComputingGraph are only used as intermediate
 * representations before being translated to MegBrain oprs, so the
 * oprs should not get involved in any actual computing.
 */
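/*
 * Illustrative sketch (not part of any pass; it mirrors how the passes below
 * use the placeholder): a replace rule that needs an NCHW4 view of an NCHW32
 * var can request
 *
 *     auto nchw4_var = RelayoutPlaceholder::make(
 *             var, ReformatKey{TensorFormats::NCHWc32, TensorFormats::NCHWc4});
 *
 * and translate_pass() later expands the placeholder into the real reformat
 * expression obtained from ReformatManager.
 */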
// clang-format off
MGB_DEFINE_OPR_CLASS(TensorReformatPass::RelayoutPlaceholder,
                           cg::SingleCNOperatorNodeBase) // {
public:

    RelayoutPlaceholder(VarNode* src_var, const ReformatKey& key);

    /*!
     * \param src_var the input var
     * \param key the reformat key describing the tensor layout transformation
     * performed by this relayout placeholder
     */
    static SymbolVar make(VarNode* src_var, const ReformatKey& key);
    const ReformatKey& key() const {
        return m_key;
    }

private:
    void init_output_static_infer_desc() override;
    void scn_do_execute() override;
    void init_output_comp_node() override;
    const ReformatKey m_key;
    VarNode* m_output;
};
MGB_DYN_TYPE_OBJ_FINAL_IMPL(TensorReformatPass::RelayoutPlaceholder);
// clang-format on

TensorReformatPass::RelayoutPlaceholder::RelayoutPlaceholder(
        VarNode* src_var, const ReformatKey& key)
        : Super(src_var->owner_graph(), {}, "RelayoutPlaceholder", {src_var}),
          m_key{key} {
    add_input({src_var});
    add_equivalence_component<PODHash<ReformatKey>>(&m_key);
    m_output = ReformatManager::instance().get(m_key)({src_var});
    add_output(None)->dtype(src_var->dtype());
}

void TensorReformatPass::RelayoutPlaceholder::scn_do_execute() {
    mgb_throw(InternalError, "RelayoutPlaceholder opr can not be executed");
}

void TensorReformatPass::RelayoutPlaceholder::init_output_comp_node() {
    output(0)->comp_node(input(0)->comp_node());
}

void TensorReformatPass::RelayoutPlaceholder::init_output_static_infer_desc() {
    using namespace cg::static_infer;
    auto&& mgr = owner_graph()->static_infer_manager();
    mgr.register_shape_infer(output(0), ShapeInferDesc::make_identity(m_output));
}

SymbolVar TensorReformatPass::RelayoutPlaceholder::make(
        VarNode* src_var, const ReformatKey& key) {
    return src_var->owner_graph()
            ->insert_opr(std::make_unique<RelayoutPlaceholder>(src_var, key))
            ->output(0);
}

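// insert_pass: iterate over the graph once; for every operator with a
// registered replace rule, rebuild it on the (possibly rewritten) inputs and
// map each non-volatile output of the old operator to the corresponding output
// of the new one. The rules may introduce RelayoutPlaceholder oprs here.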
void TensorReformatPass::insert_pass(OptState& opt) const {
    opt.set_var_replace_check_flag(m_var_replace_check_flag);
    auto rewriter = opt.graph().make_rewriter();
    VarNodeArray new_inp_cache;
    auto on_opr = [this, &opt, &rewriter, &new_inp_cache](OperatorNodeBase* opr) {
        auto it = m_opr_replace_func.find(opr->dyn_typeinfo());
        if (it != m_opr_replace_func.end()) {
            auto& new_inp = new_inp_cache;
            new_inp.clear();
            new_inp.reserve(opr->input().size());
            for (auto&& inp : opr->input()) {
                new_inp.push_back(rewriter.get_var(inp));
            }
            auto new_opr = (it->second)(opr, new_inp);
            auto &&out0 = opr->output(), &&out1 = new_opr->output();
            mgb_assert(
                    out0.size() == out1.size(),
                    "bad opr replace: src=%s{%s} dst=%s{%s}, "
                    "src.size=%zu "
                    "dst.size=%zu",
                    opr->cname(), opr->dyn_typeinfo()->name, new_opr->cname(),
                    new_opr->dyn_typeinfo()->name, out0.size(), out1.size());
            for (size_t i = 0; i < out0.size(); ++i) {
                if (!out0[i]->contain_flag(VarNode::Flag::VOLATILE_CONTENT)) {
                    mgb_assert(!out1[i]->contain_flag(VarNode::Flag::VOLATILE_CONTENT));
                    auto src = out0[i];
                    auto dst = out1[i];
                    if (opt.graph().endpoint_contain(src)) {
                        // additional process on endpoint var node
                        dst = on_graph_endpoint_var(dst, src);
                    }
                    rewriter.replace_var(src, dst, nullptr);
                }
            }
        } else {
            rewriter.auto_replace_outputs(opr);
        }
    };
    opt.graph().iter(on_opr);
    rewriter.apply_inplace();
}

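// translate_pass: expand every RelayoutPlaceholder left by insert_pass into
// the concrete reformat expression provided by ReformatManager, so that no
// placeholder opr survives into the final graph.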
void TensorReformatPass::translate_pass(OptState& opt) const {
    auto rewriter = opt.graph().make_rewriter();
    auto on_opr = [&rewriter](OperatorNodeBase* opr) {
        if (opr->same_type<RelayoutPlaceholder>()) {
            auto ph = try_cast_as_op<RelayoutPlaceholder>(opr);
            auto new_inp = rewriter.get_var(opr->input(0));
            auto new_var = ReformatManager::instance().get(ph->key())({new_inp});
            rewriter.replace_var(
                    opr->output(0), new_var,
                    mgb_cstr_log("replace relayout placeholder"));
            return;
        }
        rewriter.auto_replace_outputs(opr);
    };
    opt.graph().iter(on_opr);
    rewriter.apply_inplace();
}

void TensorReformatPass::apply(OptState& opt) const {
    MIDOUT_B("TensorReformatPass::apply")
    insert_pass(opt);
    translate_pass(opt);
    MIDOUT_E
}

/* ================ EnableTensorCorePass =============== */
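// This pass rewrites quantized (QuantizedS8) ConvBias subgraphs from NCHW4 to
// NCHW32 whenever the channel counts allow it (dense conv with channels
// divisible by 32 and spatial size at least 3), so that TensorCore-based
// kernels can be used; vars that must stay in NCHW4 are bridged with
// RelayoutPlaceholder oprs.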
VarNode* EnableTensorCorePass::on_graph_endpoint_var(
        VarNode* new_var, VarNode* orig_var) const {
    if (!orig_var->shape().eq_shape(new_var->shape())) {
        return RelayoutPlaceholder::make(
                       new_var,
                       ReformatKey{TensorFormats::NCHWc32, TensorFormats::NCHWc4})
                .node();
    }
    return new_var;
}

std::unique_ptr<EnableTensorCorePass> EnableTensorCorePass::
        make_tensorcore_converter() {
    MIDOUT_B("EnableTensorCorePass::make")
    // replace rule for conv bias opr
    auto replace_conv_bias_opr = [](OperatorNodeBase* opr,
                                    const VarNodeArray& new_inp) {
        using Param = megdnn::param::ConvBias;
        using Format = Param::Format;
        using Sparse = Param::Sparse;
        mgb_assert(opr->input().size() == new_inp.size());
        auto& conv_bias = opr->cast_final_safe<opr::ConvBiasForward>();
        if (conv_bias.param().format != Format::NCHW4 ||
            conv_bias.output(0)->dtype().enumv() != DTypeEnum::QuantizedS8) {
            size_t nr_inps = opr->input().size();
            bool shape_has_changed = false;
            for (size_t i = 0; i < nr_inps; ++i) {
                if (!opr->input(i)->shape().eq_shape(new_inp[i]->shape())) {
                    shape_has_changed = true;
                }
            }
            MGB_MARK_USED_VAR(shape_has_changed);
            mgb_assert(
                    !shape_has_changed,
                    "EnableTensorCorePass assumes that the shape of inputs of "
                    "ConvBias operators whose output dtype is not QuantizedS8 "
                    "can not be changed in this opt pass");
            return serialization::copy_opr_shallow(*opr, new_inp, opr->config());
        }
        mgb_assert(
                opr->input(1)->shape().eq_shape(new_inp[1]->shape()),
                "EnableTensorCorePass assumes that filter tensor of "
                "conv_bias operator can not be changed by other operators");
        VarNode* orig_filter = opr->input(1);
        auto is_nchw4 = [](TensorShape shape) -> bool {
            return shape.ndim == 5 && shape[4] == 4;
        };
        auto is_nchw32 = [](TensorShape shape) -> bool {
            return shape.ndim == 5 && shape[4] == 32;
        };
        bool can_replace_nchw32 = false;
        VarNode *src = nullptr, *weight = nullptr, *bias = nullptr, *z_inp = nullptr;
        // process src tensor
        if (is_nchw4(new_inp[0]->shape())) {  // new input is NCHW4 layout
            size_t group = 1, icpg, ocpg;
            if (conv_bias.param().sparse == Sparse::DENSE) {
                icpg = orig_filter->shape()[1] * 4;
                ocpg = orig_filter->shape()[0];
            } else {
                mgb_assert(conv_bias.param().sparse == Sparse::GROUP);
                group = orig_filter->shape()[0];
                icpg = orig_filter->shape()[2];
                ocpg = orig_filter->shape()[1];
                if (icpg == 1 && ocpg == 1) {  // channel wise conv
                    group *= 4;
                } else {
                    icpg *= 4;
                }
            }
            // nchw32 layout requires input height and width to be at least 3
            size_t ih = new_inp[0]->shape()[2], iw = new_inp[0]->shape()[3];
            if (group == 1 && ocpg % 32 == 0 && icpg % 32 == 0 && ih >= 3 && iw >= 3) {
                auto symvar = RelayoutPlaceholder::make(
                        new_inp[0],
                        ReformatKey{TensorFormats::NCHWc4, TensorFormats::NCHWc32});
                src = symvar.node();
                can_replace_nchw32 = true;
            } else {
                src = new_inp[0];
            }
        } else {  // new input is NCHW32 layout
            mgb_assert(is_nchw32(new_inp[0]->shape()));
            size_t group = 1, ocpg;
            if (conv_bias.param().sparse == Sparse::DENSE) {
                ocpg = orig_filter->shape()[0];
            } else {
                mgb_assert(conv_bias.param().sparse == Sparse::GROUP);
                size_t icpg = orig_filter->shape()[2];
                ocpg = orig_filter->shape()[1];
                if (icpg == 1 && ocpg == 1) {
                    group *= 4;
                } else {
                    icpg *= 4;
                }
            }
            size_t ih = new_inp[0]->shape()[2], iw = new_inp[0]->shape()[3];
            if (group == 1 && ocpg % 32 == 0 && ih >= 3 && iw >= 3) {
                can_replace_nchw32 = true;
                src = new_inp[0];
            } else {
                auto symvar = RelayoutPlaceholder::make(
                        new_inp[0],
                        ReformatKey{TensorFormats::NCHWc32, TensorFormats::NCHWc4});
                src = symvar.node();
            }
        }
        // process filter tensor
        if (can_replace_nchw32) {
            auto symvar = RelayoutPlaceholder::make(
                    new_inp[1],
                    ReformatKey{TensorFormats::NCHWc4, TensorFormats::NCHWc32});
            weight = symvar.node();
        } else {
            weight = new_inp[1];
        }
        if (new_inp.size() == 2) {
            if (can_replace_nchw32) {
                auto param = conv_bias.param();
                param.format = Format::NCHW32;
                auto new_opr = opr::ConvBiasForward::make(
                        src, weight, param, conv_bias.execution_policy(),
                        conv_bias.config());
                return new_opr.node()->owner_opr();
            } else {
                VarNodeArray inps{src, weight};
                auto new_opr =
                        serialization::copy_opr_shallow(*opr, inps, opr->config());
                return new_opr;
            }
        }
        auto process_inp = [&](VarNode* inp) -> VarNode* {
            if (can_replace_nchw32) {
                if (is_nchw4(inp->shape())) {
                    auto symvar = RelayoutPlaceholder::make(
                            inp,
                            ReformatKey{TensorFormats::NCHWc4, TensorFormats::NCHWc32});
                    return symvar.node();
                } else {
                    mgb_assert(is_nchw32(inp->shape()));
                    return inp;
                }
            } else {
                if (is_nchw4(inp->shape())) {
                    return inp;
                } else {
                    mgb_assert(is_nchw32(inp->shape()));
                    auto symvar = RelayoutPlaceholder::make(
                            inp,
                            ReformatKey{TensorFormats::NCHWc32, TensorFormats::NCHWc4});
                    return symvar.node();
                }
            }
        };
        // process bias tensor
        bias = process_inp(new_inp[2]);
        if (new_inp.size() == 3) {
            if (can_replace_nchw32) {
                auto param = conv_bias.param();
                param.format = Format::NCHW32;
                auto new_opr = opr::ConvBiasForward::make(
                        src, weight, bias, param, conv_bias.execution_policy(),
                        conv_bias.config());
                return new_opr.node()->owner_opr();
            } else {
                VarNodeArray inps{src, weight, bias};
                auto new_opr =
                        serialization::copy_opr_shallow(*opr, inps, opr->config());
                return new_opr;
            }
        }
        // process z_inp tensor
        z_inp = process_inp(new_inp[3]);
        if (can_replace_nchw32) {
            auto param = conv_bias.param();
            param.format = Format::NCHW32;
            auto new_opr = opr::ConvBiasForward::make(
                    src, weight, bias, z_inp, param, conv_bias.execution_policy(),
                    conv_bias.config());
            return new_opr.node()->owner_opr();
        }
        VarNodeArray inps{src, weight, bias, z_inp};
        auto new_opr = serialization::copy_opr_shallow(*opr, inps, opr->config());
        return new_opr;
    };
    // replace rule for elemwise like opr
    // for oprs that support both NCHW4 and NCHW32 layouts
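    // heuristic: if at least nr_inps / 2 inputs already arrive in NCHW32, the
    // remaining NCHW4 inputs are lifted to NCHW32; otherwise the NCHW32 inputs
    // are converted back to NCHW4.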
    auto replace_elemwise_like_opr = [](OperatorNodeBase* opr,
                                        const VarNodeArray new_inp) {
        mgb_assert(opr->input().size() == new_inp.size());
        size_t nr_inps = new_inp.size();
        size_t nr_shape_changed = 0;
        for (size_t i = 0; i < nr_inps; ++i) {
            if (!opr->input(i)->shape().eq_shape(new_inp[i]->shape())) {
                nr_shape_changed++;
            }
        }
        if (nr_shape_changed) {
            auto inps = new_inp;
            if (nr_shape_changed >= nr_inps / 2) {  // NCHW32 > NCHW4 -> use NCHW32
                for (size_t i = 0; i < nr_inps; ++i) {
                    if (opr->input(i)->shape().eq_shape(new_inp[i]->shape())) {
                        auto symvar = RelayoutPlaceholder::make(
                                new_inp[i],
                                ReformatKey{
                                        TensorFormats::NCHWc4, TensorFormats::NCHWc32});
                        inps[i] = symvar.node();
                    }
                }
            } else {  // NCHW32 < NCHW4 -> use NCHW4
                for (size_t i = 0; i < nr_inps; ++i) {
                    if (!opr->input(i)->shape().eq_shape(new_inp[i]->shape())) {
                        auto symvar = RelayoutPlaceholder::make(
                                new_inp[i],
                                ReformatKey{
                                        TensorFormats::NCHWc32, TensorFormats::NCHWc4});
                        inps[i] = symvar.node();
                    }
                }
            }
            return serialization::copy_opr_shallow(*opr, inps, opr->config());
        }
        return serialization::copy_opr_shallow(*opr, new_inp, opr->config());
    };
    // for oprs only supports NCHW4 layout
    auto replace_inps_to_nchw4 = [](OperatorNodeBase* opr, const VarNodeArray new_inp) {
        mgb_assert(opr->input().size() == new_inp.size());
        VarNodeArray inps = new_inp;
        for (size_t i = 0; i < opr->input().size(); ++i) {
            if (!opr->input(i)->shape().eq_shape(new_inp[i]->shape())) {
                mgb_assert(
                        opr->input(i)->shape().ndim == 5 &&
                        opr->input(i)->shape()[4] == 4);
                mgb_assert(
                        new_inp[i]->shape().ndim == 5 && new_inp[i]->shape()[4] == 32);
                auto symvar = RelayoutPlaceholder::make(
                        new_inp[i],
                        ReformatKey{TensorFormats::NCHWc32, TensorFormats::NCHWc4});
                inps[i] = symvar.node();
            }
        }
        auto new_opr = serialization::copy_opr_shallow(*opr, inps, opr->config());
        return new_opr;
    };
    auto replace_non_nchw4_opr = [](OperatorNodeBase* opr, const VarNodeArray new_inp) {
        size_t nr_inps = opr->input().size();
        bool shape_has_changed = false;
        for (size_t i = 0; i < nr_inps; ++i) {
            if (!opr->input(i)->shape().eq_shape(new_inp[i]->shape())) {
                shape_has_changed = true;
            }
        }
        mgb_assert(
                !shape_has_changed,
                "EnableTensorCorePass assumes that inputs' shape of "
                "non-nchw4 operators "
                "can not be changed in this opt "
                "pass");
        return serialization::copy_opr_shallow(*opr, new_inp, opr->config());
    };
    auto replace_warp_affine_opr = [replace_inps_to_nchw4, replace_non_nchw4_opr](
                                           OperatorNodeBase* opr,
                                           const VarNodeArray new_inp) {
        using Param = opr::WarpAffineForward::Param;
        using Format = Param::Format;
        mgb_assert(opr->input().size() == new_inp.size());
        auto& warp = opr->cast_final_safe<opr::WarpAffineForward>();
        if (warp.param().format != Format::NCHW4) {
            return replace_non_nchw4_opr(opr, new_inp);
        }
        return replace_inps_to_nchw4(opr, new_inp);
    };
    auto replace_warp_perspective_opr = [replace_inps_to_nchw4, replace_non_nchw4_opr](
                                                OperatorNodeBase* opr,
                                                const VarNodeArray new_inp) {
        using Param = opr::WarpPerspectiveForward::Param;
        using Format = Param::Format;
        mgb_assert(opr->input().size() == new_inp.size());
        auto& warp = opr->cast_final_safe<opr::WarpPerspectiveForward>();
        if (warp.param().format != Format::NCHW4) {
            return replace_non_nchw4_opr(opr, new_inp);
        }
        return replace_inps_to_nchw4(opr, new_inp);
    };
    auto replace_resize_opr = [replace_inps_to_nchw4, replace_non_nchw4_opr](
                                      OperatorNodeBase* opr,
                                      const VarNodeArray new_inp) {
        using Param = opr::ResizeForward::Param;
        using Format = Param::Format;
        mgb_assert(opr->input().size() == new_inp.size());
        auto& resize = opr->cast_final_safe<opr::ResizeForward>();
        if (resize.param().format != Format::NCHW4) {
            return replace_non_nchw4_opr(opr, new_inp);
        }
        return replace_inps_to_nchw4(opr, new_inp);
    };
    auto replace_pooling_opr = [replace_non_nchw4_opr](
                                       OperatorNodeBase* opr,
                                       const VarNodeArray new_inp) {
        using Param = opr::PoolingForward::Param;
        using Format = Param::Format;
        mgb_assert(opr->input().size() == new_inp.size());
        auto& pooling = opr->cast_final_safe<opr::PoolingForward>();
        if (pooling.param().format != Format::NCHW4) {
            return replace_non_nchw4_opr(opr, new_inp);
        }
        size_t nr_inps = opr->input().size();
        MGB_MARK_USED_VAR(nr_inps);
        mgb_assert(nr_inps == 1);
        size_t nr_channels = opr->input(0)->shape()[1] * 4;
        if (nr_channels % 32 == 0) {  // use nchw32 format
            VarNode* new_inp_var = new_inp[0];
            if (opr->input(0)->shape().eq_shape(new_inp[0]->shape())) {
                new_inp_var =
                        RelayoutPlaceholder::make(
                                new_inp[0],
                                ReformatKey{
                                        TensorFormats::NCHWc4, TensorFormats::NCHWc32})
518 519
                                .node();
            } else {
                mgb_assert(
                        opr->input(0)->shape().ndim == 5 &&
                        opr->input(0)->shape()[4] == 4);
                mgb_assert(
                        new_inp[0]->shape().ndim == 5 && new_inp[0]->shape()[4] == 32);
            }
            auto new_param = pooling.param();
            new_param.format = Format::NCHW32;
            auto new_pooling = opr::PoolingForward::make(
                    new_inp_var, new_param, pooling.execution_policy(), opr->config());
            return new_pooling.node()->owner_opr();
        }
        return serialization::copy_opr_shallow(*opr, new_inp, opr->config());
    };
    auto ret = std::make_unique<EnableTensorCorePass>();
    ret->set_var_replace_check_flag(VarReplaceCheckFlag::NOCHECK);
    auto&& replace_func = ret->m_opr_replace_func;
    replace_func[opr::ConvBiasForward::typeinfo()] = replace_conv_bias_opr;

    // elemwise like
    replace_func[opr::Elemwise::typeinfo()] = replace_elemwise_like_opr;
    replace_func[opr::TypeCvt::typeinfo()] = replace_elemwise_like_opr;
    replace_func[opr::ElemwiseMultiType::typeinfo()] = replace_elemwise_like_opr;
    replace_func[opr::PowC::typeinfo()] = replace_elemwise_like_opr;

    // format aware
    replace_func[opr::PoolingForward::typeinfo()] = replace_pooling_opr;
    replace_func[opr::WarpAffineForward::typeinfo()] = replace_warp_affine_opr;
    replace_func[opr::WarpPerspectiveForward::typeinfo()] =
            replace_warp_perspective_opr;
    replace_func[opr::ResizeForward::typeinfo()] = replace_resize_opr;

    // to nchw4
    replace_func[opr::Reduce::typeinfo()] = replace_inps_to_nchw4;
    replace_func[opr::Concat::typeinfo()] = replace_inps_to_nchw4;
    replace_func[opr::Reshape::typeinfo()] = replace_inps_to_nchw4;
    replace_func[opr::GetVarShape::typeinfo()] = replace_inps_to_nchw4;
    replace_func[opr::Dimshuffle::typeinfo()] = replace_inps_to_nchw4;
    replace_func[opr::BatchConvBias::typeinfo()] = replace_inps_to_nchw4;
    return ret;
    MIDOUT_E
}

/* ================ EnableCHWN4Pass =============== */
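// This pass rewrites quantized ConvBias subgraphs from NCHW4 to CHWN4;
// m_varshape_changed records the vars already switched to CHWN4 so that later
// oprs and graph endpoints can be bridged back to NCHW4 where required.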
VarNode* EnableCHWN4Pass::on_graph_endpoint_var(
        VarNode* new_var, VarNode* /* orig_var */) const {
    if (m_varshape_changed.count(new_var)) {
        return RelayoutPlaceholder::make(
                       new_var,
                       ReformatKey{TensorFormats::CHWNc4, TensorFormats::NCHWc4})
                .node();
    }
    return new_var;
}

std::unique_ptr<EnableCHWN4Pass> EnableCHWN4Pass::make_chwn4_converter() {
    MIDOUT_B("EnableCHWN4Pass::make")
    auto ret = std::make_unique<EnableCHWN4Pass>();
    ret->set_var_replace_check_flag(VarReplaceCheckFlag::NOCHECK);
    auto&& replace_func = ret->m_opr_replace_func;
    auto&& varshape_changed = ret->m_varshape_changed;
    // replace rule for conv bias opr
    auto replace_conv_bias_opr = [&varshape_changed](
                                         OperatorNodeBase* opr,
                                         const VarNodeArray& new_inp) {
        using Param = megdnn::param::ConvBias;
        using Format = Param::Format;
        mgb_assert(opr->input().size() == new_inp.size());
        auto& conv_bias = opr->cast_final_safe<opr::ConvBiasForward>();
        if (conv_bias.param().format != Format::NCHW4 ||
            conv_bias.output(0)->dtype().enumv() != DTypeEnum::QuantizedS8) {
            size_t nr_inps = new_inp.size();
            bool shape_has_changed = false;
            for (size_t i = 0; i < nr_inps; ++i) {
                if (varshape_changed.count(new_inp[i])) {
                    shape_has_changed = true;
                    break;
                }
            }
            mgb_assert(
                    !shape_has_changed,
                    "EnableCHWN4Pass assumes that the shape of inputs of "
                    "ConvBias operators whose output dtype is not QuantizedS8 "
                    "can not be changed in this opt pass");
            return serialization::copy_opr_shallow(*opr, new_inp, opr->config());
        }
        mgb_assert(
                varshape_changed.count(new_inp[1]) == 0,
                "EnableCHWN4Pass assumes that filter tensor of "
                "conv_bias operator can not be changed by other operators");
        VarNode *src = nullptr, *weight = nullptr, *bias = nullptr, *z_inp = nullptr;
        // process src tensor
        if (varshape_changed.count(new_inp[0]) == 0) {  // new input is NCHW4 layout
            // group conv is currently not supported here
            auto symvar = RelayoutPlaceholder::make(
                    new_inp[0],
                    ReformatKey{TensorFormats::NCHWc4, TensorFormats::CHWNc4});
            src = symvar.node();
        } else {  // new input is already in CHWN4 layout
            src = new_inp[0];
        }
        // process weight tensor
        {
            auto symvar = RelayoutPlaceholder::make(
                    new_inp[1],
                    ReformatKey{TensorFormats::NCHWc4, TensorFormats::CHWNc4});
            weight = symvar.node();
        }
        if (new_inp.size() == 2) {
            auto param = conv_bias.param();
            param.format = Format::CHWN4;
            auto new_opr = opr::ConvBiasForward::make(
                    src, weight, param, conv_bias.execution_policy(),
                    conv_bias.config());
            varshape_changed.insert(new_opr.node());
            return new_opr.node()->owner_opr();
        }
        auto process_inp = [&](VarNode* inp) -> VarNode* {
            if (varshape_changed.count(inp) == 0) {
                auto symvar = RelayoutPlaceholder::make(
                        inp, ReformatKey{TensorFormats::NCHWc4, TensorFormats::CHWNc4});
                return symvar.node();
            } else {
                return inp;
            }
        };
        // process bias tensor
        bias = process_inp(new_inp[2]);
        if (new_inp.size() == 3) {
            auto param = conv_bias.param();
            param.format = Format::CHWN4;
            auto new_opr = opr::ConvBiasForward::make(
                    src, weight, bias, param, conv_bias.execution_policy(),
                    conv_bias.config());
            varshape_changed.insert(new_opr.node());
            return new_opr.node()->owner_opr();
        }
        // process z_inp tensor
        z_inp = process_inp(new_inp[3]);
        auto param = conv_bias.param();
        param.format = Format::CHWN4;
        auto new_opr = opr::ConvBiasForward::make(
                src, weight, bias, z_inp, param, conv_bias.execution_policy(),
                conv_bias.config());
        varshape_changed.insert(new_opr.node());
        return new_opr.node()->owner_opr();
    };
    // replace rule for elemwise like opr
    // for oprs that support both NCHW4 and CHWN4 layouts
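    // heuristic: if at least nr_inps / 2 inputs are already CHWN4, the
    // remaining NCHW4 inputs are converted to CHWN4; otherwise the CHWN4
    // inputs are converted back to NCHW4.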
    auto replace_elemwise_like_opr = [&varshape_changed](
                                             OperatorNodeBase* opr,
                                             const VarNodeArray new_inp) {
        mgb_assert(opr->input().size() == new_inp.size());
        size_t nr_inps = new_inp.size();
        size_t nr_shape_changed = 0;
        for (size_t i = 0; i < nr_inps; ++i) {
            if (varshape_changed.count(new_inp[i])) {
                nr_shape_changed++;
            }
        }
        if (nr_shape_changed) {
            auto inps = new_inp;
            if (nr_shape_changed >= nr_inps / 2) {  // CHWN4 > NCHW4 -> use CHWN4
                for (size_t i = 0; i < nr_inps; ++i) {
                    if (varshape_changed.count(new_inp[i]) == 0) {
                        auto symvar = RelayoutPlaceholder::make(
                                new_inp[i],
                                ReformatKey{
                                        TensorFormats::NCHWc4, TensorFormats::CHWNc4});
                        inps[i] = symvar.node();
                    }
                }
                auto new_opr =
                        serialization::copy_opr_shallow(*opr, inps, opr->config());
                varshape_changed.insert(new_opr->output(0));
                return new_opr;
            } else {  // CHWN4 < NCHW4 -> use NCHW4
                for (size_t i = 0; i < nr_inps; ++i) {
                    if (varshape_changed.count(new_inp[i])) {
                        auto symvar = RelayoutPlaceholder::make(
                                new_inp[i],
                                ReformatKey{
                                        TensorFormats::CHWNc4, TensorFormats::NCHWc4});
                        inps[i] = symvar.node();
                    }
                }
                return serialization::copy_opr_shallow(*opr, inps, opr->config());
            }
        }
        return serialization::copy_opr_shallow(*opr, new_inp, opr->config());
    };
    // for oprs only supports NCHW4 layout
    auto replace_inps_to_nchw4 = [&varshape_changed](
                                         OperatorNodeBase* opr,
                                         const VarNodeArray new_inp) {
        mgb_assert(opr->input().size() == new_inp.size());
        VarNodeArray inps = new_inp;
        for (size_t i = 0; i < opr->input().size(); ++i) {
            if (varshape_changed.count(new_inp[i])) {
                auto symvar = RelayoutPlaceholder::make(
                        new_inp[i],
                        ReformatKey{TensorFormats::CHWNc4, TensorFormats::NCHWc4});
                inps[i] = symvar.node();
            }
        }
        auto new_opr = serialization::copy_opr_shallow(*opr, inps, opr->config());
        return new_opr;
    };
    auto replace_non_nchw4_opr =
            [&varshape_changed](OperatorNodeBase* opr, const VarNodeArray new_inp) {
                size_t nr_inps = opr->input().size();
                bool shape_has_changed = false;
                for (size_t i = 0; i < nr_inps; ++i) {
                    if (varshape_changed.count(new_inp[i])) {
                        shape_has_changed = true;
                    }
                }
                mgb_assert(
                        !shape_has_changed,
                        "EnableCHWN4Pass assumes that inputs' shape of "
                        "non-nchw4 operators "
                        "can not be changed in this opt "
                        "pass");
                return serialization::copy_opr_shallow(*opr, new_inp, opr->config());
            };
    // capture by copy to avoid use after return
    auto replace_warp_affine_opr = [replace_inps_to_nchw4, replace_non_nchw4_opr](
                                           OperatorNodeBase* opr,
                                           const VarNodeArray new_inp) {
        using Param = opr::WarpAffineForward::Param;
        using Format = Param::Format;
        mgb_assert(opr->input().size() == new_inp.size());
        auto& warp = opr->cast_final_safe<opr::WarpAffineForward>();
        if (warp.param().format != Format::NCHW4) {
            return replace_non_nchw4_opr(opr, new_inp);
        }
        return replace_inps_to_nchw4(opr, new_inp);
    };
    auto replace_warp_perspective_opr = [replace_inps_to_nchw4, replace_non_nchw4_opr](
                                                OperatorNodeBase* opr,
                                                const VarNodeArray new_inp) {
        using Param = opr::WarpPerspectiveForward::Param;
        using Format = Param::Format;
        mgb_assert(opr->input().size() == new_inp.size());
        auto& warp = opr->cast_final_safe<opr::WarpPerspectiveForward>();
        if (warp.param().format != Format::NCHW4) {
            return replace_non_nchw4_opr(opr, new_inp);
        }
        return replace_inps_to_nchw4(opr, new_inp);
    };
    auto replace_resize_opr = [replace_inps_to_nchw4, replace_non_nchw4_opr](
                                      OperatorNodeBase* opr,
                                      const VarNodeArray new_inp) {
        using Param = opr::ResizeForward::Param;
        using Format = Param::Format;
        mgb_assert(opr->input().size() == new_inp.size());
        auto& resize = opr->cast_final_safe<opr::ResizeForward>();
        if (resize.param().format != Format::NCHW4) {
            return replace_non_nchw4_opr(opr, new_inp);
        }
        return replace_inps_to_nchw4(opr, new_inp);
    };
    auto replace_pooling_opr = [&varshape_changed, replace_non_nchw4_opr](
                                       OperatorNodeBase* opr,
                                       const VarNodeArray new_inp) {
        using Param = opr::PoolingForward::Param;
        using Format = Param::Format;
        mgb_assert(opr->input().size() == new_inp.size());
        auto& pooling = opr->cast_final_safe<opr::PoolingForward>();
        if (pooling.param().format != Format::NCHW4) {
            return replace_non_nchw4_opr(opr, new_inp);
        }
        size_t nr_inps = opr->input().size();
        MGB_MARK_USED_VAR(nr_inps);
        mgb_assert(nr_inps == 1);
        if (varshape_changed.count(new_inp[0])) {
            auto new_param = pooling.param();
            new_param.format = Format::CHWN4;
            auto new_pooling = opr::PoolingForward::make(
                    new_inp[0], new_param, pooling.execution_policy(), opr->config());
            varshape_changed.insert(new_pooling.node());
            return new_pooling.node()->owner_opr();
        }
        return serialization::copy_opr_shallow(*opr, new_inp, opr->config());
    };
    replace_func[opr::ConvBiasForward::typeinfo()] = replace_conv_bias_opr;

    // elemwise like
    replace_func[opr::Elemwise::typeinfo()] = replace_elemwise_like_opr;
    replace_func[opr::TypeCvt::typeinfo()] = replace_elemwise_like_opr;
    replace_func[opr::ElemwiseMultiType::typeinfo()] = replace_elemwise_like_opr;
    replace_func[opr::PowC::typeinfo()] = replace_elemwise_like_opr;

    // format aware
    replace_func[opr::PoolingForward::typeinfo()] = replace_pooling_opr;
    replace_func[opr::WarpAffineForward::typeinfo()] = replace_warp_affine_opr;
    replace_func[opr::WarpPerspectiveForward::typeinfo()] =
            replace_warp_perspective_opr;
    replace_func[opr::ResizeForward::typeinfo()] = replace_resize_opr;

    // to nchw4
    replace_func[opr::Reduce::typeinfo()] = replace_inps_to_nchw4;
    replace_func[opr::Concat::typeinfo()] = replace_inps_to_nchw4;
    replace_func[opr::Reshape::typeinfo()] = replace_inps_to_nchw4;
    replace_func[opr::GetVarShape::typeinfo()] = replace_inps_to_nchw4;
    replace_func[opr::Dimshuffle::typeinfo()] = replace_inps_to_nchw4;
    replace_func[opr::BatchConvBias::typeinfo()] = replace_inps_to_nchw4;
    return ret;
    MIDOUT_E
}

/* ================ EnableNCHW4Pass ================ */
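// This pass converts quantized convolution-like oprs from NCHW to NCHW4:
// sources and biases go through NCHW -> NCHWc4 reformats, dense/group filters
// through KCRS -> KCRSc4 / GKCRS -> GKCRSc4 reformats, while Float32 inputs
// are left untouched.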
VarNode* EnableNCHW4Pass::on_graph_endpoint_var(
        VarNode* new_var, VarNode* orig_var) const {
    if (!orig_var->shape().eq_shape(new_var->shape())) {
        return RelayoutPlaceholder::make(
                       new_var, ReformatKey{TensorFormats::NCHWc4, TensorFormats::NCHW})
                .node();
    }
    return new_var;
}

//! FIXME: float oprs do not support NCHW4 yet; add support for them in the future.
std::unique_ptr<EnableNCHW4Pass> EnableNCHW4Pass::make_nchw4_converter() {
    MIDOUT_B("EnableNCHW4Pass::make")
    auto ret = std::make_unique<EnableNCHW4Pass>();
    ret->set_var_replace_check_flag(VarReplaceCheckFlag::NOCHECK);
    megdnn::param::Convolution::Format conv_format =
            megdnn::param::Convolution::Format::NCHW4;
    megdnn::param::ConvBias::Format conv_bias_format =
            megdnn::param::ConvBias::Format::NCHW4;
    megdnn::param::ConvBias::Format conv_bias_format_nchw4_nchw =
            megdnn::param::ConvBias::Format::NCHW4_NCHW;
    megdnn::param::BatchConvBias::Format batch_conv_bias_format =
            megdnn::param::BatchConvBias::Format::NCHW4;
    ReformatKey src_to_nchw4_mode{TensorFormats::NCHW, TensorFormats::NCHWc4};
    ReformatKey src_to_nchw_mode{TensorFormats::NCHWc4, TensorFormats::NCHW};
    ReformatKey weight_to_nchw4_mode_dense{TensorFormats::KCRS, TensorFormats::KCRSc4};
    ReformatKey weight_to_nchw4_mode_group{
            TensorFormats::GKCRS, TensorFormats::GKCRSc4};

    struct ConvMode {
        ReformatKey weight;
        ReformatKey src;
    };

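    // trans_nchw4: choose the src/weight reformat keys for a convolution-like
    // opr. Dense filters with IC < 4 use the IC_SMALL variants of the
    // NCHW -> NCHWc4 / KCRS -> KCRSc4 reformats; group conv requires IC to be
    // a multiple of 4 and uses the GKCRS -> GKCRSc4 weight reformat.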
    auto trans_nchw4 = [weight_to_nchw4_mode_dense, weight_to_nchw4_mode_group,
                        src_to_nchw4_mode](
                               const megdnn::param::Convolution::Sparse conv_mode,
                               const VarNode* filter) -> ConvMode {
        if (conv_mode == megdnn::param::Convolution::Sparse::DENSE) {
            mgb_assert(filter->shape().ndim == 4, "The origin filter is not NCHW mode");
            size_t IC = filter->shape()[1];
            if (IC < 4) {
                ReformatKey weight{
                        TensorFormats::KCRS, TensorFormats::KCRSc4,
                        ReformatKey::Attribute::IC_SMALL};
                ReformatKey src{
                        TensorFormats::NCHW, TensorFormats::NCHWc4,
                        ReformatKey::Attribute::IC_SMALL};
                return {weight, src};
            } else {
                return {weight_to_nchw4_mode_dense, src_to_nchw4_mode};
            }
        } else {
            mgb_throw_if(
                    conv_mode != megdnn::param::Convolution::Sparse::GROUP,
                    MegBrainError, "mode error");
            mgb_assert(filter->shape().ndim == 5, "The origin filter is not NCHW mode");
            size_t IC = filter->shape()[2];
            mgb_assert(
                    IC % 4 == 0,
                    "The input channel should be divisible by 4 for group "
                    "conv");
            return {weight_to_nchw4_mode_group, src_to_nchw4_mode};
        }
    };
    auto replace_conv_opr = [trans_nchw4, conv_format](
                                    OperatorNodeBase* opr,
                                    const VarNodeArray& new_inp) {
        if (new_inp[0]->dtype().enumv() == DTypeEnum::Float32) {
            return serialization::copy_opr_shallow(*opr, new_inp, opr->config());
        }
        mgb_assert(opr->input().size() == new_inp.size());
        auto& conv_opr = opr->cast_final_safe<opr::ConvolutionForward>();
        if (conv_opr.param().format != megdnn::param::Convolution::Format::NCHW) {
            return serialization::copy_opr_shallow(*opr, new_inp, opr->config());
        }
        auto conv_mode = trans_nchw4(conv_opr.param().sparse, new_inp[1]);
        VarNode *conv_src = new_inp[0], *conv_filter = new_inp[1];
        // src: NCHW --> NCHW4
        if (new_inp[0]->shape().ndim != 5) {
            mgb_assert(new_inp[0]->shape().ndim == 4);
            auto new_src = RelayoutPlaceholder::make(new_inp[0], conv_mode.src);
            conv_src = new_src.node();
        }
        // weight: NCHW --> NCHW4
        auto new_filter = RelayoutPlaceholder::make(new_inp[1], conv_mode.weight);
        conv_filter = new_filter.node();
        // format: NCHW --> NCHW4
        auto new_param = conv_opr.param();
        new_param.format = conv_format;
        // dst
        auto new_conv_opr = opr::Convolution::make(
                conv_src, conv_filter, new_param, conv_opr.execution_policy(),
                conv_opr.config());
        OperatorNodeBase* new_opr = new_conv_opr.node()->owner_opr();
        mgb_assert(
                new_conv_opr.shape().ndim == 5,
                "The conv dst dim is not trans to nchw4");
        return new_opr;
    };

    auto replace_deconv_opr = [trans_nchw4, conv_format](
                                      OperatorNodeBase* opr,
                                      const VarNodeArray& new_inp) {
        if (new_inp[1]->dtype().enumv() == DTypeEnum::Float32) {
            return serialization::copy_opr_shallow(*opr, new_inp, opr->config());
        }
        mgb_assert(opr->input().size() == new_inp.size());
        auto& deconv_opr = opr->cast_final_safe<opr::ConvolutionBackwardData>();
        if ((deconv_opr.param().format != megdnn::param::Convolution::Format::NCHW) ||
            (deconv_opr.param().sparse != megdnn::param::Convolution::Sparse::DENSE)) {
            return serialization::copy_opr_shallow(*opr, new_inp, opr->config());
        }
        VarNode *deconv_src = new_inp[1], *deconv_filter = new_inp[0];
        auto deconv_mode = trans_nchw4(deconv_opr.param().sparse, deconv_filter);
        // src: NCHW --> NCHW4
        if (deconv_src->shape().ndim != 5) {
            mgb_assert(deconv_src->shape().ndim == 4);
            auto new_src = RelayoutPlaceholder::make(deconv_src, deconv_mode.src);
            deconv_src = new_src.node();
        }
        // weight: NCHW --> NCHW4
        auto new_filter = RelayoutPlaceholder::make(deconv_filter, deconv_mode.weight);
        deconv_filter = new_filter.node();
        // format: NCHW --> NCHW4
        auto new_param = deconv_opr.param();
        new_param.format = conv_format;
        // dst
        auto new_deconv_opr = opr::ConvolutionBackwardData::make_deconv(
                deconv_src, deconv_filter, new_param, deconv_opr.execution_policy(),
                deconv_opr.config());
        OperatorNodeBase* new_opr = new_deconv_opr.node()->owner_opr();
        return new_opr;
    };

    auto replace_batch_conv_bias_opr = [batch_conv_bias_format, src_to_nchw4_mode](
                                               OperatorNodeBase* opr,
                                               const VarNodeArray& new_inp) {
        if (new_inp[0]->dtype().enumv() == DTypeEnum::Float32) {
            return serialization::copy_opr_shallow(*opr, new_inp, opr->config());
        }
        mgb_assert(opr->input().size() == new_inp.size());
        auto& batch_conv_bias_opr = opr->cast_final_safe<opr::BatchConvBiasForward>();
        if (batch_conv_bias_opr.param().format !=
            megdnn::param::BatchConvBias::Format::NCHW) {
            return serialization::copy_opr_shallow(*opr, new_inp, opr->config());
        }

        mgb_assert(
                batch_conv_bias_opr.param().format ==
                        megdnn::param::BatchConvBias::Format::NCHW,
                "ConvertFormat Pass only support converting NCHW to NCHW4");
        // what should be converted: src, weight
        VarNode *src = new_inp[0], *filter = new_inp[1];
        // src: NCHW --> NCHW4
        if (new_inp[0]->shape().ndim != 5) {
            mgb_assert(new_inp[0]->shape().ndim == 4);
            auto new_src = RelayoutPlaceholder::make(new_inp[0], src_to_nchw4_mode);
            src = new_src.node();
        }
        // weight: BNCHW --> BNCHW4
        // only support dense mode, which is similar with conv->group.
        ReformatKey weight_mode{TensorFormats::GKCRS, TensorFormats::GKCRSc4};
        auto new_filter = RelayoutPlaceholder::make(new_inp[1], weight_mode);
        filter = new_filter.node();
        // format: NCHW --> NCHW4
        auto new_param = batch_conv_bias_opr.param();
        new_param.format = batch_conv_bias_format;
        if (new_inp.size() == 2) {
            auto dst = opr::BatchConvBias::make(
                    src, filter, new_param, batch_conv_bias_opr.execution_policy(),
                    batch_conv_bias_opr.config());
            OperatorNodeBase* new_opr = dst.node()->owner_opr();
            mgb_assert(
                    dst.shape().ndim == 5,
                    "The conv_bias dst dim is not trans to nchw4");
            return new_opr;
        }
        // bias: NCHW --> NCHW4
        VarNode* bias = new_inp[2];
        if (new_inp[2]->shape().ndim == 4) {
            auto new_bias = RelayoutPlaceholder::make(new_inp[2], src_to_nchw4_mode);
            bias = new_bias.node();
        }
        if (new_inp.size() == 3) {
            auto dst = opr::BatchConvBias::make(
                    src, filter, bias, new_param,
                    batch_conv_bias_opr.execution_policy(),
                    batch_conv_bias_opr.config());
            OperatorNodeBase* new_opr = dst.node()->owner_opr();
            mgb_assert(
                    dst.shape().ndim == 5,
                    "The conv_bias dst dim is not trans to nchw4");
            return new_opr;
        }
        // z_inp: NCHW --> NCHW4
        VarNode* z_inp = new_inp[3];
        if (new_inp[3]->shape().ndim == 4) {
            auto new_z = RelayoutPlaceholder::make(new_inp[3], src_to_nchw4_mode);
            z_inp = new_z.node();
        }
        auto dst = opr::BatchConvBias::make(
                src, filter, bias, z_inp, new_param,
                batch_conv_bias_opr.execution_policy(), batch_conv_bias_opr.config());
        OperatorNodeBase* new_opr = dst.node()->owner_opr();
        mgb_assert(
                dst.shape().ndim == 5, "The conv_bias dst dim is not trans to nchw4");
        return new_opr;
    };
    auto replace_conv_bias_opr = [trans_nchw4, conv_bias_format,
                                  conv_bias_format_nchw4_nchw, src_to_nchw4_mode](
                                         OperatorNodeBase* opr,
                                         const VarNodeArray& new_inp) {
        if (new_inp[0]->dtype().enumv() == DTypeEnum::Float32) {
            return serialization::copy_opr_shallow(*opr, new_inp, opr->config());
        }
        mgb_assert(opr->input().size() == new_inp.size());
        auto& conv_bias_opr = opr->cast_final_safe<opr::ConvBiasForward>();
        if (conv_bias_opr.param().format != megdnn::param::Convolution::Format::NCHW) {
            return serialization::copy_opr_shallow(*opr, new_inp, opr->config());
        }

        // what should be converted: src, weight
        VarNode *conv_bias_src = new_inp[0], *conv_bias_filter = new_inp[1];
        auto conv_mode = trans_nchw4(conv_bias_opr.param().sparse, new_inp[1]);
        // src: NCHW --> NCHW4
        if (new_inp[0]->shape().ndim != 5) {
            mgb_assert(new_inp[0]->shape().ndim == 4);
            auto new_src = RelayoutPlaceholder::make(new_inp[0], conv_mode.src);
            conv_bias_src = new_src.node();
        }
        // weight: NCHW --> NCHW4 or GNCHW --> GNCHW4
        auto new_filter = RelayoutPlaceholder::make(new_inp[1], conv_mode.weight);
        conv_bias_filter = new_filter.node();
        // format: NCHW --> NCHW4
        auto new_param = conv_bias_opr.param();
        if (conv_bias_opr.output().size() > 0 &&
            conv_bias_opr.output(0)->dtype().enumv() == DTypeEnum::Float32) {
            new_param.format = conv_bias_format_nchw4_nchw;
        } else {
            new_param.format = conv_bias_format;
        }
1074 1075
        if (new_inp.size() == 2) {
            auto new_conv_bias_opr = opr::ConvBias::make(
                    conv_bias_src, conv_bias_filter, new_param,
                    conv_bias_opr.execution_policy(), conv_bias_opr.config());
            OperatorNodeBase* new_opr = new_conv_bias_opr.node()->owner_opr();
            mgb_assert(
                    new_conv_bias_opr.node()->dtype().enumv() == DTypeEnum::Float32 ||
                            new_conv_bias_opr.shape().ndim == 5,
                    "The conv_bias dst dim is not trans to nchw4");
            return new_opr;
        }
        // bias: NCHW --> NCHW4 when bias_dtype is not Float32
        VarNode* conv_bias_bias = new_inp[2];
        if (new_inp[2]->dtype().enumv() != DTypeEnum::Float32 &&
            new_inp[2]->shape().ndim == 4) {
            auto new_bias = RelayoutPlaceholder::make(new_inp[2], src_to_nchw4_mode);
            conv_bias_bias = new_bias.node();
        }
        if (new_inp.size() == 3) {
            auto new_conv_bias_opr = opr::ConvBias::make(
                    conv_bias_src, conv_bias_filter, conv_bias_bias, new_param,
                    conv_bias_opr.execution_policy(), conv_bias_opr.config());
            OperatorNodeBase* new_opr = new_conv_bias_opr.node()->owner_opr();
            mgb_assert(
M
Megvii Engine Team 已提交
1098 1099 1100
                    new_conv_bias_opr.node()->dtype().enumv() == DTypeEnum::Float32 ||
                            new_conv_bias_opr.shape().ndim == 5,
                    "The conv_bias dst dim is not trans to nchw4");
1101 1102
            return new_opr;
        }
1103
        // z_inp: NCHW --> NCHW4 when bias_dtype is not Float32
1104
        VarNode* z_inp = new_inp[3];
1105 1106
        if (new_inp[3]->dtype().enumv() != DTypeEnum::Float32 &&
            new_inp[3]->shape().ndim == 4) {
M
Megvii Engine Team 已提交
1107
            auto new_z = RelayoutPlaceholder::make(new_inp[3], src_to_nchw4_mode);
1108 1109
            z_inp = new_z.node();
        }
M
Megvii Engine Team 已提交
1110
        auto new_conv_bias_opr = opr::ConvBias::make(
M
Megvii Engine Team 已提交
1111 1112
                conv_bias_src, conv_bias_filter, conv_bias_bias, z_inp, new_param,
                conv_bias_opr.execution_policy(), conv_bias_opr.config());
1113
        OperatorNodeBase* new_opr = new_conv_bias_opr.node()->owner_opr();
1114
        mgb_assert(
M
Megvii Engine Team 已提交
1115 1116 1117
                new_conv_bias_opr.node()->dtype().enumv() == DTypeEnum::Float32 ||
                        new_conv_bias_opr.shape().ndim == 5,
                "The conv_bias dst dim is not trans to nchw4");
1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133
        return new_opr;
    };
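    //! Elemwise-like oprs are layout agnostic: if any input has already been
    //! converted to NCHW4 (ndim == 5), relayout the remaining NCHW inputs so
    //! that all operands share the same format; otherwise copy the opr as is.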
    auto replace_elemwise_opr = [=](OperatorNodeBase* opr,
                                    const VarNodeArray& new_inp) {
        mgb_assert(opr->input().size() == new_inp.size());
        bool has_inp_changed = false;
        for (size_t i = 0; i < opr->input().size(); i++) {
            if (new_inp[i]->shape().ndim == 5) {
                has_inp_changed = true;
                break;
            }
        }
        if (has_inp_changed) {
            auto temp_inp = new_inp;
            for (size_t i = 0; i < opr->input().size(); i++) {
                if (new_inp[i]->shape().ndim == 4) {
                    auto new_var =
                            RelayoutPlaceholder::make(new_inp[i], src_to_nchw4_mode);
                    temp_inp[i] = new_var.node();
                } else {
                    mgb_assert(
                            (new_inp[i]->shape().ndim == 5) ||
                            new_inp[i]->shape().is_scalar());
                }
            }
            return serialization::copy_opr_shallow(*opr, temp_inp, opr->config());
        } else {
            return serialization::copy_opr_shallow(*opr, new_inp, opr->config());
        }
    };
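    //! Fallback for oprs without an NCHW4 implementation: any input that was
    //! converted to NCHW4 is relayouted back to NCHW before the opr is copied.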
    auto relayout_inp_to_nchw = [=](OperatorNodeBase* opr,
                                    const VarNodeArray& new_inp) {
        mgb_assert(opr->input().size() == new_inp.size());
        VarNodeArray temp_inp = new_inp;
        for (size_t i = 0; i < opr->input().size(); i++) {
            if (!opr->input(i)->shape().eq_shape(new_inp[i]->shape())) {
                mgb_assert(opr->input(i)->shape().ndim == 4);
                mgb_assert(new_inp[i]->shape().ndim == 5);
                auto new_var = RelayoutPlaceholder::make(new_inp[i], src_to_nchw_mode);
                temp_inp[i] = new_var.node();
            }
        }
        return serialization::copy_opr_shallow(*opr, temp_inp, opr->config());
    };
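    //! Pooling/Resize/WarpPerspective only switch their param format to NCHW4
    //! when the quantized int8 input has already been packed into NCHW4;
    //! Float32 graphs are copied unchanged.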
    auto replace_pooling_opr = [](OperatorNodeBase* opr, const VarNodeArray& new_inp) {
        if (new_inp[0]->dtype().enumv() == DTypeEnum::Float32) {
            return serialization::copy_opr_shallow(*opr, new_inp, opr->config());
        }
        using Param = opr::PoolingForward::Param;
        using Format = Param::Format;
        mgb_assert(opr->input().size() == new_inp.size());
        auto& pooling = opr->cast_final_safe<opr::PoolingForward>();
        if (pooling.param().format != Format::NCHW) {
            return opr;
        }
        if (new_inp[0]->shape().ndim == 5) {
            mgb_assert(new_inp[0]->dtype().enumv() == DTypeEnum::QuantizedS8);
            auto new_param = pooling.param();
            new_param.format = Format::NCHW4;
            auto new_pooling = opr::PoolingForward::make(
                    new_inp[0], new_param, pooling.execution_policy(), opr->config());
            mgb_assert(
                    new_pooling.shape().ndim == 5,
                    "out var of Pooling opr after transform must be 5 (got: "
                    "%zu).",
                    new_pooling.shape().ndim);
            return new_pooling.node()->owner_opr();
        }
        auto new_opr = serialization::copy_opr_shallow(*opr, new_inp, opr->config());
        return new_opr;
    };
    auto replace_resize_opr = [](OperatorNodeBase* opr, const VarNodeArray& new_inp) {
        if (new_inp[0]->dtype().enumv() == DTypeEnum::Float32) {
            return serialization::copy_opr_shallow(*opr, new_inp, opr->config());
        }
        using Param = opr::ResizeForward::Param;
        using Format = Param::Format;
        mgb_assert(opr->input().size() == new_inp.size());
        auto& resize = opr->cast_final_safe<opr::ResizeForward>();
        if (new_inp[0]->shape().ndim == 5) {
            mgb_assert(new_inp[0]->dtype().enumv() == DTypeEnum::QuantizedS8);
            auto new_param = resize.param();
            new_param.format = Format::NCHW4;
            auto new_resize = opr::ResizeForward::make(
                    new_inp[0], new_inp[1], new_param, opr->config());
            mgb_assert(
                    new_resize.shape().ndim == 5,
                    "out var of Resize opr after transform must be 5 (got: "
                    "%zu).",
                    new_resize.shape().ndim);
            return new_resize.node()->owner_opr();
        }
        auto new_opr = serialization::copy_opr_shallow(*opr, new_inp, opr->config());
        return new_opr;
    };
    auto replace_warp_perspective_opr = [](OperatorNodeBase* opr,
                                           const VarNodeArray& new_inp) {
        if (new_inp[0]->dtype().enumv() == DTypeEnum::Float32) {
            return serialization::copy_opr_shallow(*opr, new_inp, opr->config());
        }
        using Param = opr::WarpPerspective::Param;
        using Format = Param::Format;
        mgb_assert(opr->input().size() == new_inp.size());
        auto& warp = opr->cast_final_safe<opr::WarpPerspectiveForward>();
        if (new_inp[0]->shape().ndim == 5) {
            mgb_assert(new_inp[0]->dtype().enumv() == DTypeEnum::QuantizedS8);
            auto new_param = warp.param();
            new_param.format = Format::NCHW4;
            SymbolVar new_warp;
            if (new_inp.size() == 3) {
                new_warp = opr::WarpPerspectiveForward::make(
                        new_inp[0], new_inp[1], nullptr, new_inp[2], new_param,
                        opr->config());
            } else {
                mgb_assert(new_inp.size() == 4);
                new_warp = opr::WarpPerspectiveForward::make(
                        new_inp[0], new_inp[1], new_inp[2], new_inp[3], new_param,
                        opr->config());
            }
            mgb_assert(
                    new_warp.shape().ndim == 5,
                    "out var of WarpPerspective opr after transform must be "
                    "5 (got: "
                    "%zu).",
                    new_warp.shape().ndim);
            return new_warp.node()->owner_opr();
        }
        auto new_opr = serialization::copy_opr_shallow(*opr, new_inp, opr->config());
        return new_opr;
    };
    auto&& replace_func = ret->m_opr_replace_func;
    //! supported nchw4
    replace_func[opr::Convolution::typeinfo()] = replace_conv_opr;
    replace_func[opr::ConvolutionBackwardData::typeinfo()] = replace_deconv_opr;
    replace_func[opr::ConvBias::typeinfo()] = replace_conv_bias_opr;
    replace_func[opr::BatchConvBias::typeinfo()] = replace_batch_conv_bias_opr;
    replace_func[opr::PoolingForward::typeinfo()] = replace_pooling_opr;
    replace_func[opr::ResizeForward::typeinfo()] = replace_resize_opr;
    replace_func[opr::WarpPerspectiveForward::typeinfo()] =
            replace_warp_perspective_opr;
    replace_func[opr::Elemwise::typeinfo()] = replace_elemwise_opr;
    replace_func[opr::TypeCvt::typeinfo()] = replace_elemwise_opr;
    replace_func[opr::ElemwiseMultiType::typeinfo()] = replace_elemwise_opr;
    replace_func[opr::PowC::typeinfo()] = replace_elemwise_opr;
    //! not supported nchw4
    replace_func[opr::Concat::typeinfo()] = relayout_inp_to_nchw;
    replace_func[opr::Subtensor::typeinfo()] = relayout_inp_to_nchw;
    replace_func[opr::GetVarShape::typeinfo()] = relayout_inp_to_nchw;
    replace_func[opr::Dimshuffle::typeinfo()] = relayout_inp_to_nchw;
    replace_func[opr::Reduce::typeinfo()] = relayout_inp_to_nchw;
    replace_func[opr::AssertEqual::typeinfo()] = relayout_inp_to_nchw;
    replace_func[opr::IncrSubtensor::typeinfo()] = relayout_inp_to_nchw;
    replace_func[opr::WarpAffineForward::typeinfo()] = relayout_inp_to_nchw;
    return ret;
    MIDOUT_E
}

/* ================ EnableNchwxxPass =============== */
VarNode* EnableNchwxxPass::on_graph_endpoint_var(
        VarNode* new_var, VarNode* orig_var) const {
    if (!orig_var->shape().eq_shape(new_var->shape())) {
        if (m_pack_c_size == 8) {
            return RelayoutPlaceholder::make(
                           new_var,
                           ReformatKey{TensorFormats::NCHWc8, TensorFormats::NCHW})
                    .node();
        } else if (m_pack_c_size == 4) {
            return RelayoutPlaceholder::make(
                           new_var,
                           ReformatKey{TensorFormats::NCHWc4, TensorFormats::NCHW})
                    .node();
        }
    }
    return new_var;
}

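//! Collapse an NCHWxx shape (N, C/x, H, W, x) back to the logical NCHW shape,
//! so that e.g. a packed bias can be compared against the NCHW output shape.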
static inline TensorShape nchwxx_shape_2_nchw_shape(const TensorShape& origin_shape) {
    mgb_assert(origin_shape.ndim == 5);
    TensorShape result = origin_shape;
    result[1] *= result[4];
    result.ndim = 4;
    return result;
}

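//! Check whether a convolution whose feature map stays in NCHW can profit from
//! the hybrid NCHW -> NCHWxx kernels: the filter meta and bias mode are
//! canonized by hand and the final decision is delegated to
//! megdnn::ConvBiasForward::is_nchw_nchwxx_optimized.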
template <typename OprType>
static inline bool nchw_nchwxx_valid(
        const OprType& opr, const VarNodeArray& new_inp, const size_t pack_size,
        megdnn::param::ConvBias::NonlineMode nonline_mode =
                megdnn::param::ConvBias::NonlineMode::IDENTITY,
        bool is_dot = false) {
    auto& src_node = new_inp[0];
    auto& filter_node = new_inp[1];
    auto dst_node = opr.output(0);
    //! already transformed or have fuse Z
    if (filter_node->shape().ndim != 4 || new_inp.size() == 4) {
        return false;
    }
    megdnn::ConvolutionBase<megdnn::param::Convolution>::CanonizedFilterMeta fm;
    fm.format = megdnn::param::Convolution::Format::NCHW;
    fm.should_flip = opr.param().mode == megdnn::ConvBiasForward::Mode::CONVOLUTION;
    fm.group = 1;
    fm.spatial_ndim = 2;
    fm.ocpg = filter_node->shape()[0];
    fm.icpg = filter_node->shape()[1];
    fm.spatial[0] = filter_node->shape()[2];
    fm.spatial[1] = filter_node->shape()[3];
    fm.stride[0] = opr.param().stride_h;
    fm.stride[1] = opr.param().stride_w;
    fm.padding[0] = opr.param().pad_h;
    fm.padding[1] = opr.param().pad_w;
    fm.dilation[0] = opr.param().dilate_h;
    fm.dilation[1] = opr.param().dilate_w;

    megdnn::ConvBiasForward::BiasMode bias_mode =
            megdnn::ConvBiasForward::BiasMode::NO_BIAS;
    if (std::is_same<OprType, opr::ConvBiasForward>::value && new_inp.size() > 2) {
        TensorShape bias_shape = new_inp[2]->shape();
        if (bias_shape.ndim == 5) {
            bias_shape = nchwxx_shape_2_nchw_shape(bias_shape);
        }
        if (bias_shape.ndim == 0) {
            bias_mode = megdnn::ConvBiasForward::BiasMode::NO_BIAS;
        } else if (bias_shape.eq_shape(dst_node->shape())) {
            bias_mode = megdnn::ConvBiasForward::BiasMode::BIAS;
        } else {
            //! just check the ndim, the detail shape check is in check_exec
            mgb_assert(bias_shape.ndim == dst_node->shape().ndim);
            bias_mode = megdnn::ConvBiasForward::BiasMode::BROADCAST_CHANNEL_BIAS;
        }
    }

    if (pack_size == 4) {
        if (is_dot && filter_node->dtype().enumv() == DTypeEnum::QuantizedS8) {
            fm.format = megdnn::param::Convolution::Format::NCHW44_DOT;
        } else {
            fm.format = megdnn::param::Convolution::Format::NCHW44;
        }
    } else if (pack_size == 8) {
        fm.format = megdnn::param::Convolution::Format::NCHW88;
    } else {
        mgb_assert(0, "only support nchw44 nchw88");
    }

    return megdnn::ConvBiasForward::is_nchw_nchwxx_optimized(
            src_node->dtype().enumv(), filter_node->dtype().enumv(),
            dst_node->dtype().enumv(), fm, bias_mode, nonline_mode);
}

void EnableNchwxxPass::fill_opr_convert_fun(size_t pack_c_size) {
    using TestFilterResult = std::pair<TransType, ReformatKey>;
    ReformatKey weight_to_nchwxx_mode_dense{
            TensorFormats::KCRS, TensorFormats::KCRSc8k8};
    ReformatKey weight_to_nchwxx_mode_group{
            TensorFormats::GKCRS, TensorFormats::GKCRSc8k8};
    ReformatKey weight_to_nchwxx_mode_chan{
            TensorFormats::C11RS, TensorFormats::C11RSc8};
    ReformatKey hybrid_nchw_nchwxx{TensorFormats::KCRS, TensorFormats::KRSCk8};
    ReformatKey src_to_nchwxx_mode{TensorFormats::NCHW, TensorFormats::NCHWc8};
    ReformatKey src_to_nchw_mode{TensorFormats::NCHWc8, TensorFormats::NCHW};
    megdnn::param::ConvBias::Format conv_bias_format =
            megdnn::param::ConvBias::Format::NCHW88;
    megdnn::param::Convolution::Format conv_format =
            megdnn::param::Convolution::Format::NCHW88;
    megdnn::param::Pooling::Format pooling_format =
            megdnn::param::Pooling::Format::NCHW88;
    megdnn::param::Resize::Format resize_format = megdnn::param::Resize::Format::NCHW88;
    std::string convter_pass_name = "conv_format_nchw88";

    if (pack_c_size == 4) {
        weight_to_nchwxx_mode_dense.output_format = TensorFormats::KCRSc4k4;
        weight_to_nchwxx_mode_group.output_format = TensorFormats::GKCRSc4k4;
        weight_to_nchwxx_mode_chan.output_format = TensorFormats::C11RSc4;
        hybrid_nchw_nchwxx.output_format = TensorFormats::KRSCk4;
        src_to_nchwxx_mode.output_format = TensorFormats::NCHWc4;
        src_to_nchw_mode.input_format = TensorFormats::NCHWc4;
        conv_bias_format = megdnn::param::ConvBias::Format::NCHW44;
        conv_format = megdnn::param::Convolution::Format::NCHW44;
        pooling_format = megdnn::param::Pooling::Format::NCHW44;
        resize_format = megdnn::param::Resize::Format::NCHW44;
        convter_pass_name = "conv_format_nchw44";
    }
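    //! Select the filter reformat rule: dense filters with IC and OC divisible
    //! by the pack size go to pure NCHWxx; otherwise a dense filter may still
    //! use the hybrid NCHW -> NCHWxx mode if nchw_nchwxx_valid allowed it;
    //! channel-wise and group convolutions use their dedicated weight layouts.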
    auto test_trans_nchwxx =
            [pack_c_size, weight_to_nchwxx_mode_dense, weight_to_nchwxx_mode_group,
             weight_to_nchwxx_mode_chan, hybrid_nchw_nchwxx](
                    const megdnn::param::Convolution::Sparse conv_mode,
                    const VarNode* filter, const size_t stride_h, const size_t stride_w,
                    bool valid_nchw_nchw44) -> TestFilterResult {
        TestFilterResult ret{TransType::TRANS_NONE, {}};
        if (conv_mode == megdnn::param::Convolution::Sparse::DENSE) {
            size_t OC = filter->shape()[0];
            size_t IC = filter->shape()[1];
            if ((IC % pack_c_size == 0) && (OC % pack_c_size == 0)) {
                ret.first = TransType::TRANS_PURE_NCHWXX;
                ret.second = weight_to_nchwxx_mode_dense;
            } else if (valid_nchw_nchw44) {
                ret.first = TransType::TRANS_HYBIRD_NCHWXX;
                ret.second = hybrid_nchw_nchwxx;
            }
        } else {
            mgb_throw_if(
                    conv_mode != megdnn::param::Convolution::Sparse::GROUP,
                    MegBrainError, "mode error");
            size_t group = filter->shape()[0];
            size_t ocpg = filter->shape()[1];
            size_t icpg = filter->shape()[2];
            if (icpg == 1 && ocpg == 1 && (group % pack_c_size == 0)) {
                ret.first = TransType::TRANS_PURE_NCHWXX;
                ret.second = weight_to_nchwxx_mode_chan;
            } else if ((icpg % pack_c_size == 0) && (ocpg % pack_c_size == 0)) {
                ret.first = TransType::TRANS_PURE_NCHWXX;
                ret.second = weight_to_nchwxx_mode_group;
            }
        }
        return ret;
    };
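    //! Rewrite a convolution according to the decision above: TRANS_NONE keeps
    //! NCHW (relayouting a packed src back), TRANS_PURE_NCHWXX packs both src
    //! and filter, and TRANS_HYBIRD_NCHWXX keeps the NCHW src while packing
    //! only the filter.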
    auto replace_conv_opr = [test_trans_nchwxx, conv_format, src_to_nchwxx_mode,
                             src_to_nchw_mode, pack_c_size](
                                    OperatorNodeBase* opr,
                                    const VarNodeArray& new_inp) {
        mgb_assert(opr->input().size() == new_inp.size());
        auto& conv_opr = opr->cast_final_safe<opr::ConvolutionForward>();
        mgb_throw_if(
                conv_opr.param().format != megdnn::param::Convolution::Format::NCHW,
                MegBrainError,
                "ConvertFormat Pass only support converting NCHW to NCHWXX");
        bool valid_nchw_nchw44 = nchw_nchwxx_valid(conv_opr, new_inp, pack_c_size);
        auto is_trans = test_trans_nchwxx(
                conv_opr.param().sparse, new_inp[1], conv_opr.param().stride_h,
                conv_opr.param().stride_w, valid_nchw_nchw44);
        //! can not trans to nchwxx
        if (is_trans.first == TransType::TRANS_NONE) {
            mgb_assert(
                    new_inp[1]->shape().ndim == 4 || new_inp[1]->shape().ndim == 5,
                    "The origin filter is not NCHW mode");
            VarNodeArray temp_inp = new_inp;
            //! if src is nchwxx, should RelayoutPlaceholder to nchw
            if (temp_inp[0]->shape().ndim == 5) {
                auto new_src = RelayoutPlaceholder::make(new_inp[0], src_to_nchw_mode);
                temp_inp[0] = new_src.node();
            }
            auto new_opr =
                    serialization::copy_opr_shallow(*opr, temp_inp, opr->config());
            return new_opr;
        } else if (is_trans.first == TransType::TRANS_PURE_NCHWXX) {
            //! filter trans to nchwxx mode
            mgb_assert(
                    new_inp[1]->shape().ndim == 4 || new_inp[1]->shape().ndim == 5,
                    "The origin filter is not NCHW mode");
            VarNode *conv_src = new_inp[0], *conv_filter = new_inp[1];
            auto new_filter = RelayoutPlaceholder::make(new_inp[1], is_trans.second);
            conv_filter = new_filter.node();
            //! src trans to nchwxx mode
            if (new_inp[0]->shape().ndim != 5) {
                mgb_assert(new_inp[0]->shape().ndim == 4);
                auto new_src =
                        RelayoutPlaceholder::make(new_inp[0], src_to_nchwxx_mode);
                conv_src = new_src.node();
            }
            auto new_param = conv_opr.param();
            new_param.format = conv_format;
            mgb_assert(
                    conv_src->shape().ndim == 5 && conv_filter->shape().ndim >= 6,
                    "The conv src dim is not trans to nchwxx");
            auto new_conv_opr = opr::Convolution::make(
                    conv_src, conv_filter, new_param, conv_opr.execution_policy(),
                    conv_opr.config());
            OperatorNodeBase* new_opr = new_conv_opr.node()->owner_opr();
            mgb_assert(
                    new_conv_opr.shape().ndim == 5,
                    "The conv dst dim is not trans to nchwxx");
            return new_opr;
        } else {
            mgb_assert(is_trans.first == TransType::TRANS_HYBIRD_NCHWXX);
            VarNode *conv_src = new_inp[0], *conv_filter = new_inp[1];
            auto new_filter = RelayoutPlaceholder::make(new_inp[1], is_trans.second);
            conv_filter = new_filter.node();
            mgb_assert(
                    conv_src->shape().ndim == 4 && conv_filter->shape().ndim == 5,
                    "The src and filter is OK");
            auto new_param = conv_opr.param();
            new_param.format = conv_format;
            auto new_conv_opr = opr::Convolution::make(
                    conv_src, conv_filter, new_param, conv_opr.execution_policy(),
                    conv_opr.config());
            OperatorNodeBase* new_opr = new_conv_opr.node()->owner_opr();
            mgb_assert(
                    new_conv_opr.shape().ndim == 5,
                    "The conv dst dim is not trans to nchwxx");
            return new_opr;
        }
    };

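    //! ConvBias follows the same three-way dispatch as Convolution above and
    //! additionally relayouts the bias (which may already be packed or be a
    //! per-channel scale) to match the chosen format.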
    auto replace_conv_bias_opr = [test_trans_nchwxx, conv_bias_format,
                                  src_to_nchwxx_mode, src_to_nchw_mode, pack_c_size](
                                         OperatorNodeBase* opr,
                                         const VarNodeArray& new_inp) {
        mgb_assert(opr->input().size() == new_inp.size());
        mgb_assert(
                opr->input().size() <= 3,
                "nchwxx does not support conv_bias fuse Z right now");
        auto& conv_bias_opr = opr->cast_final_safe<opr::ConvBiasForward>();
        mgb_throw_if(
                conv_bias_opr.param().format != megdnn::param::ConvBias::Format::NCHW,
                MegBrainError,
                "ConvertFormat Pass only support converting NCHW to NCHWXX");
        bool valid_nchw_nchw44 = nchw_nchwxx_valid(
                conv_bias_opr, new_inp, pack_c_size, conv_bias_opr.param().nonlineMode);
        auto is_trans = test_trans_nchwxx(
                conv_bias_opr.param().sparse, new_inp[1],
                conv_bias_opr.param().stride_h, conv_bias_opr.param().stride_w,
                valid_nchw_nchw44);

        //! can not trans to nchwxx
        if (is_trans.first == TransType::TRANS_NONE) {
            mgb_assert(
                    new_inp[1]->shape().ndim == 4 || new_inp[1]->shape().ndim == 5,
                    "The origin filter is not NCHW mode");
            VarNodeArray temp_inp = new_inp;
            //! if src is nchwxx, should RelayoutPlaceholder to nchw
            if (new_inp[0]->shape().ndim == 5) {
                auto new_src = RelayoutPlaceholder::make(new_inp[0], src_to_nchw_mode);
                temp_inp[0] = new_src.node();
            }
            //! the bias is nchwxx
            if (new_inp.size() > 2 && new_inp[2]->shape().ndim == 5) {
                auto new_bias = RelayoutPlaceholder::make(new_inp[2], src_to_nchw_mode);
                temp_inp[2] = new_bias.node();
            }
            auto new_opr =
                    serialization::copy_opr_shallow(*opr, temp_inp, opr->config());
            return new_opr;
        } else if (is_trans.first == TransType::TRANS_PURE_NCHWXX) {
            VarNode *conv_bias_src = new_inp[0], *conv_bias_filter = new_inp[1],
                    *conv_bias_bias = nullptr;
            //! filter trans to nchwxx mode
            mgb_assert(
                    new_inp[1]->shape().ndim == 4 || new_inp[1]->shape().ndim == 5,
                    "The origin filter is not NCHW mode");
            auto new_filter = RelayoutPlaceholder::make(new_inp[1], is_trans.second);
            conv_bias_filter = new_filter.node();
            //! src trans to nchwxx mode
            if (new_inp[0]->shape().ndim != 5) {
                mgb_assert(new_inp[0]->shape().ndim == 4);
                auto new_src =
                        RelayoutPlaceholder::make(new_inp[0], src_to_nchwxx_mode);
                conv_bias_src = new_src.node();
            }
            //! bias trans to nchwxx mode
            if (new_inp.size() > 2) {
                if (new_inp[2]->shape().ndim == 4) {
                    auto new_bias =
                            RelayoutPlaceholder::make(new_inp[2], src_to_nchwxx_mode);
                    conv_bias_bias = new_bias.node();
                } else {
                    mgb_assert(new_inp[2]->shape().ndim == 5);
                    conv_bias_bias = new_inp[2];
                }
            }
            auto new_param = conv_bias_opr.param();
            new_param.format = conv_bias_format;
            mgb_assert(
                    conv_bias_src->shape().ndim == 5 &&
                            conv_bias_filter->shape().ndim >= 6,
                    "The conv_bias src dim is not trans to nchwxx");
            SymbolVar new_conv_bias_opr;
            if (conv_bias_bias) {
                new_conv_bias_opr = opr::ConvBias::make(
                        conv_bias_src, conv_bias_filter, conv_bias_bias, new_param,
                        conv_bias_opr.execution_policy(), conv_bias_opr.config());
            } else {
                new_conv_bias_opr = opr::ConvBias::make(
                        conv_bias_src, conv_bias_filter, new_param,
                        conv_bias_opr.execution_policy(), conv_bias_opr.config());
            }
            OperatorNodeBase* new_opr = new_conv_bias_opr.node()->owner_opr();
            mgb_assert(
                    new_conv_bias_opr.shape().ndim == 5,
                    "The conv_bias dst dim is not trans to nchwxx");
            return new_opr;
        } else {
            mgb_assert(is_trans.first == TransType::TRANS_HYBIRD_NCHWXX);
            VarNode *conv_bias_src = new_inp[0], *conv_bias_filter = new_inp[1],
                    *conv_bias_bias = nullptr;
            auto new_filter = RelayoutPlaceholder::make(new_inp[1], is_trans.second);
            conv_bias_filter = new_filter.node();
            //! bias trans to nchwxx mode, bias may be scale
            if (new_inp.size() > 2) {
                if (new_inp[2]->shape().ndim == 4) {
                    auto new_bias =
                            RelayoutPlaceholder::make(new_inp[2], src_to_nchwxx_mode);
                    conv_bias_bias = new_bias.node();
                } else {
                    mgb_assert(new_inp[2]->shape().ndim == 5);
                    conv_bias_bias = new_inp[2];
                }
            }
            mgb_assert(
                    conv_bias_src->shape().ndim == 4 &&
                    conv_bias_filter->shape().ndim == 5);
            auto new_param = conv_bias_opr.param();
            new_param.format = conv_bias_format;
            SymbolVar new_conv_bias_opr;
            if (conv_bias_bias) {
                new_conv_bias_opr = opr::ConvBias::make(
                        conv_bias_src, conv_bias_filter, conv_bias_bias, new_param,
                        conv_bias_opr.execution_policy(), conv_bias_opr.config());
            } else {
                new_conv_bias_opr = opr::ConvBias::make(
                        conv_bias_src, conv_bias_filter, new_param,
                        conv_bias_opr.execution_policy(), conv_bias_opr.config());
            }
            OperatorNodeBase* new_opr = new_conv_bias_opr.node()->owner_opr();
            mgb_assert(
                    new_conv_bias_opr.shape().ndim == 5,
                    "The conv dst dim is not trans to nchwxx");
            return new_opr;
        }
    };

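    //! Pooling simply follows its input: a packed (ndim == 5) input switches
    //! the param format to the NCHWxx variant, otherwise the opr is copied.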
    auto replace_pooling_opr = [=](OperatorNodeBase* opr, const VarNodeArray& new_inp) {
        mgb_assert(opr->input().size() == new_inp.size());
        auto& pooling_opr = opr->cast_final_safe<opr::PoolingForward>();
        mgb_throw_if(
                pooling_opr.param().format != megdnn::param::Pooling::Format::NCHW,
                MegBrainError,
                "ConvertFormat Pass only support converting NCHW to NCHWxx");
        VarNode* inp = new_inp[0];
        //! if input is nchwxx
        if (inp->shape().ndim == 5) {
            auto new_param = pooling_opr.param();
            new_param.format = pooling_format;
            auto new_pooling_opr = opr::PoolingForward::make(
                    inp, new_param, pooling_opr.execution_policy(), opr->config());
            mgb_assert(
                    new_pooling_opr.shape().ndim == 5,
                    "The pooling dst dim is not trans to nchwxx");
            return new_pooling_opr.node()->owner_opr();
        } else {
            auto new_opr =
                    serialization::copy_opr_shallow(*opr, new_inp, opr->config());
            return new_opr;
        }
    };

    auto replace_resize_opr = [=](OperatorNodeBase* opr, const VarNodeArray& new_inp) {
        mgb_assert(opr->input().size() == new_inp.size());
        auto& resize_opr = opr->cast_final_safe<opr::ResizeForward>();
        mgb_throw_if(
                resize_opr.param().format != megdnn::param::Resize::Format::NCHW &&
                        resize_opr.param().format !=
                                megdnn::param::Resize::Format::NHWC,
                MegBrainError,
                "ConvertFormat Pass only support converting NCHW to NCHWxx");

        VarNode* inp = new_inp[0];
        if (resize_opr.param().format == megdnn::param::Resize::Format::NHWC) {
            auto temp_inp = new_inp;
            if (inp->shape().ndim == 5) {
                auto new_var = RelayoutPlaceholder::make(inp, src_to_nchw_mode);
                temp_inp[0] = new_var.node();
            }
            return serialization::copy_opr_shallow(*opr, temp_inp, opr->config());
        } else {
            auto temp_inp = new_inp;
            if (inp->shape().ndim == 5) {
                auto new_param = resize_opr.param();
                new_param.format = resize_format;
                auto new_resize_opr = opr::ResizeForward::make(
                        new_inp[0], new_inp[1], new_param, opr->config());
                return new_resize_opr.node()->owner_opr();
            } else {
                return serialization::copy_opr_shallow(*opr, new_inp, opr->config());
            }
        }
    };

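    //! Reduce can stay in NCHWxx only when it reduces over the spatial axes
    //! (axis 2 or 3); reductions over other axes get their input relayouted
    //! back to NCHW first.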
    auto replace_reduce_opr = [=](OperatorNodeBase* opr, const VarNodeArray& new_inp) {
        mgb_assert(opr->input().size() == new_inp.size());
        auto& reduce_opr = opr->cast_final_safe<opr::Reduce>();

        VarNodeArray temp_inp = new_inp;
        if (!opr->input(0)->shape().eq_shape(new_inp[0]->shape())) {
            mgb_assert(opr->input(0)->shape().ndim == 4);
            mgb_assert(new_inp[0]->shape().ndim == 5);
            if (reduce_opr.param().axis != 2 && reduce_opr.param().axis != 3) {
                auto new_var = RelayoutPlaceholder::make(new_inp[0], src_to_nchw_mode);
                temp_inp[0] = new_var.node();
            }
        }
        return serialization::copy_opr_shallow(*opr, temp_inp, opr->config());
    };

    //! When the inputs have changed and all of them can be converted to nchwxx,
    //! this opr will run in nchwxx mode; otherwise it will run in nchw mode
    //! (for example concat and elemwise oprs)
    auto replace_multi_inp_opr = [=](OperatorNodeBase* opr,
                                     const VarNodeArray& new_inp) {
        mgb_assert(opr->input().size() == new_inp.size());
        bool has_inp_changed = false;
        bool can_exec_ncwxx = true;
        for (size_t i = 0; i < opr->input().size(); i++) {
            if (new_inp[i]->shape().ndim == 5) {
                has_inp_changed = true;
            } else if (new_inp[i]->shape().ndim == 4) {
                if (new_inp[i]->shape()[1] % pack_c_size != 0) {
                    can_exec_ncwxx = false;
                }
            } else if (!new_inp[i]->shape().is_scalar()) {
                can_exec_ncwxx = false;
            }
        }
        if (has_inp_changed) {
            auto temp_inp = new_inp;
            if (can_exec_ncwxx) {
                for (size_t i = 0; i < opr->input().size(); i++) {
                    if (new_inp[i]->shape().ndim == 4) {
                        auto new_var = RelayoutPlaceholder::make(
                                new_inp[i], src_to_nchwxx_mode);
                        temp_inp[i] = new_var.node();
                    } else {
                        mgb_assert(
                                (new_inp[i]->shape().ndim == 5) ||
                                new_inp[i]->shape().is_scalar());
                    }
                }
            } else {
                for (size_t i = 0; i < opr->input().size(); i++) {
                    if (new_inp[i]->shape().ndim == 5) {
                        auto new_var =
                                RelayoutPlaceholder::make(new_inp[i], src_to_nchw_mode);
                        temp_inp[i] = new_var.node();
                    }
                }
            }
            return serialization::copy_opr_shallow(*opr, temp_inp, opr->config());
        } else {
            return serialization::copy_opr_shallow(*opr, new_inp, opr->config());
        }
    };

    auto relayout_inp_to_nchw = [=](OperatorNodeBase* opr,
                                    const VarNodeArray& new_inp) {
        mgb_assert(opr->input().size() == new_inp.size());
        VarNodeArray temp_inp = new_inp;
        for (size_t i = 0; i < opr->input().size(); i++) {
            if (!opr->input(i)->shape().eq_shape(new_inp[i]->shape())) {
                mgb_assert(opr->input(i)->shape().ndim == 4);
                mgb_assert(new_inp[i]->shape().ndim == 5);
                auto new_var = RelayoutPlaceholder::make(new_inp[i], src_to_nchw_mode);
                temp_inp[i] = new_var.node();
            }
        }
        return serialization::copy_opr_shallow(*opr, temp_inp, opr->config());
    };

    auto&& replace_func = m_opr_replace_func;
    //! supported nchwxx
    replace_func[opr::Convolution::typeinfo()] = replace_conv_opr;
    replace_func[opr::ConvBias::typeinfo()] = replace_conv_bias_opr;
    replace_func[opr::PoolingForward::typeinfo()] = replace_pooling_opr;
    replace_func[opr::ResizeForward::typeinfo()] = replace_resize_opr;
    replace_func[opr::Concat::typeinfo()] = replace_multi_inp_opr;
    replace_func[opr::Elemwise::typeinfo()] = replace_multi_inp_opr;
    replace_func[opr::TypeCvt::typeinfo()] = replace_multi_inp_opr;
    replace_func[opr::ElemwiseMultiType::typeinfo()] = replace_multi_inp_opr;
    replace_func[opr::PowC::typeinfo()] = replace_multi_inp_opr;
    replace_func[opr::Reduce::typeinfo()] = replace_reduce_opr;
    //! not supported yet
    replace_func[opr::ConvolutionBackwardData::typeinfo()] = relayout_inp_to_nchw;
    replace_func[opr::Subtensor::typeinfo()] = relayout_inp_to_nchw;
    replace_func[opr::GetVarShape::typeinfo()] = relayout_inp_to_nchw;
    replace_func[opr::Dimshuffle::typeinfo()] = relayout_inp_to_nchw;
    replace_func[opr::AssertEqual::typeinfo()] = relayout_inp_to_nchw;
    replace_func[opr::IncrSubtensor::typeinfo()] = relayout_inp_to_nchw;
    replace_func[opr::WarpPerspectiveForward::typeinfo()] = relayout_inp_to_nchw;
    replace_func[opr::WarpAffineForward::typeinfo()] = relayout_inp_to_nchw;
    replace_func[opr::Reshape::typeinfo()] = relayout_inp_to_nchw;
    replace_func[opr::AxisAddRemove::typeinfo()] = relayout_inp_to_nchw;
    replace_func[opr::Argmax::typeinfo()] = relayout_inp_to_nchw;
    replace_func[opr::Broadcast::typeinfo()] = relayout_inp_to_nchw;
    replace_func[opr::ImmutableTensor::typeinfo()] = relayout_inp_to_nchw;
}

std::unique_ptr<EnableNchwxxPass> EnableNchwxxPass::make_nchwxx_converter(
        size_t pack_c_size) {
    MIDOUT_B("EnableNchwxxPass::make")
    auto ret = std::make_unique<EnableNchwxxPass>(pack_c_size);
    ret->set_var_replace_check_flag(VarReplaceCheckFlag::NOCHECK);
    std::string convter_pass_name = "conv_format_nchw88";
    if (pack_c_size == 4) {
        convter_pass_name = "conv_format_nchw44";
    }
    ret->fill_opr_convert_fun(pack_c_size);
    ret->set_name(convter_pass_name);
    return ret;
    MIDOUT_E
}

/* ================ EnableNchw44DotPass =============== */
VarNode* EnableNchw44DotPass::on_graph_endpoint_var(
        VarNode* new_var, VarNode* orig_var) const {
    if (!orig_var->shape().eq_shape(new_var->shape())) {
        return RelayoutPlaceholder::make(
                       new_var, ReformatKey{TensorFormats::NCHWc4, TensorFormats::NCHW})
                .node();
    }
    return new_var;
}

std::unique_ptr<EnableNchw44DotPass> EnableNchw44DotPass::make_nchw44_dot_converter() {
    MIDOUT_B("EnableNchw44DotPass::make")
    auto ret = std::make_unique<EnableNchw44DotPass>();
    ret->set_var_replace_check_flag(VarReplaceCheckFlag::NOCHECK);
    //! First is whether the conv can trans to nchwxx, second is the filter
    //! trans mode

    struct TestTransResult {
        TransType trans_type;
        ReformatKey relayout_mod;
        megdnn::param::Convolution::Format conv_format;
    };
    constexpr size_t pack_c_size = 4_z;
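    //! Same decision procedure as in EnableNchwxxPass, but (quantized) int8
    //! filters additionally select the NCHW44_DOT format together with its
    //! k4c4 weight layout; float filters fall back to plain NCHW44.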
    auto test_trans_nchw44_dot = [](const megdnn::param::Convolution::Sparse conv_mode,
                                    const VarNode* filter, const size_t stride_h,
                                    const size_t stride_w,
                                    const bool valid_nchw_nchw44) -> TestTransResult {
        TestTransResult ret{TransType::TRANS_NONE, {}, {}};
        bool is_int8 = filter->dtype().enumv() == DTypeEnum::QuantizedS8 ||
                       filter->dtype().enumv() == DTypeEnum::Int8;
        if (conv_mode == megdnn::param::Convolution::Sparse::DENSE) {
            size_t OC = filter->shape()[0];
            size_t IC = filter->shape()[1];
            if ((IC % pack_c_size == 0) && (OC % pack_c_size == 0)) {
                ret.trans_type = TransType::TRANS_PURE_NCHWXX;
                if (is_int8) {
                    ret.relayout_mod =
                            ReformatKey{TensorFormats::KCRS, TensorFormats::KCRSk4c4};
                    ret.conv_format = megdnn::param::ConvBias::Format::NCHW44_DOT;
                } else {
                    ret.relayout_mod =
                            ReformatKey{TensorFormats::KCRS, TensorFormats::KCRSc4k4};
                    ret.conv_format = megdnn::param::ConvBias::Format::NCHW44;
                }
            } else if (valid_nchw_nchw44) {
                ret.trans_type = TransType::TRANS_HYBIRD_NCHWXX;
                ret.relayout_mod =
                        ReformatKey{TensorFormats::KCRS, TensorFormats::KRSCk4};
                if (is_int8) {
                    ret.conv_format = megdnn::param::ConvBias::Format::NCHW44_DOT;
                } else {
                    ret.conv_format = megdnn::param::ConvBias::Format::NCHW44;
                }
            }
        } else {
            mgb_throw_if(
                    conv_mode != megdnn::param::Convolution::Sparse::GROUP,
                    MegBrainError, "mode error");
            size_t group = filter->shape()[0];
            size_t ocpg = filter->shape()[1];
            size_t icpg = filter->shape()[2];
            if (icpg == 1 && ocpg == 1 && (group % pack_c_size == 0)) {
                ret.trans_type = TransType::TRANS_PURE_NCHWXX;
                ret.relayout_mod =
                        ReformatKey{TensorFormats::C11RS, TensorFormats::C11RSc4};
                ret.conv_format = megdnn::param::ConvBias::Format::NCHW44;
            } else if ((icpg % pack_c_size == 0) && (ocpg % pack_c_size == 0)) {
                ret.trans_type = TransType::TRANS_PURE_NCHWXX;
                if (is_int8) {
                    ret.relayout_mod =
                            ReformatKey{TensorFormats::GKCRS, TensorFormats::GKCRSk4c4};
                    ret.conv_format = megdnn::param::ConvBias::Format::NCHW44_DOT;
                } else {
                    ret.relayout_mod =
                            ReformatKey{TensorFormats::GKCRS, TensorFormats::GKCRSc4k4};
                    ret.conv_format = megdnn::param::ConvBias::Format::NCHW44;
                }
            }
        }
        return ret;
    };
    auto replace_conv_opr = [test_trans_nchw44_dot](
                                    OperatorNodeBase* opr,
                                    const VarNodeArray& new_inp) {
        mgb_assert(opr->input().size() == new_inp.size());
        auto& conv_opr = opr->cast_final_safe<opr::ConvolutionForward>();
        mgb_throw_if(
                conv_opr.param().format != megdnn::param::Convolution::Format::NCHW,
                MegBrainError,
                "ConvertFormat Pass only support converting NCHW to "
                "NCHW44_DOT");
        bool valid_nchw_nchw44 = nchw_nchwxx_valid(
                conv_opr, new_inp, pack_c_size,
                megdnn::param::ConvBias::NonlineMode::IDENTITY, true);
        auto is_trans = test_trans_nchw44_dot(
                conv_opr.param().sparse, new_inp[1], conv_opr.param().stride_h,
                conv_opr.param().stride_w, valid_nchw_nchw44);
        //! can not trans to nchwxx
        if (is_trans.trans_type == TransType::TRANS_NONE) {
            mgb_assert(
                    new_inp[1]->shape().ndim == 4 || new_inp[1]->shape().ndim == 5,
                    "The origin filter is not NCHW mode");
            VarNodeArray temp_inp = new_inp;
            //! if src is nchwxx, should RelayoutPlaceholder to nchw
            if (temp_inp[0]->shape().ndim == 5) {
                auto new_src = RelayoutPlaceholder::make(
                        new_inp[0],
                        ReformatKey{TensorFormats::NCHWc4, TensorFormats::NCHW});
                temp_inp[0] = new_src.node();
            }
            auto new_opr =
                    serialization::copy_opr_shallow(*opr, temp_inp, opr->config());
            return new_opr;
        } else if (is_trans.trans_type == TransType::TRANS_PURE_NCHWXX) {
            //! filter trans to nchwxx mode
            mgb_assert(
                    new_inp[1]->shape().ndim == 4 || new_inp[1]->shape().ndim == 5,
                    "The origin filter is not NCHW mode");
            VarNode *conv_src = new_inp[0], *conv_filter = new_inp[1];
            auto new_filter =
                    RelayoutPlaceholder::make(new_inp[1], is_trans.relayout_mod);
            conv_filter = new_filter.node();
            //! src trans to nchwxx mode
            if (new_inp[0]->shape().ndim != 5) {
                mgb_assert(new_inp[0]->shape().ndim == 4);
                auto new_src = RelayoutPlaceholder::make(
                        new_inp[0],
                        ReformatKey{TensorFormats::NCHW, TensorFormats::NCHWc4});
                conv_src = new_src.node();
            }
            auto new_param = conv_opr.param();
            new_param.format = is_trans.conv_format;
            mgb_assert(
                    conv_src->shape().ndim == 5 && conv_filter->shape().ndim >= 6,
                    "The conv src dim is not trans to nchwxx");
            auto new_conv_opr = opr::Convolution::make(
                    conv_src, conv_filter, new_param, conv_opr.execution_policy(),
                    conv_opr.config());
            OperatorNodeBase* new_opr = new_conv_opr.node()->owner_opr();
            mgb_assert(
                    new_conv_opr.shape().ndim == 5,
                    "The conv dst dim is not trans to nchwxx");
            return new_opr;
        } else {
            mgb_assert(is_trans.trans_type == TransType::TRANS_HYBIRD_NCHWXX);
            VarNode *conv_src = new_inp[0], *conv_filter = new_inp[1];
            auto new_filter =
                    RelayoutPlaceholder::make(new_inp[1], is_trans.relayout_mod);
            conv_filter = new_filter.node();
            mgb_assert(
                    conv_src->shape().ndim == 4 && conv_filter->shape().ndim == 5,
                    "The src and filter is OK");
            auto new_param = conv_opr.param();
            new_param.format = is_trans.conv_format;
            auto new_conv_opr = opr::Convolution::make(
                    conv_src, conv_filter, new_param, conv_opr.execution_policy(),
                    conv_opr.config());
            OperatorNodeBase* new_opr = new_conv_opr.node()->owner_opr();
            mgb_assert(
                    new_conv_opr.shape().ndim == 5,
                    "The conv dst dim is not trans to nchwxx");
            return new_opr;
        }
    };

1982
    auto replace_conv_bias_opr = [test_trans_nchw44_dot](
1983 1984 1985
                                         OperatorNodeBase* opr,
                                         const VarNodeArray& new_inp) {
        mgb_assert(opr->input().size() == new_inp.size());
M
Megvii Engine Team 已提交
1986 1987 1988
        mgb_assert(
                opr->input().size() <= 3,
                "nchwxx-dot does not support conv_bias fuse Z right now");
1989
        auto& conv_bias_opr = opr->cast_final_safe<opr::ConvBiasForward>();
1990
        mgb_throw_if(
M
Megvii Engine Team 已提交
1991
                conv_bias_opr.param().format != megdnn::param::ConvBias::Format::NCHW,
1992 1993
                MegBrainError,
                "ConvertFormat Pass only support converting NCHW to NCHWXX");
M
Megvii Engine Team 已提交
1994 1995 1996
        bool valid_nchw_nchw44 = nchw_nchwxx_valid(
                conv_bias_opr, new_inp, pack_c_size, conv_bias_opr.param().nonlineMode,
                true);
1997 1998
        auto is_trans = test_trans_nchw44_dot(
                conv_bias_opr.param().sparse, new_inp[1],
1999 2000
                conv_bias_opr.param().stride_h, conv_bias_opr.param().stride_w,
                valid_nchw_nchw44);
M
Megvii Engine Team 已提交
2001 2002
        auto megdnn_conv = opr::intl::get_megdnn_handle(conv_bias_opr.comp_node())
                                   ->create_operator<megdnn::ConvBiasForward>();
2003 2004
        SmallVector<TensorLayout> layouts;

2005
        //! can not trans to nchwxx
2006
        if (is_trans.trans_type == TransType::TRANS_NONE) {
M
Megvii Engine Team 已提交
2007 2008 2009
            mgb_assert(
                    new_inp[1]->shape().ndim == 4 || new_inp[1]->shape().ndim == 5,
                    "The origin filter is not NCHW mode");
2010 2011 2012 2013
            VarNodeArray temp_inp = new_inp;
            //! if src is nchwxx, should RelayoutPlaceholder to nchw
            if (temp_inp[0]->shape().ndim == 5) {
                auto new_src = RelayoutPlaceholder::make(
2014 2015
                        new_inp[0],
                        ReformatKey{TensorFormats::NCHWc4, TensorFormats::NCHW});
2016 2017
                temp_inp[0] = new_src.node();
            }
2018

2019
            //! the bias is nchwxx
2020
            if (new_inp.size() > 2 && temp_inp[2]->shape().ndim == 5) {
2021
                auto new_bias = RelayoutPlaceholder::make(
M
Megvii Engine Team 已提交
2022 2023
                        new_inp[2],
                        ReformatKey{TensorFormats::NCHWc4, TensorFormats::NCHW});
2024 2025
                temp_inp[2] = new_bias.node();
            }
M
Megvii Engine Team 已提交
2026 2027
            auto new_opr =
                    serialization::copy_opr_shallow(*opr, temp_inp, opr->config());
2028
            return new_opr;
2029
        } else if (is_trans.trans_type == TransType::TRANS_PURE_NCHWXX) {
2030
            VarNode *conv_bias_src = new_inp[0], *conv_bias_filter = new_inp[1],
2031
                    *conv_bias_bias = nullptr;
2032
            //! filter trans to nchwxx mode
M
Megvii Engine Team 已提交
2033 2034 2035 2036 2037
            mgb_assert(
                    new_inp[1]->shape().ndim == 4 || new_inp[1]->shape().ndim == 5,
                    "The origin filter is not NCHW mode");
            auto new_filter =
                    RelayoutPlaceholder::make(new_inp[1], is_trans.relayout_mod);
2038 2039 2040 2041 2042
            conv_bias_filter = new_filter.node();
            //! src trans to nchwxx mode
            if (new_inp[0]->shape().ndim != 5) {
                mgb_assert(new_inp[0]->shape().ndim == 4);
                auto new_src = RelayoutPlaceholder::make(
M
Megvii Engine Team 已提交
2043 2044
                        new_inp[0],
                        ReformatKey{TensorFormats::NCHW, TensorFormats::NCHWc4});
2045 2046
                conv_bias_src = new_src.node();
            }
2047 2048 2049 2050
            //! bias trans to nchwxx mode
            if (new_inp.size() > 2) {
                if (new_inp[2]->shape().ndim == 4) {
                    auto new_bias = RelayoutPlaceholder::make(
M
Megvii Engine Team 已提交
2051 2052
                            new_inp[2],
                            ReformatKey{TensorFormats::NCHW, TensorFormats::NCHWc4});
2053 2054 2055 2056 2057
                    conv_bias_bias = new_bias.node();
                } else {
                    mgb_assert(new_inp[2]->shape().ndim == 5);
                    conv_bias_bias = new_inp[2];
                }
2058 2059
            }
            auto new_param = conv_bias_opr.param();
2060
            new_param.format = is_trans.conv_format;
M
Megvii Engine Team 已提交
2061 2062 2063 2064
            mgb_assert(
                    conv_bias_src->shape().ndim == 5 &&
                            conv_bias_filter->shape().ndim >= 6,
                    "The conv_bias src dim is not trans to nchwxx");
2065 2066 2067
            SymbolVar new_conv_bias_opr;
            if (conv_bias_bias) {
                new_conv_bias_opr = opr::ConvBias::make(
M
Megvii Engine Team 已提交
2068 2069
                        conv_bias_src, conv_bias_filter, conv_bias_bias, new_param,
                        conv_bias_opr.execution_policy(), conv_bias_opr.config());
2070 2071 2072
            } else {
                new_conv_bias_opr = opr::ConvBias::make(
                        conv_bias_src, conv_bias_filter, new_param,
M
Megvii Engine Team 已提交
2073
                        conv_bias_opr.execution_policy(), conv_bias_opr.config());
2074
            }
2075
            OperatorNodeBase* new_opr = new_conv_bias_opr.node()->owner_opr();
M
Megvii Engine Team 已提交
2076 2077 2078
            mgb_assert(
                    new_conv_bias_opr.shape().ndim == 5,
                    "The conv_bias dst dim is not trans to nchwxx");
2079 2080
            return new_opr;
        } else {
2081
            mgb_assert(is_trans.trans_type == TransType::TRANS_HYBIRD_NCHWXX);
2082
            VarNode *conv_bias_src = new_inp[0], *conv_bias_filter = new_inp[1],
                    *conv_bias_bias = nullptr;
            auto new_filter =
                    RelayoutPlaceholder::make(new_inp[1], is_trans.relayout_mod);
            conv_bias_filter = new_filter.node();
            //! bias trans to nchwxx mode, bias may be a scalar
            if (new_inp.size() > 2) {
                if (new_inp[2]->shape().ndim == 4) {
                    auto new_bias = RelayoutPlaceholder::make(
                            new_inp[2],
                            ReformatKey{TensorFormats::NCHW, TensorFormats::NCHWc4});
                    conv_bias_bias = new_bias.node();
                } else {
                    mgb_assert(new_inp[2]->shape().ndim == 5);
                    conv_bias_bias = new_inp[2];
                }
            }
            mgb_assert(
                    conv_bias_src->shape().ndim == 4 &&
                    conv_bias_filter->shape().ndim == 5);
            auto new_param = conv_bias_opr.param();
            new_param.format = is_trans.conv_format;
            SymbolVar new_conv_bias_opr;
            if (conv_bias_bias) {
                new_conv_bias_opr = opr::ConvBias::make(
                        conv_bias_src, conv_bias_filter, conv_bias_bias, new_param,
                        conv_bias_opr.execution_policy(), conv_bias_opr.config());
            } else {
                new_conv_bias_opr = opr::ConvBias::make(
                        conv_bias_src, conv_bias_filter, new_param,
                        conv_bias_opr.execution_policy(), conv_bias_opr.config());
            }
            OperatorNodeBase* new_opr = new_conv_bias_opr.node()->owner_opr();
            mgb_assert(
                    new_conv_bias_opr.shape().ndim == 5,
                    "The conv dst dim is not trans to nchwxx");
            return new_opr;
        }
    };
    ret->fill_opr_convert_fun(4);
    auto&& replace_func = ret->m_opr_replace_func;
    //! supported nchwxx
    replace_func[opr::Convolution::typeinfo()] = replace_conv_opr;
    replace_func[opr::ConvBias::typeinfo()] = replace_conv_bias_opr;
    return ret;
    MIDOUT_E
}

/* ==================== ShuffleShuffleRemovePass ================= */
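/*!
 * \brief implementation detail of ShuffleShuffleRemovePass
 *
 * The pass works in two steps. detect_shuffle_operations() pattern-matches
 * Reshape/Dimshuffle/RelayoutFormat combinations that merely convert between
 * tensor layouts (nchw <-> nchw4, nchw4 <-> nchw32, nchw4 -> nchw/nhwc,
 * nchw4 <-> chwn4) and rewrites each match into an AbstractShuffleOpr
 * placeholder. do_replace() then folds chains of AbstractShuffleOpr and
 * TypeCvt oprs into a single reformat plus the required type conversions;
 * e.g. an nchw -> nchw4 shuffle immediately followed by an nchw4 -> nchw
 * shuffle cancels out completely.
 */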
class ShuffleShuffleRemovePass::Impl {
    using Format = opr::ConvBias::Param::Format;

    OptState& m_opt_state;
    using AbstractShuffleOpr = TensorReformatPass::RelayoutPlaceholder;

    void detect_shuffle_operations();
    void do_replace();

public:
    Impl(OptState& opt_state) : m_opt_state{opt_state} {
        detect_shuffle_operations();
        do_replace();
    }
};

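//! pattern-match layout-transforming Reshape/Dimshuffle/RelayoutFormat
//! combinations and replace each of them with an AbstractShuffleOpr, so that
//! consecutive reformats can be folded later in do_replace()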
void ShuffleShuffleRemovePass::Impl::detect_shuffle_operations() {
    auto rewriter = m_opt_state.graph().make_rewriter();
    auto uniq_reader_check = UniqReaderCheck{m_opt_state.graph()};
    auto try_reshape_shuffle = [&rewriter, &uniq_reader_check](OperatorNodeBase* opr) {
        // check shuffle
        auto shuffle = try_cast_as_op<opr::Dimshuffle>(opr);
        if (shuffle == nullptr)
            return false;
        auto&& param = shuffle->param();
        if (param.pattern_len != 5)
            return false;
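        // a Reshape that splits C into (C/4, 4) followed by Dimshuffle
        // {0, 1, 3, 4, 2} produces the nchw4 layout (N, C/4, H, W, 4)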
        bool is_nchw2nchw4 = param.pattern[0] == 0 && param.pattern[1] == 1 &&
                             param.pattern[2] == 3 && param.pattern[3] == 4 &&
                             param.pattern[4] == 2 && opr->output(0)->shape()[4] == 4;
        if (!is_nchw2nchw4)
            return false;
        if (!uniq_reader_check(shuffle->input(0)))
            return false;

        // check reshape
        auto reshape = try_cast_as_op<opr::Reshape>(opr->input(0)->owner_opr());
        if (reshape == nullptr)
            return false;
        auto inp_var = rewriter.get_var(reshape->input(0));
        auto abstract_shuffle = AbstractShuffleOpr::make(
                inp_var, ReformatKey{TensorFormats::NCHW, TensorFormats::NCHWc4});
        rewriter.replace_var(
                opr->output(0), abstract_shuffle.node(),
                mgb_cstr_log("replace reformat(nchw -> nchw4) to "
                             "AbstractShuffleOpr(nchw -> nchw4)."));
        return true;
    };

    auto try_reshape_shuffle_reshape = [&rewriter,
                                        &uniq_reader_check](OperatorNodeBase* opr) {
        // check reshape
        auto reshape1 = try_cast_as_op<opr::Reshape>(opr);
        if (reshape1 == nullptr)
            return false;
        if (!uniq_reader_check(reshape1->input(0)))
            return false;

        // check shuffle
        auto shuffle = try_cast_as_op<opr::Dimshuffle>(opr->input(0)->owner_opr());
        if (shuffle == nullptr)
            return false;
        auto&& param = shuffle->param();
        if (param.pattern_len != 6)
            return false;
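        // the matched chain is reshape + Dimshuffle + reshape: pattern
        // {0, 1, 3, 4, 2, 5} (with inner split dims 8 and 4) converts nchw4
        // to nchw32, while {0, 1, 4, 2, 3, 5} converts nchw32 back to nchw4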
        bool is_nchw42nchw32 = param.pattern[0] == 0 && param.pattern[1] == 1 &&
                               param.pattern[2] == 3 && param.pattern[3] == 4 &&
                               param.pattern[4] == 2 && param.pattern[5] == 5 &&
                               shuffle->input(0)->shape()[5] == 4 &&
                               shuffle->input(0)->shape()[2] == 8;
        bool is_nchw322nchw4 = param.pattern[0] == 0 && param.pattern[1] == 1 &&
                               param.pattern[2] == 4 && param.pattern[3] == 2 &&
                               param.pattern[4] == 3 && param.pattern[5] == 5 &&
                               shuffle->input(0)->shape()[4] == 8 &&
                               shuffle->input(0)->shape()[5] == 4;
        if (!is_nchw42nchw32 && !is_nchw322nchw4)
            return false;
        if (!uniq_reader_check(shuffle->input(0)))
            return false;

        // check reshape
        auto reshape2 = try_cast_as_op<opr::Reshape>(shuffle->input(0)->owner_opr());
        if (reshape2 == nullptr)
            return false;
        auto inp_var = rewriter.get_var(reshape2->input(0));
        Format inp_format = is_nchw42nchw32 ? Format::NCHW4 : Format::NCHW32,
               out_format = is_nchw42nchw32 ? Format::NCHW32 : Format::NCHW4;
        auto abstract_shuffle = AbstractShuffleOpr::make(
                inp_var, ReformatKey{
                                 opr_format_to_tensor_formats(inp_format),
                                 opr_format_to_tensor_formats(out_format)});
        std::string reformat_type =
                is_nchw42nchw32 ? "nchw4 -> nchw32" : "nchw32 -> nchw4";
        rewriter.replace_var(
                opr->output(0), abstract_shuffle.node(),
                mgb_cstr_log(ssprintf(
                                     "replace reformat(%s) to "
                                     "AbstractShuffleOpr(%s).",
                                     reformat_type.c_str(), reformat_type.c_str())
                                     .c_str()));
        return true;
    };

    auto try_shuffle_reshape = [&rewriter, &uniq_reader_check](OperatorNodeBase* opr) {
        // check reshape
        auto reshape = try_cast_as_op<opr::Reshape>(opr);
        if (reshape == nullptr)
            return false;
        if (!uniq_reader_check(reshape->input(0)))
            return false;

        // check shuffle
        auto shuffle = try_cast_as_op<opr::Dimshuffle>(opr->input(0)->owner_opr());
        if (shuffle == nullptr)
            return false;
        auto&& param = shuffle->param();
        if (param.pattern_len != 5)
            return false;
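        // Dimshuffle{0, 1, 4, 2, 3} followed by the Reshape matched above
        // merges the inner 4-channel dim back into C (nchw4 -> nchw), while
        // Dimshuffle{0, 2, 3, 1, 4} + Reshape yields nhwc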
        bool is_nchw42nchw = param.pattern[0] == 0 && param.pattern[1] == 1 &&
                             param.pattern[2] == 4 && param.pattern[3] == 2 &&
                             param.pattern[4] == 3 &&
                             shuffle->input(0)->shape()[4] == 4;
        bool is_nchw42nhwc = param.pattern[0] == 0 && param.pattern[1] == 2 &&
                             param.pattern[2] == 3 && param.pattern[3] == 1 &&
                             param.pattern[4] == 4 &&
                             shuffle->input(0)->shape()[4] == 4;
        if (!is_nchw42nchw && !is_nchw42nhwc)
            return false;
        auto inp_var = rewriter.get_var(shuffle->input(0));
        ReformatKey key;
        key.input_format = TensorFormats::NCHWc4;
        if (is_nchw42nchw) {
            key.output_format = TensorFormats::NCHW;
        } else {
            mgb_assert(is_nchw42nhwc);
            key.output_format = TensorFormats::NHWC;
        }
        auto abstract_shuffle = AbstractShuffleOpr::make(inp_var, key);
        rewriter.replace_var(
                opr->output(0), abstract_shuffle.node(),
                mgb_cstr_log("replace reformat(nchw4 -> nchw/nhwc) to "
                             "AbstractShuffleOpr(nchw4 -> nchw/nhwc)."));
        return true;
    };

    auto try_relayout_format = [&rewriter](OperatorNodeBase* opr) {
        // check relayout format
        auto reformat = try_cast_as_op<opr::RelayoutFormat>(opr);
        if (reformat == nullptr)
            return false;
        auto&& param = reformat->param();
        if (param.mode != opr::RelayoutFormat::Param::Mode::CHWN4_NCHW4 &&
            param.mode != opr::RelayoutFormat::Param::Mode::NCHW4_CHWN4)
            return false;
        auto inp_var = rewriter.get_var(reformat->input(0));
        cg::SymbolVar abstract_shuffle;
        if (param.mode == opr::RelayoutFormat::Param::Mode::NCHW4_CHWN4) {
            abstract_shuffle = AbstractShuffleOpr::make(
                    inp_var, ReformatKey{TensorFormats::NCHWc4, TensorFormats::CHWNc4});
        } else {
            abstract_shuffle = AbstractShuffleOpr::make(
                    inp_var, ReformatKey{TensorFormats::CHWNc4, TensorFormats::NCHWc4});
        }
        rewriter.replace_var(
                opr->output(0), abstract_shuffle.node(),
                mgb_cstr_log("replace reformat(nchw4 <-> chwn4) to "
                             "AbstractShuffleOpr(nchw4 <-> chwn4)."));
        return true;
    };

    auto on_opr = [&try_reshape_shuffle, &try_shuffle_reshape,
                   &try_reshape_shuffle_reshape, &try_relayout_format, &rewriter,
                   &uniq_reader_check](OperatorNodeBase* opr) {
        if (!try_reshape_shuffle_reshape(opr) && !try_reshape_shuffle(opr) &&
            !try_shuffle_reshape(opr) && !try_relayout_format(opr)) {
            auto new_opr = rewriter.auto_replace_outputs(opr);
            uniq_reader_check.update_on_opr_auto_replace(opr, new_opr);
        }
    };
    m_opt_state.graph().iter(on_opr);
    rewriter.apply_inplace();
}

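//! fold every chain of TypeCvt / AbstractShuffleOpr oprs into a single
//! reformat (taken from ReformatManager) followed by the required TypeCvts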
void ShuffleShuffleRemovePass::Impl::do_replace() {
    auto rewriter = m_opt_state.graph().make_rewriter();
    auto uniq_reader_check = UniqReaderCheck{m_opt_state.graph()};
    ThinHashSet<OperatorNodeBase*> writers;
    ThinHashSet<OperatorNodeBase*> root;
    ThinHashSet<VarNode*> trt_opr_inps;
    SmallVector<OperatorNodeBase*> topo_order;

    auto cb = [&topo_order, &trt_opr_inps](OperatorNodeBase* opr) {
        topo_order.push_back(opr);
        MGB_MARK_USED_VAR(trt_opr_inps);
#if MGB_ENABLE_TENSOR_RT
        if (opr->same_type<opr::TensorRTOpr>()) {
            for (auto&& inp : opr->input())
                trt_opr_inps.insert(inp);
        }
#endif
    };
    m_opt_state.graph().iter(cb);

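    // traverse in reverse topological order to mark the endpoints ("root") of
    // each TypeCvt/AbstractShuffleOpr chain: an opr is an endpoint if no other
    // chain member consumes its output, or if its output has multiple readers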
    for (auto&& opr : reverse_adaptor(topo_order)) {
        if (opr->same_type<opr::TypeCvt>() || opr->same_type<AbstractShuffleOpr>()) {
            writers.insert(opr->input(0)->owner_opr());
            if (writers.count(opr) > 0) {
                if (!uniq_reader_check(opr->output(0))) {
                    root.insert(opr);
                }
            } else {
                root.insert(opr);
            }
        }
    }

    auto on_opr = [&rewriter, &uniq_reader_check, &trt_opr_inps,
                   &root](OperatorNodeBase* opr) {
        MGB_MARK_USED_VAR(trt_opr_inps);
        bool cond_opr =
                opr->same_type<opr::TypeCvt>() || opr->same_type<AbstractShuffleOpr>();
        if (cond_opr) {
            bool cond_endpoint = root.count(opr) > 0;
            if (!cond_endpoint) {
                return;
            }
            auto cur = opr;
            auto var = opr->output(0), inp_var = opr->input(0);
            bool force_folding_typecvt = false;
            bool first_shuffle = false;
            // initialize inp_format and out_format
            TensorFormats out_format = TensorFormats::NCHW, inp_format = out_format;
            megdnn::DType inp_dtype = cur->input(0)->dtype(),
                          out_dtype = cur->output(0)->dtype();
            SmallVector<megdnn::DType> out_dtype_vec;
            while (cond_opr) {
                if (cur->same_type<AbstractShuffleOpr>()) {
                    auto shuffle = try_cast_as_op<AbstractShuffleOpr>(cur);
                    inp_format = shuffle->key().input_format;
                    if (!first_shuffle) {
                        out_format = shuffle->key().output_format;
                        first_shuffle = true;
                    }
                } else {
                    mgb_assert(cur->same_type<opr::TypeCvt>());
                    out_dtype_vec.push_back(cur->output(0)->dtype());
                }
                inp_var = cur->input(0);
                bool cond_reader = uniq_reader_check(inp_var);
                if (!cond_reader)
                    break;
                cur = cur->input(0)->owner_opr();
                cond_opr = cur->same_type<opr::TypeCvt>() ||
                           cur->same_type<AbstractShuffleOpr>();
            }
            std::reverse(out_dtype_vec.begin(), out_dtype_vec.end());
#if MGB_ENABLE_TENSOR_RT
            force_folding_typecvt =
                    inp_var->owner_opr()->same_type<opr::TensorRTOpr>() ||
                    trt_opr_inps.count(var);
#endif
            auto new_var = rewriter.get_var(inp_var);
            if (inp_format != out_format) {
                new_var = ReformatManager::instance().get(
                        ReformatKey{inp_format, out_format})({new_var});
            }
            if (force_folding_typecvt) {
                inp_dtype = inp_var->dtype();
                if (inp_dtype != out_dtype) {
                    auto type_cvt = opr::TypeCvt::make(new_var, out_dtype);
                    new_var = type_cvt.node();
                }
            } else {
                if (out_dtype_vec.empty() || out_dtype_vec.back() != var->dtype())
                    out_dtype_vec.push_back(var->dtype());
                for (auto&& dtype : out_dtype_vec) {
                    auto type_cvt = opr::TypeCvt::make(new_var, dtype);
                    new_var = type_cvt.node();
                }
            }
            rewriter.replace_var(
                    var, new_var, mgb_cstr_log("replace Dimshuffle and TypeCvt chain"));
        } else {
            auto new_opr = rewriter.auto_replace_outputs(opr);
            uniq_reader_check.update_on_opr_auto_replace(opr, new_opr);
        }
    };
    m_opt_state.graph().iter(on_opr);
    rewriter.apply_inplace();
}

const char* ShuffleShuffleRemovePass::name() const {
    return mgb_cstr_log("shuffle shuffle remove pass");
}

void ShuffleShuffleRemovePass::apply(OptState& opt) const {
    MIDOUT_B("ShuffleShuffleRemovePass::apply")
    opt.set_var_replace_check_flag(
            VarReplaceCheckFlag::CHECK_SHAPE | VarReplaceCheckFlag::CHECK_DTYPE);
    Impl{opt};
    MIDOUT_E
}

/* ================ EnableNCHW64Pass =============== */
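//! if a graph endpoint was converted to a non-nchw layout by this pass,
//! insert a RelayoutPlaceholder converting it back to nchw so that the
//! endpoint var keeps its original shape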
VarNode* EnableNCHW64Pass::on_graph_endpoint_var(
        VarNode* new_var, VarNode* orig_var) const {
    if (!orig_var->shape().eq_shape(new_var->shape())) {
        auto iter = m_opr_format_map.find(new_var->owner_opr());
        mgb_assert(
                iter != m_opr_format_map.end(),
                "cannot find opr(type:%s,name:%s) information, related "
                "output var node(name:%s)",
                new_var->owner_opr()->dyn_typeinfo()->name,
                new_var->owner_opr()->cname(), new_var->cname());
        const auto& fmt = iter->second;
        ReformatKey key;
        MGB_TRY {
            key.input_format = opr_format_to_tensor_formats(fmt);
            key.output_format = TensorFormats::NCHW;
            key.input_dtype = new_var->dtype().enumv();
            key.output_dtype = new_var->dtype().enumv();
        }
        MGB_CATCH(AssertionError & err, {
            mgb_log_error(
                    "%s, related var node(name:%s)", err.what(), orig_var->cname());
            throw;
        })
        return RelayoutPlaceholder::make(new_var, key).node();
    }
    return new_var;
}

std::unique_ptr<EnableNCHW64Pass> EnableNCHW64Pass::make_nchw64_converter() {
    MIDOUT_B("EnableNCHW64Pass::make")
    auto ret = std::make_unique<EnableNCHW64Pass>();
    ret->set_var_replace_check_flag(
            VarReplaceCheckFlag::CHECK_ALL ^ VarReplaceCheckFlag::CHECK_SHAPE);
    auto& replace_func = ret->m_opr_replace_func;
    auto& format_map = ret->m_opr_format_map;
    auto make_new_conv = [](const VarNodeArray& inps,
                            const opr::ConvBiasForward* orig_conv, Format format) {
        auto param = orig_conv->param();
        // change format
        param.format = format;
        if (inps.size() == 2) {
            auto new_conv = opr::ConvBiasForward::make(
                    inps[0], inps[1], param, orig_conv->execution_policy(),
                    orig_conv->config());
            return new_conv.node();
        } else if (inps.size() == 3) {
            auto new_conv = opr::ConvBiasForward::make(
                    inps[0], inps[1], inps[2], param, orig_conv->execution_policy(),
                    orig_conv->config());
            return new_conv.node();
        } else {
            mgb_assert(inps.size() == 4);
            auto new_conv = opr::ConvBiasForward::make(
                    inps[0], inps[1], inps[2], inps[3], param,
                    orig_conv->execution_policy(), orig_conv->config());
            return new_conv.node();
        }
    };
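    // each try_transform_to_* helper below checks the dtype and channel
    // constraints of one candidate layout; on success it relayouts the inputs,
    // rebuilds the ConvBias with the matching param().format and (for
    // non-nchw outputs) records the new format in format_map, otherwise it
    // returns nullptr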
    auto try_transform_to_nchw = [&format_map](
                                         OperatorNodeBase* opr,
                                         const VarNodeArray& new_inp) -> VarNode* {
        mgb_assert(opr->input().size() == new_inp.size());
        bool check_dtype = new_inp[0]->dtype().enumv() == DTypeEnum::Float32 &&
                           new_inp[1]->dtype().enumv() == DTypeEnum::Float32;
        if (opr->input().size() >= 3)
            check_dtype &= new_inp[2]->dtype().enumv() == DTypeEnum::Float32;
        if (opr->input().size() >= 4)
            check_dtype &= new_inp[3]->dtype().enumv() == DTypeEnum::Float32;
        if (!check_dtype)
            return nullptr;
        auto inps = new_inp;
        auto process = [&](size_t i) -> VarNode* {
            auto iter = format_map.find(new_inp[i]->owner_opr());
            if (iter == format_map.end()) {
                return inps[i];
            } else {
                const auto& fmt = iter->second;
                ReformatKey key;
                key.input_format = opr_format_to_tensor_formats(fmt);
                key.output_format = TensorFormats::NCHW;
                return RelayoutPlaceholder::make(inps[i], key).node();
            }
        };
        for (size_t i = 0; i < inps.size(); ++i) {
            inps[i] = process(i);
        }
        auto ret = serialization::copy_opr_shallow(*opr, inps, opr->config());
        return ret->output()[0];
    };

    auto try_transform_to_nchw4 = [make_new_conv, &format_map](
                                          OperatorNodeBase* opr,
                                          const VarNodeArray& new_inp) -> VarNode* {
        mgb_assert(opr->input().size() == new_inp.size());
        bool check_dtype = new_inp[0]->dtype().enumv() == DTypeEnum::QuantizedS8 &&
                           new_inp[1]->dtype().enumv() == DTypeEnum::QuantizedS8;
        mgb_assert(opr->output().size() > 0);
        bool dst_float = opr->output(0)->dtype().enumv() == DTypeEnum::Float32;
        if (opr->input().size() >= 3) {
            auto dtype_expect =
                    dst_float ? DTypeEnum::Float32 : DTypeEnum::QuantizedS32;
            check_dtype &= new_inp[2]->dtype().enumv() == dtype_expect;
        }
        if (opr->input().size() >= 4) {
M
Megvii Engine Team 已提交
2539 2540
            check_dtype &=
                    new_inp[3]->dtype().enumv() == opr->output(0)->dtype().enumv();
        }
        if (!check_dtype)
            return nullptr;

        size_t out_channels = opr->input(1)->shape()[0];
        size_t in_channels = opr->input(1)->shape()[1];
        bool check_channels = out_channels % 4 == 0 && in_channels % 4 == 0;
        mgb_assert(
                check_channels, "invalid quantize conv bias opr(name:%s,oc:%zu,ic:%zu)",
                opr->cname(), out_channels, in_channels);
        auto inps = new_inp;
        auto process = [&](size_t i) -> VarNode* {
            auto iter = format_map.find(new_inp[i]->owner_opr());
            if (iter == format_map.end()) {
                auto ovar = RelayoutPlaceholder::make(
                        inps[i],
                        ReformatKey{TensorFormats::NCHW, TensorFormats::NCHWc4});
                return ovar.node();
            } else {
                const auto& fmt = iter->second;
                if (fmt == Format::NCHW4) {
                    return inps[i];
                } else {
                    ReformatKey key;
                    key.input_format = opr_format_to_tensor_formats(fmt);
                    key.output_format = TensorFormats::NCHWc4;
                    return RelayoutPlaceholder::make(inps[i], key).node();
2568 2569 2570
                }
            }
        };

        for (size_t i = 0; i < inps.size(); ++i) {
            // do not format bias and z when dst_float is true
            bool skip = dst_float && i >= 2;
            if (!skip)
                inps[i] = process(i);
        }
        auto& conv_bias = opr->cast_final_safe<opr::ConvBiasForward>();
        auto ret = make_new_conv(
                inps, &conv_bias, dst_float ? Format::NCHW4_NCHW : Format::NCHW4);
        if (!dst_float)
            format_map.insert(std::make_pair(ret->owner_opr(), Format::NCHW4));
        return ret;
    };

    auto try_transform_to_nchw32 = [make_new_conv, &format_map](
                                           OperatorNodeBase* opr,
                                           const VarNodeArray& new_inp) -> VarNode* {
        mgb_assert(opr->input().size() == new_inp.size());
        bool check_dtype = new_inp[0]->dtype().enumv() == DTypeEnum::QuantizedS8 &&
                           new_inp[1]->dtype().enumv() == DTypeEnum::QuantizedS8;
        if (opr->input().size() >= 3)
            check_dtype &= new_inp[2]->dtype().enumv() == DTypeEnum::QuantizedS32;
        if (opr->input().size() >= 4)
            check_dtype &= new_inp[3]->dtype().enumv() == DTypeEnum::QuantizedS8;
        if (!check_dtype)
            return nullptr;
        size_t out_channels = opr->input(1)->shape()[0];
        size_t in_channels = opr->input(1)->shape()[1];
        bool check_channels = out_channels % 32 == 0 && in_channels % 32 == 0;
        if (!check_channels)
            return nullptr;
        auto inps = new_inp;
        auto process = [&](size_t i) -> VarNode* {
            auto iter = format_map.find(new_inp[i]->owner_opr());
            ReformatKey key;
            key.output_format = TensorFormats::NCHWc32;
            if (iter == format_map.end()) {
                key.input_format = TensorFormats::NCHW;
                return RelayoutPlaceholder::make(inps[i], key).node();
            } else {
                const auto& fmt = iter->second;
                if (fmt == Format::NCHW32) {
                    return inps[i];
                } else {
                    key.input_format = opr_format_to_tensor_formats(fmt);
                    return RelayoutPlaceholder::make(inps[i], key).node();
                }
            }
        };
        for (size_t i = 0; i < inps.size(); ++i) {
            inps[i] = process(i);
        }
        auto& conv_bias = opr->cast_final_safe<opr::ConvBiasForward>();
        auto ret = make_new_conv(inps, &conv_bias, Format::NCHW32);
        format_map.insert(std::make_pair(ret->owner_opr(), Format::NCHW32));
        return ret;
    };

    auto try_transform_to_nchw64 = [make_new_conv, &format_map](
                                           OperatorNodeBase* opr,
                                           const VarNodeArray& new_inp) -> VarNode* {
        // fint4XWint4 and fuint4XWint4 (int4/uint4 feature with int4 weight)
        mgb_assert(opr->input().size() == new_inp.size());
        bool check_dtype =
                (new_inp[0]->dtype().enumv() == DTypeEnum::QuantizedS4 ||
                 new_inp[0]->dtype().enumv() == DTypeEnum::Quantized4Asymm) &&
                new_inp[1]->dtype().enumv() == DTypeEnum::QuantizedS4;
        if (opr->input().size() >= 3)
            check_dtype &= new_inp[2]->dtype().enumv() == DTypeEnum::QuantizedS32;
        if (opr->input().size() >= 4)
            check_dtype &= new_inp[3]->dtype().enumv() == new_inp[0]->dtype().enumv();
        if (!check_dtype)
            return nullptr;
        size_t out_channels = opr->input(1)->shape()[0];
        size_t in_channels = opr->input(1)->shape()[1];
        bool check_channels = out_channels % 64 == 0 && in_channels % 64 == 0;
        if (!check_channels)
            return nullptr;
        auto inps = new_inp;
        auto process = [&](size_t i) -> VarNode* {
            auto iter = format_map.find(new_inp[i]->owner_opr());
            ReformatKey key;
            key.output_format = TensorFormats::NCHWc64;
            if (iter == format_map.end()) {
                key.input_format = TensorFormats::NCHW;
                key.input_dtype = key.output_dtype = inps[i]->dtype().enumv();
                return RelayoutPlaceholder::make(inps[i], key).node();
            } else {
                const auto& fmt = iter->second;
                if (fmt == Format::NCHW64) {
                    return inps[i];
                } else {
                    key.input_format = opr_format_to_tensor_formats(fmt);
                    key.input_dtype = key.output_dtype = inps[i]->dtype().enumv();
                    return RelayoutPlaceholder::make(inps[i], key).node();
                }
            }
        };
        for (size_t i = 0; i < inps.size(); ++i) {
            inps[i] = process(i);
        }
        auto& conv_bias = opr->cast_final_safe<opr::ConvBiasForward>();
        auto ret = make_new_conv(inps, &conv_bias, Format::NCHW64);
        format_map.insert(std::make_pair(ret->owner_opr(), Format::NCHW64));
        return ret;
    };

    auto try_transform_to_nhwc = [make_new_conv, &format_map](
                                         OperatorNodeBase* opr,
                                         const VarNodeArray& new_inp) -> VarNode* {
        // fint4XWint4 and fuint4XWint4 (int4/uint4 feature with int4 weight)
        mgb_assert(opr->input().size() == new_inp.size());
        bool check_dtype =
                (new_inp[0]->dtype().enumv() == DTypeEnum::QuantizedS4 ||
                 new_inp[0]->dtype().enumv() == DTypeEnum::Quantized4Asymm) &&
                new_inp[1]->dtype().enumv() == DTypeEnum::QuantizedS4;
        if (opr->input().size() >= 3)
            check_dtype &= new_inp[2]->dtype().enumv() == DTypeEnum::QuantizedS32;
        if (opr->input().size() >= 4)
            check_dtype &= new_inp[3]->dtype().enumv() == new_inp[0]->dtype().enumv();
        if (!check_dtype)
            return nullptr;
        size_t out_channels = opr->input(1)->shape()[0];
        size_t in_channels = opr->input(1)->shape()[1];
        bool check_channels = out_channels % 8 == 0 && in_channels % 8 == 0;
        if (!check_channels)
            return nullptr;
        auto inps = new_inp;
        auto process = [&](size_t i) -> VarNode* {
            auto iter = format_map.find(new_inp[i]->owner_opr());
            ReformatKey key;
            key.output_format = TensorFormats::NHWC;
            key.input_dtype = key.output_dtype = inps[i]->dtype().enumv();
            if (iter == format_map.end()) {
                key.input_format = TensorFormats::NCHW;
                return RelayoutPlaceholder::make(inps[i], key).node();
            } else {
                const auto& fmt = iter->second;
                if (fmt == Format::NHWC) {
                    return inps[i];
                } else {
                    key.input_format = opr_format_to_tensor_formats(fmt);
                    return RelayoutPlaceholder::make(inps[i], key).node();
                }
            }
        };
        for (size_t i = 0; i < inps.size(); ++i) {
            inps[i] = process(i);
        }
        auto& conv_bias = opr->cast_final_safe<opr::ConvBiasForward>();
        auto ret = make_new_conv(inps, &conv_bias, Format::NHWC);
        format_map.insert(std::make_pair(ret->owner_opr(), Format::NHWC));
        return ret;
    };

    // replace rule for conv bias opr
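    // candidate layouts are tried in a fixed priority order:
    // nchw32 -> nchw4 -> nchw64 -> nhwc -> nchw; the first helper that accepts
    // the dtype/channel combination wins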
    auto replace_conv_bias_opr = [&format_map, try_transform_to_nchw4,
                                  try_transform_to_nchw32, try_transform_to_nchw64,
                                  try_transform_to_nhwc, try_transform_to_nchw](
                                         OperatorNodeBase* opr,
                                         const VarNodeArray& new_inp) {
        using Param = megdnn::param::ConvBias;
        using Sparse = Param::Sparse;
        mgb_assert(opr->input().size() == new_inp.size());
        auto& conv_bias = opr->cast_final_safe<opr::ConvBiasForward>();
        mgb_assert(
                conv_bias.param().sparse == Sparse::DENSE,
                "only support dense conv now");
        VarNode* new_var = nullptr;
        if ((new_var = try_transform_to_nchw32(opr, new_inp)) ||
            (new_var = try_transform_to_nchw4(opr, new_inp)) ||
            (new_var = try_transform_to_nchw64(opr, new_inp)) ||
            (new_var = try_transform_to_nhwc(opr, new_inp)) ||
            (new_var = try_transform_to_nchw(opr, new_inp))) {
            return new_var->owner_opr();
        } else {
            mgb_assert(
                    new_inp[0]->dtype().enumv() != DTypeEnum::QuantizedS8 &&
                            new_inp[0]->dtype().enumv() != DTypeEnum::QuantizedS4 &&
                            new_inp[0]->dtype().enumv() != DTypeEnum::Quantized4Asymm &&
                            new_inp[0]->dtype().enumv() != DTypeEnum::Float32,
                    "invalid data type(%s)", new_inp[0]->dtype().name());
            bool shape_changed = false;
            for (const auto& i : new_inp) {
                if (format_map.count(i->owner_opr()) > 0) {
                    shape_changed = true;
                    break;
                }
            }
            mgb_assert(
                    !shape_changed,
                    "EnableNCHW64Pass won't change format of output tensor "
                    "of non quantized conv bias operator(name:%s)",
                    opr->cname());
            return serialization::copy_opr_shallow(*opr, new_inp, opr->config());
        }
    };
    replace_func[opr::ConvBiasForward::typeinfo()] = replace_conv_bias_opr;
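    // deconv (ConvolutionBackwardData): only the quantized int8 case is
    // converted (to nchw4); other dtypes must keep the nchw layout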
    replace_func[opr::ConvolutionBackwardData::typeinfo()] =
            [&format_map](OperatorNodeBase* opr, const VarNodeArray& new_inp) {
                mgb_assert(opr->input().size() == new_inp.size());
                mgb_assert(
                        new_inp.size() == 2,
                        "deconv (conv bwd data) operator for inference can "
                        "only have 2 input vars(got:%zu)",
                        new_inp.size());
                auto& deconv = opr->cast_final_safe<opr::ConvolutionBackwardData>();
                if (new_inp[0]->dtype().enumv() == DTypeEnum::QuantizedS8) {
                    Format cur;
                    auto iter = format_map.find(new_inp[1]->owner_opr());
                    if (iter == format_map.end()) {
                        cur = Format::NCHW;
                    } else {
                        cur = iter->second;
                    }
                    auto inps = new_inp;
                    inps[0] =
                            RelayoutPlaceholder::make(
                                    inps[0],
                                    ReformatKey{
                                            TensorFormats::NCHW, TensorFormats::NCHWc4})
                                    .node();
                    if (cur != Format::NCHW4) {
                        inps[1] = RelayoutPlaceholder::make(
                                          inps[1],
                                          ReformatKey{
                                                  opr_format_to_tensor_formats(cur),
                                                  TensorFormats::NCHWc4})
                                          .node();
                    }

                    auto param = deconv.param();
                    param.format = Format::NCHW4;
                    auto new_deconv = opr::ConvolutionBackwardData::make(
                            inps[0], inps[1], param, deconv.execution_policy(),
                            deconv.config());
                    auto ret = new_deconv.node()->owner_opr();
                    format_map.insert(std::make_pair(ret, Format::NCHW4));
                    return ret;
                } else {
                    bool shape_changed = false;
                    for (const auto& i : new_inp) {
                        if (format_map.count(i->owner_opr()) > 0) {
                            shape_changed = true;
                            break;
                        }
                    }
                    mgb_assert(
                            !shape_changed,
                            "EnableNCHW64Pass won't change format of output tensor "
                            "of non quantized deconv operator(name:%s)",
                            opr->cname());
                    return serialization::copy_opr_shallow(
                            *opr, new_inp, opr->config());
                }
            };

    // replace rule for elemwise like opr
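    // if all inputs already share one format the opr is copied as is;
    // otherwise every input is relayouted to the format whose inputs carry the
    // largest total number of elements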
    auto replace_elemwise_like_opr = [&format_map](
                                             OperatorNodeBase* opr,
                                             const VarNodeArray& new_inp) {
        mgb_assert(opr->input().size() == new_inp.size());
        ThinHashMap<Format, size_t> format_size;
        bool same_format = true;
        bool first_touch = false;
        Format format(Format::NCHW);
        for (const auto& i : new_inp) {
            Format cur;
            auto iter = format_map.find(i->owner_opr());
            if (iter == format_map.end()) {
                cur = Format::NCHW;
            } else {
                cur = iter->second;
            }
            auto& size = format_size[cur];
            size += i->shape().total_nr_elems();
            if (!first_touch) {
                first_touch = true;
                format = cur;
            } else {
                if (format != cur)
                    same_format = false;
            }
        }
        if (same_format) {
            auto ret = serialization::copy_opr_shallow(*opr, new_inp, opr->config());
            if (format != Format::NCHW)
                format_map.insert(std::make_pair(ret, format));
            return ret;
        }

        Format max_format(Format::NCHW);
        size_t max_size = std::numeric_limits<size_t>::min();
        for (const auto& item : format_size) {
            if (item.second > max_size) {
                max_format = item.first;
                max_size = item.second;
            }
        }
        auto inps = new_inp;
        for (size_t i = 0; i < opr->input().size(); ++i) {
            auto iter = format_map.find(new_inp[i]->owner_opr());
            Format cur;
            if (iter != format_map.end()) {
                cur = iter->second;
            } else {
                cur = Format::NCHW;
            }
            if (cur != max_format) {
                ReformatKey key{
                        opr_format_to_tensor_formats(cur),
                        opr_format_to_tensor_formats(max_format)};
                key.input_dtype = key.output_dtype = inps[i]->dtype().enumv();
                inps[i] = RelayoutPlaceholder::make(inps[i], key).node();
            }
        }
        auto ret = serialization::copy_opr_shallow(*opr, inps, opr->config());
        if (max_format != Format::NCHW)
            format_map.insert(std::make_pair(ret, max_format));
        return ret;
    };
    // elemwise like
    replace_func[opr::Elemwise::typeinfo()] = replace_elemwise_like_opr;
    replace_func[opr::TypeCvt::typeinfo()] = replace_elemwise_like_opr;
    replace_func[opr::ElemwiseMultiType::typeinfo()] = replace_elemwise_like_opr;
    replace_func[opr::PowC::typeinfo()] = replace_elemwise_like_opr;

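    // WarpPerspective: 4-bit inputs run in nchw64 or nhwc, int8 inputs in
    // nchw4; any other dtype must keep nchw, so a format-changed input is
    // treated as an error there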
    auto replace_warp_perspective_opr = [&format_map](
                                                OperatorNodeBase* opr,
                                                const VarNodeArray& new_inp) {
        mgb_assert(opr->input().size() == new_inp.size());
        auto& warp = opr->cast_final_safe<opr::WarpPerspectiveForward>();
        if (new_inp[0]->dtype().enumv() == DTypeEnum::QuantizedS4 ||
            new_inp[0]->dtype().enumv() == DTypeEnum::Quantized4Asymm) {
            Format cur;
            auto iter = format_map.find(new_inp[0]->owner_opr());
            if (iter == format_map.end()) {
                cur = Format::NCHW;
            } else {
                cur = iter->second;
            }
            auto inps = new_inp;
            if (cur != Format::NCHW64 && cur != Format::NHWC) {
                ReformatKey key{
                        opr_format_to_tensor_formats(cur), TensorFormats::NHWC,
                        inps[0]->dtype().enumv(), inps[0]->dtype().enumv()};
                inps[0] = RelayoutPlaceholder::make(inps[0], key).node();
            }
            auto target_format = cur == Format::NCHW64 ? cur : Format::NHWC;
            auto param = warp.param();
            param.format = target_format;
            SymbolVar new_warp;
            if (inps.size() == 3) {
                new_warp = opr::WarpPerspectiveForward::make(
                        inps[0], inps[1], inps[2], param, warp.config());
            } else {
                mgb_assert(inps.size() == 4);
                new_warp = opr::WarpPerspectiveForward::make(
                        inps[0], inps[1], inps[2], inps[3], param, warp.config());
            }
            auto ret = new_warp.node()->owner_opr();
            format_map.insert(std::make_pair(ret, target_format));
            return ret;
        } else if (new_inp[0]->dtype().enumv() == DTypeEnum::QuantizedS8) {
            Format cur;
            auto iter = format_map.find(new_inp[0]->owner_opr());
            if (iter == format_map.end()) {
                cur = Format::NCHW;
            } else {
                cur = iter->second;
            }
            auto inps = new_inp;
2944 2945 2946
            if (cur != Format::NCHW4) {
                inps[0] = RelayoutPlaceholder::make(
                                  inps[0],
M
                                          opr_format_to_tensor_formats(cur),
                                          TensorFormats::NCHWc4})
2950
                                  .node();
2951 2952 2953 2954 2955 2956
            }
            auto param = warp.param();
            param.format = Format::NCHW4;
            SymbolVar new_warp;
            if (inps.size() == 3) {
                new_warp = opr::WarpPerspectiveForward::make(
                        inps[0], inps[1], inps[2], param, warp.config());
            } else {
                mgb_assert(inps.size() == 4);
                new_warp = opr::WarpPerspectiveForward::make(
                        inps[0], inps[1], inps[2], inps[3], param, warp.config());
            }
            auto ret = new_warp.node()->owner_opr();
            format_map.insert(std::make_pair(ret, Format::NCHW4));
            return ret;
        } else {
            bool shape_changed = false;
            for (const auto& i : new_inp) {
                if (format_map.count(i->owner_opr()) > 0) {
                    shape_changed = true;
                    break;
                }
            }
            mgb_assert(
                    !shape_changed,
                    "EnableNCHW64Pass won't change format of output tensor "
                    "of non quantized warp perspective operator(name:%s)",
                    opr->cname());
            return serialization::copy_opr_shallow(*opr, new_inp, opr->config());
        }
    };
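    // Pooling follows a similar scheme: 4-bit inputs use nchw64/nhwc, int8
    // inputs use nchw32 when the channel count allows it and nchw4 otherwise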
    auto replace_pooling_opr = [&format_map](
                                       OperatorNodeBase* opr,
                                       const VarNodeArray& new_inp) {
        mgb_assert(opr->input().size() == new_inp.size());
        auto& pooling = opr->cast_final_safe<opr::PoolingForward>();
        if (new_inp[0]->dtype().enumv() == DTypeEnum::QuantizedS4 ||
            new_inp[0]->dtype().enumv() == DTypeEnum::Quantized4Asymm) {
            Format cur;
            auto iter = format_map.find(new_inp[0]->owner_opr());
            if (iter == format_map.end()) {
                cur = Format::NCHW;
            } else {
                cur = iter->second;
            }
            auto inps = new_inp;
            if (cur != Format::NCHW64 && cur != Format::NHWC) {
                ReformatKey key{
                        opr_format_to_tensor_formats(cur), TensorFormats::NHWC,
                        inps[0]->dtype().enumv(), inps[0]->dtype().enumv()};
                inps[0] = RelayoutPlaceholder::make(inps[0], key).node();
            }
            auto target_format = cur == Format::NCHW64 ? cur : Format::NHWC;
            auto param = pooling.param();
            param.format = target_format;
            auto new_pool = opr::PoolingForward::make(
                    inps[0], param, pooling.execution_policy(), pooling.config());
            auto ret = new_pool.node()->owner_opr();
            format_map.insert(std::make_pair(ret, target_format));
            return ret;
        } else if (new_inp[0]->dtype().enumv() == DTypeEnum::QuantizedS8) {
            Format cur;
            auto iter = format_map.find(new_inp[0]->owner_opr());
            if (iter == format_map.end()) {
                cur = Format::NCHW;
            } else {
                cur = iter->second;
            }
            bool use_nchw32 = false;
            auto inps = new_inp;
            ReformatKey key;
            switch (cur) {
                case Format::NCHW: {
                    size_t in_channels = new_inp[0]->shape()[1];
                    use_nchw32 = in_channels % 32 == 0;
                    key.input_format = TensorFormats::NCHW;
                    key.output_format =
                            use_nchw32 ? TensorFormats::NCHWc32 : TensorFormats::NCHWc4;
                    inps[0] = RelayoutPlaceholder::make(inps[0], key).node();
                    break;
                }
                case Format::NHWC: {
                    size_t in_channels = new_inp[0]->shape()[3];
                    use_nchw32 = in_channels % 32 == 0;
                    key.input_format = TensorFormats::NHWC;
                    key.output_format =
                            use_nchw32 ? TensorFormats::NCHWc32 : TensorFormats::NCHWc4;
                    inps[0] = RelayoutPlaceholder::make(inps[0], key).node();
                    break;
                }
                case Format::NCHW64:
                    inps[0] = RelayoutPlaceholder::make(
                                      inps[0],
                                      ReformatKey{
                                              TensorFormats::NCHWc64,
                                              TensorFormats::NCHWc32})
                                      .node();
                    break;
                case Format::NCHW32:
                    use_nchw32 = true;
                    break;
                default:
                    mgb_assert(cur == Format::NCHW4);
            }
            Format out_format = use_nchw32 ? Format::NCHW32 : Format::NCHW4;

            auto param = pooling.param();
            param.format = out_format;
            auto new_pool = opr::PoolingForward::make(
                    inps[0], param, pooling.execution_policy(), pooling.config());
            auto ret = new_pool.node()->owner_opr();
            format_map.insert(std::make_pair(ret, out_format));
            return ret;
        } else {
            bool shape_changed = false;
            for (const auto& i : new_inp) {
                if (format_map.count(i->owner_opr()) > 0) {
                    shape_changed = true;
                    break;
                }
            }
            mgb_assert(
                    !shape_changed,
                    "EnableNCHW64Pass won't change format of output tensor "
                    "of non quantized pooling operator(name:%s)",
                    opr->cname());
            return serialization::copy_opr_shallow(*opr, new_inp, opr->config());
        }
    };
    // format aware
    replace_func[opr::WarpPerspectiveForward::typeinfo()] =
            replace_warp_perspective_opr;
    replace_func[opr::PoolingForward::typeinfo()] = replace_pooling_opr;

    // to nchw
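    // shape-sensitive oprs (Reduce, Concat, Reshape, ...) are evaluated in
    // nchw, so any format-changed input is relayouted back to nchw before the
    // opr is shallow-copied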
    auto replace_inps_to_nchw =
            [&format_map](OperatorNodeBase* opr, const VarNodeArray& new_inp) {
                mgb_assert(opr->input().size() == new_inp.size());
                auto inps = new_inp;
                for (size_t i = 0; i < opr->input().size(); ++i) {
                    auto iter = format_map.find(new_inp[i]->owner_opr());
                    auto fmt = iter != format_map.end() ? iter->second : Format::NCHW;
                    if (iter != format_map.end()) {
                        ReformatKey key{
                                opr_format_to_tensor_formats(fmt), TensorFormats::NCHW,
                                inps[i]->dtype().enumv(), inps[i]->dtype().enumv()};
                        inps[i] = RelayoutPlaceholder::make(inps[i], key).node();
                    }
                }
                auto ret = serialization::copy_opr_shallow(*opr, inps, opr->config());
                return ret;
            };

    replace_func[opr::Reduce::typeinfo()] = replace_inps_to_nchw;
    replace_func[opr::Concat::typeinfo()] = replace_inps_to_nchw;
    replace_func[opr::Reshape::typeinfo()] = replace_inps_to_nchw;
    replace_func[opr::GetVarShape::typeinfo()] = replace_inps_to_nchw;
    replace_func[opr::Dimshuffle::typeinfo()] = replace_inps_to_nchw;
    replace_func[opr::Subtensor::typeinfo()] = replace_inps_to_nchw;
    return ret;
    MIDOUT_E
}

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}