#include "megbrain/plugin/opr_footprint.h"

#include "megbrain/opr/basic_arith.h"
#include "megbrain/opr/blas.h"
#include "megbrain/opr/dnn/adaptive_pooling.h"
#include "megbrain/opr/dnn/batch_norm.h"
#include "megbrain/opr/dnn/convolution.h"
#include "megbrain/opr/dnn/images2neibs.h"
#include "megbrain/opr/dnn/layer_norm.h"
#include "megbrain/opr/dnn/local.h"
#include "megbrain/opr/dnn/lrn.h"
#include "megbrain/opr/dnn/pooling.h"
#include "megbrain/opr/dnn/roi_align.h"
#include "megbrain/opr/dnn/roi_pooling.h"
#include "megbrain/opr/imgproc.h"
#include "megbrain/opr/indexing.h"
#include "megbrain/opr/internal/indexing_helper.h"
#include "megbrain/opr/internal/indexing_helper_sereg.h"
#include "megbrain/opr/io.h"
#include "megbrain/opr/misc.h"
#include "megbrain/opr/nn_int.h"
#include "megbrain/opr/rand.h"
#include "megbrain/opr/standalone/nms_opr.h"
#include "megbrain/opr/tensor_gen.h"
#include "megbrain/opr/tensor_manip.h"
#include "megbrain/serialization/opr_load_dump.h"
#if MGB_ENABLE_JSON
#include "megdnn/opr_param_json.h"
#endif

#include "megbrain/utils/hash_ct.h"
#include "midout.h"

MIDOUT_DECL(megbrain_opr_footprint)

// Open/close a midout instrumentation region so each footprint/json
// registration can be trimmed out by the midout tool in minimized builds.
#define MIDOUT_B(...) MIDOUT_BEGIN(megbrain_opr_footprint, __VA_ARGS__) {
#define MIDOUT_E \
    }            \
    MIDOUT_END();

using namespace mgb;

namespace {

//! compute the computation footprint (number of arithmetic ops) of an
//! operator; each supported opr type provides a specialization below
template <class T>
uint64_t opr_footprint_func(cg::OperatorNodeBase* opr);

// Elemwise: one op per output element for each operand beyond the first;
// at least one op per element even for unary modes.
template <>
uint64_t opr_footprint_func<opr::Elemwise>(cg::OperatorNodeBase* opr) {
    uint64_t ops_per_elem = std::max<size_t>(opr->input().size(), 2) - 1;
    return opr->output()[0]->shape().total_nr_elems() * ops_per_elem;
}

// AddUpdate: counted as 3 ops per element of the updated tensor.
template <>
uint64_t opr_footprint_func<opr::AddUpdate>(cg::OperatorNodeBase* opr) {
    mgb_assert(opr->input().size() == 2, "AddUpdate opr should have two inputs");
    auto&& shp = opr->output()[0]->shape();
    return shp.total_nr_elems() * 3;
}

template <class Conv>
M
Megvii Engine Team 已提交
62 63 64
uint64_t eval_conv_computation(
        const TensorShape& src_shape, const TensorShape& filter_shape,
        const TensorShape& dst_shape, cg::OperatorNodeBase* opr) {
65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85
    using Param = opr::ConvolutionForward::Param;
    auto&& param = opr->cast_final_safe<Conv>().param();

    if (param.format == Param::Format::NHWCD4) {
        size_t fh, fw;
        size_t group = 1;
        if (param.sparse == Param::Sparse::DENSE) {
            fh = filter_shape[1];
            fw = filter_shape[2];
            group = 1;
        } else {
            // chanwise conv
            mgb_assert(param.sparse == Param::Sparse::GROUP);
            fh = filter_shape[2];
            fw = filter_shape[3];
            group = filter_shape[0];

            if (filter_shape.ndim == 5) {
                group *= 4;
            }
        }
M
Megvii Engine Team 已提交
86
        return dst_shape.total_nr_elems() * fh * fw * src_shape[2] * 4 / group * 2;
87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110
    }
    auto eval_conv_computation_nchwx = [&param, &src_shape, &filter_shape,
                                        &dst_shape]() -> uint64_t {
        size_t fh, fw;
        bool hybird_nchwx = false;
        size_t group = 1;
        if (param.sparse == Param::Sparse::DENSE) {
            //! if nchwxx mode src is nchw output is nchwxx
            if (dst_shape.ndim == 5 && src_shape.ndim == 4) {
                fh = filter_shape[1];
                fw = filter_shape[2];
                hybird_nchwx = true;
            } else {
                fh = filter_shape[2];
                fw = filter_shape[3];
            }
            group = 1;
        } else {
            mgb_assert(param.sparse == Param::Sparse::GROUP);
            fh = filter_shape[3];
            fw = filter_shape[4];
            group = filter_shape[0];
        }
        if (param.format == Param::Format::NCHW88) {
111
            //! if channel wise weight layout is {group/8, FH, FW, 1, 1, 8}
112 113 114
            if (filter_shape[1] == 1 && filter_shape[2] == 1) {
                group *= 8;
            }
M
Megvii Engine Team 已提交
115 116
            size_t computation =
                    dst_shape.total_nr_elems() * fh * fw * src_shape[1] / group * 2;
117 118
            return hybird_nchwx ? computation : computation * 8;
        }
119 120
        if (param.format == Param::Format::NCHW44 ||
            param.format == Param::Format::NCHW44_DOT) {
121
            //! if channel wise weight layout is {group/4, FH, FW, 1, 1, 4}
122 123
            if (filter_shape[1] == 1 && filter_shape[2] == 1 &&
                filter_shape.ndim == 6) {
124 125
                group *= 4;
            }
M
Megvii Engine Team 已提交
126 127
            size_t computation =
                    dst_shape.total_nr_elems() * fh * fw * src_shape[1] / group * 2;
128 129
            return hybird_nchwx ? computation : computation * 4;
        }
130 131 132
        size_t packed_size;
        if (param.format == Param::Format::NCHW64) {
            packed_size = 64;
M
Megvii Engine Team 已提交
133 134 135
        } else if (
                param.format == Param::Format::NCHW32 ||
                param.format == Param::Format::NCHW32_NCHW4) {
136 137
            packed_size = 32;
        } else {
M
Megvii Engine Team 已提交
138 139 140 141 142 143 144
            mgb_assert(
                    param.format == Param::Format::NCHW4 ||
                            param.format == Param::Format::NCHW4_NHWC ||
                            param.format == Param::Format::NCHW4_NCHW ||
                            param.format == Param::Format::NCHW4_NCHW32,
                    "format should be "
                    "NCHW4/NCHW4_NCHW/NCHW4_NHWC/NCHW4_NCHW32");
145
            packed_size = 4;
146
        }
M
Megvii Engine Team 已提交
147 148
        return dst_shape.total_nr_elems() * fh * fw * src_shape[1] * packed_size /
               group * 2;
149 150 151 152 153 154 155 156 157 158 159 160 161 162 163
    };
    auto eval_conv_computation_chwn4 = [&param, &src_shape, &filter_shape,
                                        &dst_shape]() -> uint64_t {
        size_t fh, fw;
        size_t group = 1;
        if (param.sparse == Param::Sparse::DENSE) {
            fh = filter_shape[1];
            fw = filter_shape[2];
            group = 1;
        } else {
            mgb_assert(param.sparse == Param::Sparse::GROUP);
            fh = filter_shape[2];
            fw = filter_shape[3];
            group = filter_shape[0];
        }
M
Megvii Engine Team 已提交
164
        return dst_shape.total_nr_elems() * fh * fw * src_shape[0] * 4 / group * 2;
165 166
    };
    if (param.format == Param::Format::NCHW4 ||
167
        param.format == Param::Format::NCHW4_NCHW ||
168
        param.format == Param::Format::NCHW4_NHWC ||
169
        param.format == Param::Format::NCHW4_NCHW32 ||
170
        param.format == Param::Format::NCHW88 ||
171
        param.format == Param::Format::NCHW44 ||
172
        param.format == Param::Format::NCHW44_DOT ||
173
        param.format == Param::Format::NCHW32 ||
174 175
        param.format == Param::Format::NCHW32_NCHW4 ||
        param.format == Param::Format::NCHW64) {
176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197
        return eval_conv_computation_nchwx();
    }
    if (param.format == Param::Format::CHWN4) {
        return eval_conv_computation_chwn4();
    }
    size_t cpos;
    size_t spatial_start;
    size_t group = 1;
    switch (param.format) {
        case Param::Format::NCHW:
            cpos = 1;
            spatial_start = 2;
            break;
        case Param::Format::NHWC:
            cpos = 3;
            spatial_start = 1;
            break;
        default:
            mgb_assert(false, "Unknown CONV Param::Format type");
    }
    switch (param.sparse) {
        case Param::Sparse::DENSE:
M
Megvii Engine Team 已提交
198 199 200 201
            mgb_assert(
                    filter_shape.ndim == 4 || filter_shape.ndim == 6,
                    "DENSE conv filter shape dimension should be "
                    "4/6(winograd mk4)");
202 203
            break;
        case Param::Sparse::GROUP:
M
Megvii Engine Team 已提交
204 205 206 207
            mgb_assert(
                    filter_shape.ndim == 5 || filter_shape.ndim == 7,
                    "GROUP conv filter shape dimension should be "
                    "5/7(winograd mk4)");
208 209 210 211 212 213 214 215 216
            spatial_start++;
            group = filter_shape[0];
            break;
        default:
            mgb_assert(false, "Unkown CONV Param::Sparse type");
    }

    uint64_t fh = static_cast<uint64_t>(filter_shape[spatial_start]);
    uint64_t fw = static_cast<uint64_t>(filter_shape[spatial_start + 1]);
217

218
    // mul and add are counted as 2 operations
219

220 221 222 223 224 225
    return dst_shape.total_nr_elems() * fh * fw *
           static_cast<uint64_t>(src_shape[cpos]) / group * 2;
}

// ConvolutionForward
template <>
M
Megvii Engine Team 已提交
226 227
uint64_t opr_footprint_func<opr::ConvolutionForward>(cg::OperatorNodeBase* opr) {
    mgb_assert(opr->input().size() == 2, "ConvolutionFwd opr should have two inputs");
228 229 230 231 232 233 234
    auto&& out_shape = opr->output()[0]->shape();
    auto&& src_shape = opr->input()[0]->shape();
    auto&& filter_shape = opr->input()[1]->shape();
    return eval_conv_computation<opr::ConvolutionForward>(
            src_shape, filter_shape, out_shape, opr);
}
template <>
M
Megvii Engine Team 已提交
235 236 237 238 239
uint64_t opr_footprint_func<opr::ConvBiasForward>(cg::OperatorNodeBase* opr) {
    mgb_assert(
            opr->input().size() == 2 || opr->input().size() == 3 ||
                    opr->input().size() == 4,
            "ConvBiasForward opr should have two/three/four inputs");
240 241 242 243 244 245 246 247 248 249 250 251 252
    auto&& out_shape = opr->output()[0]->shape();
    auto&& src_shape = opr->input()[0]->shape();
    auto&& filter_shape = opr->input()[1]->shape();
    uint64_t res = eval_conv_computation<opr::ConvBiasForward>(
            src_shape, filter_shape, out_shape, opr);
    if (opr->input().size() == 3) {
        res += out_shape.total_nr_elems();
    }
    return res;
}

// ConvolutionBackwardData
template <>
M
Megvii Engine Team 已提交
253 254 255 256
uint64_t opr_footprint_func<opr::ConvolutionBackwardData>(cg::OperatorNodeBase* opr) {
    mgb_assert(
            opr->input().size() == 2 || opr->input().size() == 3,
            "ConvolutionBackwardData opr should have two or three inputs");
257 258 259 260 261 262 263 264 265
    auto&& filter_shape = opr->input()[0]->shape();
    auto&& diff_shape = opr->input()[1]->shape();
    auto&& grad_shape = opr->output()[0]->shape();
    return eval_conv_computation<opr::ConvolutionBackwardData>(
            grad_shape, filter_shape, diff_shape, opr);
}

// ConvolutionBackwardFilter
template <>
M
Megvii Engine Team 已提交
266 267 268 269
uint64_t opr_footprint_func<opr::ConvolutionBackwardFilter>(cg::OperatorNodeBase* opr) {
    mgb_assert(
            opr->input().size() == 3,
            "ConvolutionBackwardData opr should have three inputs");
270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296
    auto&& filter_shape = opr->input()[2]->shape();
    auto&& diff_shape = opr->input()[1]->shape();
    auto&& src_shape = opr->input()[0]->shape();
    return eval_conv_computation<opr::ConvolutionBackwardFilter>(
            src_shape, filter_shape, diff_shape, opr);
}

// MatrixMul: m*k*n multiply-accumulates, each counted as 2 ops.
template <>
uint64_t opr_footprint_func<opr::MatrixMul>(cg::OperatorNodeBase* opr) {
    auto&& mm = opr->cast_final_safe<opr::MatrixMul>();
    auto&& shp_a = opr->input(0)->shape();
    auto&& shp_b = opr->input(1)->shape();
    mgb_assert(shp_a.ndim == 2 && shp_b.ndim == 2);
    size_t m = shp_a[0], k0 = shp_a[1], k1 = shp_b[0], n = shp_b[1];
    // undo the transposition so (m, k) x (k, n) holds
    if (mm.param().transposeA)
        std::swap(m, k0);
    if (mm.param().transposeB)
        std::swap(k1, n);
    mgb_assert(k0 == k1);
    // mul and add are counted as 2 operations
    return static_cast<uint64_t>(m) * k0 * n * 2;
}

// LocalShareForward: same MAC count as a conv with the matching filter window.
template <>
uint64_t opr_footprint_func<opr::LocalShareForward>(cg::OperatorNodeBase* opr) {
    mgb_assert(opr->input().size() == 2, "LocalShare opr should have two inputs");
    using Param = opr::LocalShareForward::Param;
    auto&& param = opr->cast_final_safe<opr::LocalShareForward>().param();
    mgb_assert(param.format == Param::Format::NCHW);
    auto&& dst = opr->output()[0]->shape();
    auto&& src = opr->input()[0]->shape();
    auto&& flt = opr->input()[1]->shape();
    // GROUP sparse prepends the group dim to the filter layout
    size_t groups = 1, spatial = 3;
    if (param.sparse == Param::Sparse::GROUP) {
        groups = flt[0];
        spatial = 4;
    }
    const size_t fh = flt[spatial], fw = flt[spatial + 1];
    return dst.total_nr_elems() * fh * fw * src[1] * 2 / groups;
}

// LocalShareBackwardData: inputs are (filter, diff); output is the grad.
template <>
uint64_t opr_footprint_func<opr::LocalShareBackwardData>(cg::OperatorNodeBase* opr) {
    mgb_assert(
            opr->input().size() == 3,
            "LocalShareBackwardData opr should have three inputs");
    using Param = opr::LocalShareForward::Param;
    auto&& param = opr->cast_final_safe<opr::LocalShareBackwardData>().param();
    mgb_assert(param.format == Param::Format::NCHW);
    auto&& flt = opr->input()[0]->shape();
    auto&& diff = opr->input()[1]->shape();
    auto&& grad = opr->output()[0]->shape();
    // GROUP sparse prepends the group dim to the filter layout
    size_t groups = 1, spatial = 3;
    if (param.sparse == Param::Sparse::GROUP) {
        groups = flt[0];
        spatial = 4;
    }
    const size_t fh = flt[spatial], fw = flt[spatial + 1];
    return diff.total_nr_elems() * fh * fw * grad[1] * 2 / groups;
}

// LocalShareBackwardFilter: inputs are (src, diff); output is the filter grad,
// whose shape provides the kernel window and group count.
template <>
uint64_t opr_footprint_func<opr::LocalShareBackwardFilter>(cg::OperatorNodeBase* opr) {
    mgb_assert(
            opr->input().size() == 3,
            "LocalShareBackwardFilter opr should have three inputs");
    using Param = opr::LocalShareForward::Param;
    auto&& param = opr->cast_final_safe<opr::LocalShareBackwardFilter>().param();
    mgb_assert(param.format == Param::Format::NCHW);
    auto&& src = opr->input()[0]->shape();
    auto&& diff = opr->input()[1]->shape();
    auto&& grad = opr->output()[0]->shape();
    size_t groups = 1, spatial = 3;
    if (param.sparse == Param::Sparse::GROUP) {
        groups = grad[0];
        spatial = 4;
    }
    const size_t fh = grad[spatial], fw = grad[spatial + 1];
    return diff.total_nr_elems() * fh * fw * src[1] * 2 / groups;
}

template <>
M
Megvii Engine Team 已提交
357 358 359 360
uint64_t opr_footprint_func<opr::DeformableConvForward>(cg::OperatorNodeBase* opr) {
    mgb_assert(
            opr->input().size() == 4,
            "DeformableConvForward opr should have four inputs");
361 362 363
    auto&& out_shape = opr->output()[0]->shape();
    auto&& filter_shape = opr->input()[1]->shape();
    using Param = opr::DeformableConvForward::Param;
364
    auto&& param = opr->cast_final_safe<opr::DeformableConvForward>().param();
365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380
    size_t fh, fw, icpg;
    mgb_assert(param.format == Param::Format::NCHW);
    if (param.sparse == Param::Sparse::GROUP) {
        icpg = filter_shape[2];
        fh = filter_shape[3], fw = filter_shape[4];
    } else {
        icpg = filter_shape[1];
        fh = filter_shape[2], fw = filter_shape[3];
    }
    //! conv(1 mul), mask(1, mul), accumulate(1 add)
    return out_shape.total_nr_elems() * fh * fw * icpg * 3;
}

// DeformableConvBackwardFilter
// Fix: assert checks for 5 inputs but the message claimed "four".
template <>
uint64_t opr_footprint_func<opr::DeformableConvBackwardFilter>(
        cg::OperatorNodeBase* opr) {
    mgb_assert(
            opr->input().size() == 5,
            "DeformableConvBackwardFilter opr should have five inputs");
    auto&& out_shape = opr->output()[0]->shape();
    auto&& filter_shape = opr->input()[1]->shape();
    using Param = opr::DeformableConvBackwardFilter::Param;
    // NOTE(review): cast target is opr::Convolution rather than
    // DeformableConvBackwardFilter -- verify this is intentional and that the
    // param layouts are compatible.
    auto&& param = opr->cast_final_safe<opr::Convolution>().param();
    size_t fh, fw, icpg;
    mgb_assert(param.format == Param::Format::NCHW);
    if (param.sparse == Param::Sparse::GROUP) {
        icpg = filter_shape[2];
        fh = filter_shape[3], fw = filter_shape[4];
    } else {
        icpg = filter_shape[1];
        fh = filter_shape[2], fw = filter_shape[3];
    }
    //! deconv(1 mul), mask(1 mul), accumulate(1 add), bilinear(4 add, 4mul,
    //! skip)
    return out_shape.total_nr_elems() * fh * fw * icpg * 3;
}

// DeformableConvBackwardData
// Fix: assert checks for 5 inputs but the message claimed "four".
template <>
uint64_t opr_footprint_func<opr::DeformableConvBackwardData>(
        cg::OperatorNodeBase* opr) {
    mgb_assert(
            opr->input().size() == 5,
            "DeformableConvBackwardData opr should have five inputs");
    auto&& out_shape = opr->output()[0]->shape();
    auto&& filter_shape = opr->input()[1]->shape();
    using Param = opr::DeformableConvForward::Param;
    // NOTE(review): cast target is opr::Convolution rather than
    // DeformableConvBackwardData -- verify this is intentional and that the
    // param layouts are compatible.
    auto&& param = opr->cast_final_safe<opr::Convolution>().param();
    size_t fh, fw, icpg;
    mgb_assert(param.format == Param::Format::NCHW);
    if (param.sparse == Param::Sparse::GROUP) {
        icpg = filter_shape[2];
        fh = filter_shape[3], fw = filter_shape[4];
    } else {
        icpg = filter_shape[1];
        fh = filter_shape[2], fw = filter_shape[3];
    }
    //! deconv(1 mul), mask(1 mul), accumulate(1 add), grad_weight(1 mul, skip),
    //! grad_coord(4mul, 4 add)
    return out_shape.total_nr_elems() * fh * fw * icpg * 12;
}

template <>
M
Megvii Engine Team 已提交
427 428 429 430 431
uint64_t opr_footprint_func<opr::BatchConvBiasForward>(cg::OperatorNodeBase* opr) {
    mgb_assert(
            opr->input().size() == 2 || opr->input().size() == 3 ||
                    opr->input().size() == 4,
            "BatchConvBias opr should have two/three/four inputs");
432 433 434 435 436
    auto&& out_shape = opr->output()[0]->shape();
    auto&& src_shape = opr->input()[0]->shape();
    auto&& filter_shape = opr->input()[1]->shape();
    using Param = opr::BatchConvBiasForward::Param;
    auto&& param = opr->cast_final_safe<opr::BatchConvBiasForward>().param();
437
    size_t packed_channels = 1;
438
    size_t kern_spatial_pos = 3;
439 440 441
    if (param.format == Param::Format::NCHW4) {
        packed_channels = 4;
    }
M
Megvii Engine Team 已提交
442 443
    size_t fh = filter_shape[kern_spatial_pos], fw = filter_shape[kern_spatial_pos + 1];
    return out_shape.total_nr_elems() * fh * fw * src_shape[1] * packed_channels * 2;
444 445 446 447 448 449 450 451 452 453
}

// Pooling: each output element scans one pooling window.
template <>
uint64_t opr_footprint_func<opr::PoolingForward>(cg::OperatorNodeBase* opr) {
    auto&& pool_param = opr->cast_final_safe<opr::PoolingForward>().param();
    uint64_t window = pool_param.window_h * pool_param.window_w;
    return opr->output(0)->shape().total_nr_elems() * window;
}

454 455 456 457 458 459 460 461
// PoolingBackWard
template <>
uint64_t opr_footprint_func<opr::PoolingBackward>(cg::OperatorNodeBase* opr) {
    auto&& param = opr->cast_final_safe<opr::PoolingBackward>().param();
    auto area = param.window_h * param.window_w;
    return opr->input()[0]->shape().total_nr_elems() * area;
}

462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493
// Concat
template <>
uint64_t opr_footprint_func<opr::Concat>(cg::OperatorNodeBase* opr) {
    auto&& out_shape = opr->output()[0]->shape();
    return out_shape.total_nr_elems();
}

// Dimshuffle: one move per output element.
template <>
uint64_t opr_footprint_func<opr::Dimshuffle>(cg::OperatorNodeBase* opr) {
    return opr->output()[0]->shape().total_nr_elems();
}

// Reduce: every input element is visited once.
template <>
uint64_t opr_footprint_func<opr::Reduce>(cg::OperatorNodeBase* opr) {
    return opr->input()[0]->shape().total_nr_elems();
}

// Host2DeviceCopy: one transfer per output element.
template <>
uint64_t opr_footprint_func<opr::Host2DeviceCopy>(cg::OperatorNodeBase* opr) {
    return opr->output()[0]->shape().total_nr_elems();
}

/******************* Register Param Json Functions *************************/
#if MGB_ENABLE_JSON
//! convert a live opr's param to json, for opr type T
template <class T>
std::shared_ptr<json::Value> opr_param_json_func(cg::OperatorNodeBase* opr);

//! convert the param of opr type T read from a serialized model to json,
//! without constructing the opr itself
template <class T>
std::shared_ptr<json::Value> serial_param_json_func(
        serialization::OprLoadContextRawPOD& context);

// read cls's POD param from the load context and convert it to json
#define REGISTE_SERIAL_PARAM_JSON_FUNC(cls)                                   \
    template <>                                                               \
    std::shared_ptr<json::Value> serial_param_json_func<opr::cls>(            \
            serialization::OprLoadContextRawPOD & context) {                  \
        return opr::opr_param_to_json(context.read_param<opr::cls::Param>()); \
    }

// define both the live-opr and the serialized-param json converters for cls
#define REGISTE_PARAM_JSON_FUNC(cls)                                             \
    template <>                                                                  \
    std::shared_ptr<json::Value> opr_param_json_func<opr::cls>(                  \
            cg::OperatorNodeBase * opr) {                                        \
        return opr::opr_param_to_json(opr->cast_final_safe<opr::cls>().param()); \
    }                                                                            \
    REGISTE_SERIAL_PARAM_JSON_FUNC(cls)

// Register json converters for every opr whose param already has an
// opr_param_to_json overload in megdnn.
REGISTE_PARAM_JSON_FUNC(Elemwise)
REGISTE_PARAM_JSON_FUNC(ConvolutionForward)
REGISTE_PARAM_JSON_FUNC(Convolution3D)
REGISTE_PARAM_JSON_FUNC(ConvBiasForward)
REGISTE_PARAM_JSON_FUNC(ConvolutionBackwardData)
REGISTE_PARAM_JSON_FUNC(Convolution3DBackwardData)
REGISTE_PARAM_JSON_FUNC(ConvolutionBackwardFilter)
REGISTE_PARAM_JSON_FUNC(MatrixMul)
REGISTE_PARAM_JSON_FUNC(BatchedMatrixMul)
REGISTE_PARAM_JSON_FUNC(Dot)
REGISTE_PARAM_JSON_FUNC(MatrixInverse)
REGISTE_PARAM_JSON_FUNC(PoolingForward)
REGISTE_PARAM_JSON_FUNC(PoolingBackward)
REGISTE_PARAM_JSON_FUNC(SVD)
REGISTE_PARAM_JSON_FUNC(MaskConvolution)
REGISTE_PARAM_JSON_FUNC(Images2Neibs)
REGISTE_PARAM_JSON_FUNC(Local)
REGISTE_PARAM_JSON_FUNC(GroupLocal)
REGISTE_PARAM_JSON_FUNC(LRN)
REGISTE_PARAM_JSON_FUNC(Concat)
REGISTE_PARAM_JSON_FUNC(Reduce)
REGISTE_PARAM_JSON_FUNC(LocalShareForward)
REGISTE_PARAM_JSON_FUNC(LocalShareBackwardData)
REGISTE_PARAM_JSON_FUNC(LocalShareBackwardFilter)
REGISTE_PARAM_JSON_FUNC(DeformableConvForward)
REGISTE_PARAM_JSON_FUNC(DeformableConvBackwardFilter)
REGISTE_PARAM_JSON_FUNC(DeformableConvBackwardData)
REGISTE_PARAM_JSON_FUNC(DeformablePSROIPoolingForward)
REGISTE_PARAM_JSON_FUNC(BatchConvBiasForward)
REGISTE_PARAM_JSON_FUNC(BatchNormForward)
REGISTE_PARAM_JSON_FUNC(ElemwiseMultiType)
REGISTE_PARAM_JSON_FUNC(Argsort)
REGISTE_PARAM_JSON_FUNC(Argmax)
REGISTE_PARAM_JSON_FUNC(Argmin)
REGISTE_PARAM_JSON_FUNC(AdaptivePooling)
REGISTE_PARAM_JSON_FUNC(ROIPooling)
REGISTE_PARAM_JSON_FUNC(ROIAlign)
REGISTE_PARAM_JSON_FUNC(WarpPerspective)
REGISTE_PARAM_JSON_FUNC(WarpAffine)
REGISTE_PARAM_JSON_FUNC(Remap)
REGISTE_PARAM_JSON_FUNC(Resize)
REGISTE_PARAM_JSON_FUNC(IndexingOneHot)
REGISTE_PARAM_JSON_FUNC(IndexingSetOneHot)
REGISTE_PARAM_JSON_FUNC(TopK)
REGISTE_PARAM_JSON_FUNC(UniformRNG)
REGISTE_PARAM_JSON_FUNC(GaussianRNG)
REGISTE_PARAM_JSON_FUNC(Linspace)
REGISTE_PARAM_JSON_FUNC(Eye)
REGISTE_PARAM_JSON_FUNC(CvtColor)
REGISTE_PARAM_JSON_FUNC(LayerNormBackward)
REGISTE_PARAM_JSON_FUNC(AdaptivePoolingBackward)
REGISTE_PARAM_JSON_FUNC(DropoutBackward)

566 567
std::shared_ptr<json::Value> dimshuffle_param2json(
        const opr::Dimshuffle::Param& param) {
M
Megvii Engine Team 已提交
568 569 570
    auto pattern = json::Array::make();
    for (size_t i = 0; i < param.pattern_len; i++)
        pattern->add(json::NumberInt::make(param.pattern[i]));
571

M
Megvii Engine Team 已提交
572
    return json::Object::make({
573 574
            {"ndim", json::NumberInt::make(param.ndim)},
            {"pattern", pattern},
M
Megvii Engine Team 已提交
575 576
    });
}

// live opr: pull the param straight off the node
template <>
std::shared_ptr<json::Value> opr_param_json_func<opr::Dimshuffle>(
        cg::OperatorNodeBase* opr) {
    return dimshuffle_param2json(opr->cast_final_safe<opr::Dimshuffle>().param());
}
// serialized model: read the POD param from the load context
template <>
std::shared_ptr<json::Value> serial_param_json_func<opr::Dimshuffle>(
        serialization::OprLoadContextRawPOD& context) {
    return dimshuffle_param2json(context.read_param<opr::Dimshuffle::Param>());
}

590 591
std::shared_ptr<json::Value> axis_add_remove_param2json(
        const opr::AxisAddRemove::Param& param) {
M
Megvii Engine Team 已提交
592 593 594 595 596 597 598 599 600 601 602
    auto desc = json::Array::make();
    for (size_t i = 0; i < param.nr_desc; i++) {
        auto axisdesc = param.desc[i];
        desc->add(json::Object::make({
                {"method",
                 json::NumberInt::make(static_cast<int32_t>(axisdesc.method))},
                {"axisnum", json::NumberInt::make(axisdesc.axis.get_raw())},
        }));
    }

    return json::Object::make({
603 604
            {"nr_desc", json::NumberInt::make(param.nr_desc)},
            {"desc", desc},
M
Megvii Engine Team 已提交
605 606
    });
}
607

// live opr: pull the param straight off the node
template <>
std::shared_ptr<json::Value> opr_param_json_func<opr::AxisAddRemove>(
        cg::OperatorNodeBase* opr) {
    return axis_add_remove_param2json(
            opr->cast_final_safe<opr::AxisAddRemove>().param());
}

// serialized model: read the POD param from the load context
template <>
std::shared_ptr<json::Value> serial_param_json_func<opr::AxisAddRemove>(
        serialization::OprLoadContextRawPOD& context) {
    return axis_add_remove_param2json(context.read_param<opr::AxisAddRemove::Param>());
}

621 622 623 624 625 626
std::shared_ptr<json::Value> indexing_param_to_json(
        const std::vector<opr::indexing::AxisIndexer>& indices) {
    auto desc = json::Array::make();
    for (auto& index : indices) {
        desc->add(json::Object::make({
                {"axis", json::NumberInt::make(index.axis.get_raw())},
M
Megvii Engine Team 已提交
627
                {"begin", json::NumberInt::make(index.begin.node() != nullptr)},
628
                {"end", json::NumberInt::make(index.end.node() != nullptr)},
M
Megvii Engine Team 已提交
629
                {"step", json::NumberInt::make(index.step.node() != nullptr)},
630 631 632 633 634 635
                {"idx", json::NumberInt::make(index.idx.node() != nullptr)},
        }));
    }
    return desc;
}

// For indexing-family oprs the "param" is the index descriptor list rather
// than a POD param: the live-opr variant reads index_desc() from the node,
// while the serialized variant decodes an IndexDescMaskDump and emits the
// same begin/end/step/idx presence flags.
#define REGISTE_INDEXING_PARAM_JSON_FUNC(cls)                                  \
    template <>                                                                \
    std::shared_ptr<json::Value> opr_param_json_func<opr::cls>(                \
            cg::OperatorNodeBase * opr) {                                      \
        auto indices = opr->cast_final_safe<opr::cls>().index_desc();          \
        return indexing_param_to_json(indices);                                \
    }                                                                          \
    template <>                                                                \
    std::shared_ptr<json::Value> serial_param_json_func<opr::cls>(             \
            serialization::OprLoadContextRawPOD & context) {                   \
        auto indices = context.read_param<serialization::IndexDescMaskDump>(); \
        auto desc = json::Array::make();                                       \
        for (size_t i = 0; i < indices.nr_item; i++) {                         \
            auto&& index = indices.items[i];                                   \
            desc->add(json::Object::make({                                     \
                    {"axis", json::NumberInt::make(index.axis)},               \
                    {"begin", json::NumberInt::make(index.begin)},             \
                    {"end", json::NumberInt::make(index.end)},                 \
                    {"step", json::NumberInt::make(index.step)},               \
                    {"idx", json::NumberInt::make(index.idx)},                 \
            }));                                                               \
        }                                                                      \
        return desc;                                                           \
    }

REGISTE_INDEXING_PARAM_JSON_FUNC(Subtensor);
REGISTE_INDEXING_PARAM_JSON_FUNC(SetSubtensor);
REGISTE_INDEXING_PARAM_JSON_FUNC(IncrSubtensor);
REGISTE_INDEXING_PARAM_JSON_FUNC(IndexingMultiAxisVec);
REGISTE_INDEXING_PARAM_JSON_FUNC(IndexingSetMultiAxisVec);
REGISTE_INDEXING_PARAM_JSON_FUNC(IndexingIncrMultiAxisVec);
REGISTE_INDEXING_PARAM_JSON_FUNC(MeshIndexing);
REGISTE_INDEXING_PARAM_JSON_FUNC(IncrMeshIndexing);
REGISTE_INDEXING_PARAM_JSON_FUNC(SetMeshIndexing);
REGISTE_INDEXING_PARAM_JSON_FUNC(BatchedMeshIndexing);
REGISTE_INDEXING_PARAM_JSON_FUNC(BatchedIncrMeshIndexing);
REGISTE_INDEXING_PARAM_JSON_FUNC(BatchedSetMeshIndexing);

674
std::shared_ptr<json::Value> reshape_param2json(const opr::Reshape::Param& param) {
M
Megvii Engine Team 已提交
675
    auto desc = json::Array::make();
676
    if (param.axis != param.MAX_NDIM) {
M
Megvii Engine Team 已提交
677
        return json::Object::make({
678
                {"axis", json::NumberInt::make(param.axis)},
M
Megvii Engine Team 已提交
679 680 681
        });
    } else {
        return json::Object::make();
682
    }
M
Megvii Engine Team 已提交
683
}

// live opr: pull the param straight off the node
template <>
std::shared_ptr<json::Value> opr_param_json_func<opr::Reshape>(
        cg::OperatorNodeBase* opr) {
    return reshape_param2json(opr->cast_final_safe<opr::Reshape>().param());
}

// serialized model: read the POD param from the load context
template <>
std::shared_ptr<json::Value> serial_param_json_func<opr::Reshape>(
        serialization::OprLoadContextRawPOD& context) {
    return reshape_param2json(context.read_param<opr::Reshape::Param>());
}

// Serialize a GetVarShape param: emit {"axis": n} when an axis was given,
// otherwise an empty object (MAX_NDIM is the "no axis" sentinel).
// Fix: removed the unused local `auto desc = json::Array::make();`.
std::shared_ptr<json::Value> getvarshape_param2json(
        const opr::GetVarShape::Param& param) {
    if (param.axis != param.MAX_NDIM) {
        return json::Object::make({
                {"axis", json::NumberInt::make(param.axis)},
        });
    } else {
        return json::Object::make();
    }
}
709 710

template <>
711
std::shared_ptr<json::Value> opr_param_json_func<opr::GetVarShape>(
M
Megvii Engine Team 已提交
712
        cg::OperatorNodeBase* opr) {
713 714 715 716 717 718 719 720 721 722 723 724
    auto axis_param = opr->cast_final_safe<opr::GetVarShape>().param();
    return getvarshape_param2json(axis_param);
}

//! JSON dump of a serialized GetVarShape param from the load context.
template <>
std::shared_ptr<json::Value> serial_param_json_func<opr::GetVarShape>(
        serialization::OprLoadContextRawPOD& context) {
    const auto param = context.read_param<opr::GetVarShape::Param>();
    return getvarshape_param2json(param);
}

//! Dump an NMSKeep param to JSON: both fields are emitted as plain numbers.
std::shared_ptr<json::Value> nmskeep_param2json(
        const opr::standalone::NMSKeep::Param& param) {
    auto obj = json::Object::make();
    (*obj)["iou_thresh"] = json::Number::make(param.iou_thresh);
    (*obj)["max_output"] = json::Number::make(param.max_output);
    return obj;
}
730

731 732 733 734 735 736 737 738 739 740 741 742 743
template <>
std::shared_ptr<json::Value> opr_param_json_func<opr::standalone::NMSKeep>(
        cg::OperatorNodeBase* opr) {
    auto nms_param = opr->cast_final_safe<opr::standalone::NMSKeep>().param();
    return nmskeep_param2json(nms_param);
}

//! JSON dump of a serialized NMSKeep param from the load context.
template <>
std::shared_ptr<json::Value> serial_param_json_func<opr::standalone::NMSKeep>(
        serialization::OprLoadContextRawPOD& context) {
    const auto param = context.read_param<opr::standalone::NMSKeep::Param>();
    return nmskeep_param2json(param);
}

#endif  // MGB_ENABLE_JSON

}  // namespace

template <class OprType>
void OprFootprint::add_single_comp_footprint() {
M
Megvii Engine Team 已提交
750 751 752 753
    MIDOUT_B(
            OprType, midout_iv(MGB_HASH_STR("OprFootprint::add_single_comp_footprint")))
    auto&& record = m_type2comp_footprint.emplace(
            OprType::typeinfo(), opr_footprint_func<OprType>);
754
    mgb_assert(record.second, "duplicate opr typeinfo");
755
    MIDOUT_E
756 757 758 759 760
}

#if MGB_ENABLE_JSON
//! Register both JSON param dumpers for OprType: the live-graph dumper
//! (m_type2param_json) and the serialized-param dumper
//! (m_type2serialparam_json). Duplicate registration asserts.
template <class OprType>
void OprFootprint::add_single_param_json() {
    auto&& opr_rec = m_type2param_json.emplace(
            OprType::typeinfo(), opr_param_json_func<OprType>);
    mgb_assert(opr_rec.second, "duplicate opr typeinfo");
    auto&& serial_rec = m_type2serialparam_json.emplace(
            OprType::typeinfo(), serial_param_json_func<OprType>);
    mgb_assert(serial_rec.second, "duplicate opr typeinfo");
}
#endif

//! Populate both registries. Order is irrelevant (keyed by typeinfo);
//! the registration helpers assert against duplicates.
void OprFootprint::init_all_footprints() {
    // -- computation (FLOPs) estimators ----------------------------------
    add_single_comp_footprint<opr::Elemwise>();
    add_single_comp_footprint<opr::AddUpdate>();
    add_single_comp_footprint<opr::ConvolutionForward>();
    add_single_comp_footprint<opr::ConvBiasForward>();
    add_single_comp_footprint<opr::ConvolutionBackwardData>();
    add_single_comp_footprint<opr::ConvolutionBackwardFilter>();
    add_single_comp_footprint<opr::MatrixMul>();
    add_single_comp_footprint<opr::PoolingForward>();
    add_single_comp_footprint<opr::PoolingBackward>();
    add_single_comp_footprint<opr::Concat>();
    add_single_comp_footprint<opr::Dimshuffle>();
    add_single_comp_footprint<opr::Reduce>();
    add_single_comp_footprint<opr::Host2DeviceCopy>();
    add_single_comp_footprint<opr::LocalShareForward>();
    add_single_comp_footprint<opr::LocalShareBackwardData>();
    add_single_comp_footprint<opr::LocalShareBackwardFilter>();
    add_single_comp_footprint<opr::DeformableConvForward>();
    add_single_comp_footprint<opr::DeformableConvBackwardFilter>();
    add_single_comp_footprint<opr::DeformableConvBackwardData>();
    add_single_comp_footprint<opr::BatchConvBiasForward>();

#if MGB_ENABLE_JSON
    // -- param-to-JSON dumpers (live graph + serialized form) ------------
    add_single_param_json<opr::Elemwise>();
    add_single_param_json<opr::ConvolutionForward>();
    add_single_param_json<opr::Convolution3D>();
    add_single_param_json<opr::ConvBiasForward>();
    add_single_param_json<opr::ConvolutionBackwardData>();
    add_single_param_json<opr::Convolution3DBackwardData>();
    add_single_param_json<opr::ConvolutionBackwardFilter>();
    add_single_param_json<opr::MatrixMul>();
    add_single_param_json<opr::BatchedMatrixMul>();
    add_single_param_json<opr::Dot>();
    add_single_param_json<opr::MatrixInverse>();
    add_single_param_json<opr::PoolingForward>();
    add_single_param_json<opr::PoolingBackward>();
    add_single_param_json<opr::SVD>();
    add_single_param_json<opr::MaskConvolution>();
    add_single_param_json<opr::Images2Neibs>();
    add_single_param_json<opr::Local>();
    add_single_param_json<opr::GroupLocal>();
    add_single_param_json<opr::LRN>();
    add_single_param_json<opr::Concat>();
    add_single_param_json<opr::Dimshuffle>();
    add_single_param_json<opr::AxisAddRemove>();
    add_single_param_json<opr::Subtensor>();
    add_single_param_json<opr::SetSubtensor>();
    add_single_param_json<opr::IncrSubtensor>();
    add_single_param_json<opr::IndexingMultiAxisVec>();
    add_single_param_json<opr::IndexingSetMultiAxisVec>();
    add_single_param_json<opr::IndexingIncrMultiAxisVec>();
    add_single_param_json<opr::MeshIndexing>();
    add_single_param_json<opr::SetMeshIndexing>();
    add_single_param_json<opr::IncrMeshIndexing>();
    add_single_param_json<opr::BatchedMeshIndexing>();
    add_single_param_json<opr::BatchedSetMeshIndexing>();
    add_single_param_json<opr::BatchedIncrMeshIndexing>();
    add_single_param_json<opr::Reduce>();
    add_single_param_json<opr::LocalShareForward>();
    add_single_param_json<opr::LocalShareBackwardData>();
    add_single_param_json<opr::LocalShareBackwardFilter>();
    add_single_param_json<opr::DeformableConvForward>();
    add_single_param_json<opr::DeformableConvBackwardFilter>();
    add_single_param_json<opr::DeformableConvBackwardData>();
    add_single_param_json<opr::DeformablePSROIPoolingForward>();
    add_single_param_json<opr::BatchConvBiasForward>();
    add_single_param_json<opr::BatchNormForward>();
    add_single_param_json<opr::Reshape>();
    add_single_param_json<opr::GetVarShape>();
    add_single_param_json<opr::Argsort>();
    add_single_param_json<opr::Argmin>();
    add_single_param_json<opr::Argmax>();
    add_single_param_json<opr::ElemwiseMultiType>();
    add_single_param_json<opr::AdaptivePooling>();
    add_single_param_json<opr::ROIPooling>();
    add_single_param_json<opr::ROIAlign>();
    add_single_param_json<opr::WarpPerspective>();
    add_single_param_json<opr::Remap>();
    add_single_param_json<opr::Resize>();
    add_single_param_json<opr::IndexingOneHot>();
    add_single_param_json<opr::IndexingSetOneHot>();
    add_single_param_json<opr::WarpAffine>();
    add_single_param_json<opr::TopK>();
    add_single_param_json<opr::UniformRNG>();
    add_single_param_json<opr::GaussianRNG>();
    add_single_param_json<opr::Linspace>();
    add_single_param_json<opr::Eye>();
    add_single_param_json<opr::standalone::NMSKeep>();
    add_single_param_json<opr::CvtColor>();
    add_single_param_json<opr::LayerNormBackward>();
    add_single_param_json<opr::AdaptivePoolingBackward>();
    add_single_param_json<opr::DropoutBackward>();
#endif
}

//! Compute the footprint of a single opr: input/output layouts, total
//! memory traffic in bytes, computation estimate and (when JSON is on)
//! the param dump.
OprFootprint::Result OprFootprint::calc_footprint(cg::OperatorNodeBase* opr) {
    Result rst;
    auto nr_bytes = [](VarNode* var) {
        return var->dtype().size(var->shape().total_nr_elems());
    };
    auto&& dep_map = opr->node_prop().dep_map();
    for (auto&& inp : opr->input()) {
        // Prefer the concrete mem-plan layout when one has been assigned;
        // otherwise fall back to a contiguous layout from shape + dtype.
        if (inp->mem_plan().valid()) {
            rst.inp_layout.push_back(inp->layout());
        } else {
            rst.inp_layout.push_back({inp->shape(), inp->dtype()});
        }
        // Only inputs whose device value is actually read contribute to
        // the memory footprint (shape-only deps are free).
        if (cg::OperatorNodeBase::NodeProp::is_device_value_dep(dep_map.at(inp))) {
            rst.memory += nr_bytes(inp);
        }
    }
    for (auto&& out : opr->output()) {
        if (out->contain_flag(VarNode::Flag::VOLATILE_CONTENT))
            continue;
        rst.out_shape.push_back(out->shape());
        rst.memory += nr_bytes(out);
    }
    rst.computation = get_computation(opr);
#if MGB_ENABLE_JSON
    rst.param = get_param_json(opr);
#endif
    rst.opr_type = opr->dyn_typeinfo();
    return rst;
}

//! Look up the registered computation estimator for this opr type;
//! unregistered types contribute zero computation.
uint64_t OprFootprint::get_computation(cg::OperatorNodeBase* opr) {
    auto it = m_type2comp_footprint.find(opr->dyn_typeinfo());
    if (it == m_type2comp_footprint.end())
        return 0;
    return (it->second)(opr);
}

#if MGB_ENABLE_JSON
//! Look up the registered live-graph param dumper for this opr type;
//! types without a dumper yield an empty JSON object.
std::shared_ptr<json::Value> OprFootprint::get_param_json(cg::OperatorNodeBase* opr) {
    auto it = m_type2param_json.find(opr->dyn_typeinfo());
    if (it == m_type2param_json.end())
        return json::Object::make();
    return (it->second)(opr);
}

909 910 911 912 913 914 915 916 917
std::shared_ptr<json::Value> OprFootprint::get_serial_param_json(
        Typeinfo* type, serialization::OprLoadContextRawPOD& context) {
    auto param_trait = m_type2serialparam_json.find(type);
    if (param_trait != m_type2serialparam_json.end()) {
        return (param_trait->second)(context);
    }
    return json::Object::make();
}

918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961
std::shared_ptr<json::Value> OprFootprint::Result::to_json() const {
    using namespace json;
    std::shared_ptr<Value> comp;
    if (computation) {
        comp = NumberInt::make(computation);
    } else {
        comp = Null::make();
    }
    auto format_shape_arr = [](const TensorShapeArray& arr) {
        auto ret = Array::make();
        for (auto&& shp : arr) {
            auto cur = Array::make();
            for (size_t i = 0; i < shp.ndim; ++i) {
                cur->add(NumberInt::make(shp[i]));
            }
            ret->add(std::move(cur));
        }
        return ret;
    };
    auto format_layout_arr =
            [](const TensorLayoutArray& arr) -> std::shared_ptr<Value> {
        auto ret = Array::make();
        bool have_non_contig = false;
        for (auto&& item : arr) {
            if (item.is_contiguous()) {
                ret->add(json::Null::make());
            } else {
                have_non_contig = true;
                auto cur = Array::make();
                for (size_t i = 0; i < item.ndim; ++i) {
                    cur->add(NumberInt::make(item.stride[i]));
                }
                ret->add(std::move(cur));
            }
        }
        if (!have_non_contig) {
            ret.reset();
        }
        return ret;
    };

    TensorShapeArray inp_shape;
    for (auto&& i : inp_layout)
        inp_shape.push_back(i);
M
Megvii Engine Team 已提交
962 963 964 965 966 967
    auto ret = Object::make(
            {{"computation", std::move(comp)},
             {"memory", NumberInt::make(memory)},
             {"in_shapes", format_shape_arr(inp_shape)},
             {"out_shapes", format_shape_arr(out_shape)},
             {"param", param}});
968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996
    if (auto inp_layout_json = format_layout_arr(inp_layout)) {
        ret->operator[]("in_layouts") = std::move(inp_layout_json);
    }
    return ret;
}

//! Compile \p graph for \p outputs and dump, as JSON, the footprint of every
//! opr in the resulting execution sequence plus the graph-exec info itself.
std::shared_ptr<json::Value> OprFootprint::get_opr_fp_graph_exec(
        cg::ComputingGraph& graph, const SymbolVarArray& outputs) {
    OprFootprint footprint;
    ComputingGraph::OutputSpec out_spec;
    for (auto&& out : outputs) {
        out_spec.emplace_back(out, nullptr);
    }
    // Force static memory allocation at compile time so the footprint
    // reflects the final execution plan.
    graph.options().allocate_static_mem_after_graph_compile = true;
    auto async_exec = graph.compile(out_spec);
    std::vector<std::pair<json::String, std::shared_ptr<json::Value>>> rst_vals;
    auto on_opr = [&footprint, &rst_vals](cg::OperatorNodeBase* opr) {
        rst_vals.emplace_back(
                json::String(opr->id_str()),
                footprint.calc_footprint(opr).to_json());
        return true;
    };
    async_exec->iter_opr_seq(on_opr);
    return json::Object::make(
            {{"opr_footprint", json::Object::make(rst_vals)},
             {"graph_exec", async_exec->to_json()}});
}
#endif

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}