interpreter_impl.cpp 54.4 KB
Newer Older
M
Megvii Engine Team 已提交
1
/**
2
 * \file imperative/src/impl/interpreter/interpreter_impl.cpp
M
Megvii Engine Team 已提交
3 4
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
5
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
M
Megvii Engine Team 已提交
6 7 8 9 10 11
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */

12
#include "./interpreter_impl.h"
13

14 15
#include "range/v3/all.hpp"

16
#include "megbrain/common.h"
17 18
#include "megbrain/imperative/opr_utility.h"
#include "megbrain/imperative/ops/autogen.h"
19 20
#include "megbrain/imperative/ops/backward_graph.h"
#include "megbrain/imperative/ops/opr_attr.h"
21
#include "megbrain/imperative/ops/utility.h"
22 23
#include "megbrain/imperative/utils/to_string.h"

24
#include "../blob_manager_impl.h"
25 26 27
#include "../event_pool.h"
#include "../op_trait.h"

28 29 30 31 32
using namespace mgb;
using namespace imperative;
using namespace interpreter;
using namespace interpreter::intl;

33
// Record a profiler event of the given type; a no-op unless profiling is on.
// Wrapped in do { ... } while (0) so the expansion is a single statement:
// the previous bare-if form was unsafe inside unbraced if/else bodies
// (dangling-else hazard) and its trailing backslash after `}` silently
// continued the macro onto the following blank line.
#define RECORD_EVENT(type, ...) \
    do { \
        if (Profiler::is_profiling()) { \
            Profiler::record<type>(type{__VA_ARGS__}); \
        } \
    } while (0)


39 40 41 42 43 44 45 46 47 48
namespace {
    auto tinfo_to_tid(SmallVector<TensorInfo*> tinfo) {
        SmallVector<uint64_t> tid;
        for (auto* ptinfo: tinfo) {
            tid.push_back(ptinfo->id);
        }
        return tid;
    };
}

49 50 51 52
namespace mgb {
    using namespace profiler;
}

53 54 55 56 57
#if defined(_WIN32) || defined(_WIN64)
#define SYMBOL_EXPORT __declspec(dllexport)
#else
#define SYMBOL_EXPORT __attribute__((visibility("default")))
#endif
58 59 60 61 62 63 64

namespace mgb {

/**
 * USAGE
 *
 *   header:
65
 *     namespace mgb { void imperative_log_profile(const char* message); }
66 67 68 69 70
 *
 *   code:
 *     mgb::imperative_log_profile("MY MESSAGE");
 *
 **/
71
SYMBOL_EXPORT
72 73 74 75
void imperative_log_profile_begin(const char* message) {
    RECORD_EVENT(CustomEvent, std::string{message});
}

76
SYMBOL_EXPORT
77 78 79 80
void imperative_log_profile_end(const char* message) {
    RECORD_EVENT(CustomFinishEvent, std::string{message});
}

81
SYMBOL_EXPORT
82 83 84 85 86 87 88
void imperative_log_profile(const char* message){
    imperative_log_profile_begin(message);
    imperative_log_profile_end(message);
}

}

89 90 91 92 93 94 95 96 97 98 99 100 101 102
// Thread id of the async worker; assigned once in
// WorkQueue::on_async_queue_worker_thread_start().
std::thread::id ChannelImpl::get_worker_tid() {
    return m_worker_state.tid;
}

// Accessor for channel-side state; asserts the caller is on the channel
// (user) thread, not the worker.
ChannelImpl::ChannelState& ChannelImpl::get_channel_state() {
    assert_in_channel();
    return m_channel_state;
}

// Accessor for worker-side state; asserts the caller is on the worker thread.
ChannelImpl::WorkerState& ChannelImpl::get_worker_state() {
    assert_in_worker();
    return m_worker_state;
}

103 104 105 106 107 108 109 110 111 112
// Worker-thread bootstrap: runs once on the async queue's thread before any
// command is processed. Names the thread, records its id, and installs a
// global OpDef allocator that routes allocations through the channel's
// evicting allocator.
// NOTE(review): the lambda captures the enclosing object by reference and is
// installed process-wide — assumes this channel outlives allocator use;
// confirm against OpDef::set_allocator's contract.
void ChannelImpl::WorkQueue::on_async_queue_worker_thread_start() {
    sys::set_thread_name("worker");
    m_owner->m_worker_state.tid = std::this_thread::get_id();
    OpDef::set_allocator([&](CompNode device, size_t size) {
        auto blob = Blob::make(device, size);
        // may trigger DTR eviction instead of failing outright
        m_owner->alloc_tensor_with_evict(blob.get());
        return blob->storage();
    });
}

113
// Do not use m_xxx_state directly below this point: these empty macro
// definitions poison the member names so later code must go through
// get_channel_state()/get_worker_state(), which assert the calling thread.
#define m_channel_state
#define m_worker_state

117 118 119 120 121 122 123 124 125
// Factory: each channel owns its own worker thread and command buffer.
std::unique_ptr<Interpreter::Channel> InterpreterImpl::create_channel() {
    return std::make_unique<ChannelImpl>();
}

// Process-wide interpreter singleton (function-local static: thread-safe,
// lazily initialized).
Interpreter& Interpreter::inst() {
    static InterpreterImpl inst_;
    return inst_;
}

126
// Insert a host tensor into the channel and return an opaque handle.
// `no_cache` requests that a cached device copy not be reused for this value.
Handle ChannelImpl::put(const HostTensorND& value, bool no_cache) {
    MGB_LOCK_GUARD(m_spin);
    mgb_assert(check_available(), "Channel already closed");
    auto& state = get_channel_state();
    // Scope marker for the profiler call stack.
    auto guard = StackManager::Guard{"Put", &state.stack_manager};
    return put_impl(value, no_cache);
}

// Allocate a TensorInfo for a host value and enqueue a Put command.
// In fully synchronous mode (m_async_level == 0) this also waits for the
// worker and the target device before returning.
TensorInfo* ChannelImpl::put_impl(const HostTensorND& value, bool no_cache) {
    // An empty tensor may carry uninitialized strides; normalize the layout
    // to contiguous before recording it. The const_cast is a deliberate
    // workaround: reset() re-installs the same storage with the fixed layout.
    if (value.empty()) {
        auto layout = value.layout();
        layout.init_contiguous_stride();
        const_cast<HostTensorND&>(value).reset(value.storage(), layout);
    }
    auto info = alloc();
    init(info, {value.layout(), value.comp_node(), value.proxy_to_default_cpu()});
    info->mem_desc.id = StorageIdentifier::make(++m_storage_id);
    // Keep the host value so the tensor can be regenerated after swap-out.
    info->h_value = value;
    m_buffer.enqueue(Put{info, value, no_cache});
    if (m_async_level == 0) {
        sync_impl();
        // also wait for the device stream, not just the worker queue
        info->desc.comp_node.sync();
    }
    return info;
}

153
// Insert an existing device tensor (with an optional host mirror `hvalue`)
// and return an opaque handle.
Handle ChannelImpl::put(const DeviceTensorND& data, const HostTensorND& hvalue) {
    MGB_LOCK_GUARD(m_spin);
    mgb_assert(check_available(), "Channel already closed");
    auto* info = put_impl(data, hvalue);
    return info;
}
// Device-tensor variant of put: the tensor is already materialized, so it is
// registered directly as Produced — no worker command is needed.
TensorInfo* ChannelImpl::put_impl(const DeviceTensorND& data, const HostTensorND& hvalue) {
    auto& state = get_channel_state();
    auto _ = StackManager::Guard{"Put", &state.stack_manager};
    auto info = alloc();
    RECORD_EVENT(TensorCommandEvent, info->id, TensorCommandKind::Put);
    init(info, {data.layout(), data.comp_node()});
    info->mem_desc.id = StorageIdentifier::make(++m_storage_id);
    info->ptr = Tensor::make(data, hvalue);
    RECORD_EVENT(TensorProduceEvent, info->id, info->desc.layout, info->desc.comp_node, data.raw_ptr());
    info->status = TensorInfo::Produced;
    RECORD_EVENT(TensorCommandFinishEvent, info->id, TensorCommandKind::Put);
    return info;
}

172
// Release a handle. Deleting on an already-closed channel is a silent no-op
// because close() has already flushed and freed everything.
void ChannelImpl::del(Handle handle) {
    MGB_LOCK_GUARD(m_spin);
    if (check_available()) {
        del_impl(handle);
    }
}

// Invalidate the handle on the channel side and queue the actual free for
// the worker.
void ChannelImpl::del_impl(Handle handle) {
    mgb_assert(m_valid_handle.count(handle), "invalid handle: %p", handle);
    m_valid_handle.erase(handle);
    m_buffer.enqueue(Del{reinterpret_cast<TensorInfo*>(handle)});
}

187
// Schedule bringing a swapped-out tensor back in. Ignored unless the
// `enable_swap` option is set.
void ChannelImpl::swap_in(Handle handle) {
    MGB_LOCK_GUARD(m_spin);
    mgb_assert(check_available(), "Channel already closed");
    auto& state = get_channel_state();
    if (!state.options.enable_swap) {
        return;
    }
    mgb_assert(m_valid_handle.find(handle) != m_valid_handle.end(),
            "invalid handle: %p", handle);
    m_buffer.enqueue(SwapIn{reinterpret_cast<TensorInfo*>(handle)});
}

199
// Schedule moving a tensor's storage out of device memory. Ignored unless
// the `enable_swap` option is set.
void ChannelImpl::swap_out(Handle handle) {
    MGB_LOCK_GUARD(m_spin);
    mgb_assert(check_available(), "Channel already closed");
    auto& state = get_channel_state();
    if (!state.options.enable_swap) {
        return;
    }
    mgb_assert(m_valid_handle.find(handle) != m_valid_handle.end(),
            "invalid handle: %p", handle);
    m_buffer.enqueue(SwapOut{reinterpret_cast<TensorInfo*>(handle)});
}

211
// Schedule dropping a tensor (DTR-style: storage is released, the tensor can
// be recomputed from its producer). Ignored unless `enable_drop` is set.
void ChannelImpl::drop(Handle handle) {
    MGB_LOCK_GUARD(m_spin);
    mgb_assert(check_available(), "Channel already closed");
    auto& state = get_channel_state();
    if (!state.options.enable_drop) {
        return;
    }
    mgb_assert(m_valid_handle.find(handle) != m_valid_handle.end(),
            "invalid handle: %p", handle);
    m_buffer.enqueue(Drop{reinterpret_cast<TensorInfo*>(handle)});
}

223
// Execute an op synchronously on the (default) CPU path, bypassing the
// worker queue: inputs are proxied to default-CPU tensors, the op runs
// inline, and each output is re-inserted through put_impl so it behaves
// like any other channel tensor.
void ChannelImpl::dispatch_default_cpu(
        std::shared_ptr<OpDef> op,
        const SmallVector<TensorInfo*>& input_infos,
        const SmallVector<LogicalTensorDesc>& input_descs,
        SmallVector<Handle>* outputs) {
    auto& state = get_channel_state();

    auto name = op->trait()->make_name(*op);
    auto _ = StackManager::Guard(name, &state.stack_manager);

    auto [output_descs, validated] = OpDef::infer_output_attrs_fallible(*op, input_descs);
    RECORD_EVENT(ShapeInferEvent, validated);

    SmallVector<DeviceTensorND> input_tensornds;
    input_tensornds.reserve(input_descs.size());
    CompNode output_cn;
    {
        // m_mutex guards reads of info->ptr / info->h_value against the worker.
        MGB_LOCK_GUARD(m_mutex);
        for (auto&& info : input_infos) {
            auto input_cn = info->desc.comp_node;
            // All inputs must agree on one comp node; it becomes the outputs'.
            if (!output_cn.valid()) {
                output_cn = input_cn;
            } else {
                mgb_assert(output_cn == input_cn, "cannot decide output comp node");
            }

            if (info->ptr && info->ptr->try_get_value()) {
                input_tensornds.emplace_back(info->ptr->get_value().proxy_to_default_cpu());
            } else {
                // It's OK for SwapOut. We assign h_value before drop ptr
                mgb_assert(!info->h_value.empty(), "inp->h_value is empty!");
                input_tensornds.emplace_back(info->h_value.proxy_to_default_cpu());
            }
        }
    }

    outputs->reserve(output_descs.size());
    SmallVector<DeviceTensorND> output_tensornds;
    output_tensornds.reserve(output_descs.size());
    for (auto&& desc : output_descs) {
        // TODO: may conflict with condtake, which need alloc inside
        mgb_assert(!desc.layout.is_empty());
        // use HostTensorND alloc_host for cuda pinned memory
        output_tensornds.emplace_back(HostTensorND(output_cn, desc.layout).proxy_to_default_cpu());
    }

    uint64_t op_id = Profiler::next_id();

    // Run the op inline on the proxied host buffers.
    OpDef::apply_on_device_tensornd(*op, input_tensornds, &output_tensornds);

    SmallVector<TensorInfo*> output_infos;
    output_infos.reserve(output_descs.size());
    for (auto&& tensornd : output_tensornds) {
        HostTensorND host_tensornd = HostTensorND::make_proxy(tensornd)
            .proxy_to_comp_node(output_cn);
        // use `put` for consistency
        auto info = reinterpret_cast<TensorInfo*>(put_impl(host_tensornd, false));
        mgb_assert(info->desc.layout.ndim != 0);
        output_infos.push_back(info);
        outputs->push_back(info);
    }
    // Lazily materialize op properties for the profiler: props are only
    // computed if the event is actually recorded.
    auto op_info_getter = [op]{
        std::unordered_map<std::string, std::string> op_info;
        auto props = OpDef::props(*op);
        for (auto&& [key, value]: props) {
            op_info[key] = value;
        }
        return op_info;
    };
    RECORD_EVENT(OpDispatchEvent, op_id, op->trait()->name, op_info_getter,
                 tinfo_to_tid(input_infos), tinfo_to_tid(output_infos),
                 state.stack_manager.dump());
}
296

297 298 299 300 301
// Dispatch an op to the device-kernel path: allocate output TensorInfos,
// enqueue an ApplyOp command for the worker, and honor the channel's
// async_level (1 = sync when shapes were not fully validated; 0 = fully
// synchronous including a device sync on every output).
void ChannelImpl::dispatch_kernel(
        std::shared_ptr<OpDef> op,
        const SmallVector<TensorInfo*>& input_infos,
        const SmallVector<LogicalTensorDesc>& input_descs,
        SmallVector<Handle>* outputs) {
    auto& state = get_channel_state();
    auto& options = state.options;

    auto name = op->trait()->make_name(*op);
    auto _  = StackManager::Guard{name, &state.stack_manager};

    auto [output_descs, validated] = OpDef::infer_output_attrs_fallible(*op, input_descs);
    RECORD_EVENT(ShapeInferEvent, validated);

    ApplyOp cmd{Profiler::next_id(), std::move(op)};
    // `input_infos` is a const reference, so the previous `std::move` here
    // silently degraded to a copy anyway; make the copy explicit.
    cmd.inputs = input_infos;
    cmd.outputs.reserve(output_descs.size());
    outputs->reserve(output_descs.size());
    // size_t index avoids the signed/unsigned comparison with size().
    for (size_t i = 0; i < output_descs.size(); ++i) {
        auto&& desc = output_descs[i];
        auto info = alloc();
        init(info, desc);
        // make sure desc's value is consistent with h_value
        if (!info->desc.value.empty()) {
            info->h_value = HostTensorND::make_proxy(desc.value)
                .proxy_to_comp_node(desc.comp_node);
        }
        cmd.outputs.push_back(info);
        outputs->push_back(info);
    }
    // Lazily materialize op properties for the profiler.
    auto op_info_getter = [op=cmd.op]{
        std::unordered_map<std::string, std::string> op_info;
        auto props = OpDef::props(*op);
        for (auto&& [key, value]: props) {
            op_info[key] = value;
        }
        return op_info;
    };
    RECORD_EVENT(OpDispatchEvent, cmd.id, cmd.op->trait()->name, op_info_getter,
                 tinfo_to_tid(cmd.inputs), tinfo_to_tid(cmd.outputs),
                 state.stack_manager.dump());
    m_buffer.enqueue(std::move(cmd));
    if (!validated && options.async_level == 1) {
        sync_impl();
    } else if (options.async_level == 0) {
        sync_impl();
        // check device error
        for (auto&& oup : *outputs) {
            auto info = reinterpret_cast<TensorInfo*>(oup);
            info->ptr->comp_node().sync();
        }
    }
}

// Public entry point: apply `op` to the given input handles and return
// handles to the outputs. Thin locking/validity wrapper over apply_op_impl.
SmallVector<Handle> ChannelImpl::apply_op(
        std::shared_ptr<OpDef> op,
        const SmallVector<Handle>& inputs) {
    MGB_LOCK_GUARD(m_spin);
    mgb_assert(check_available(), "Channel already closed");
    return apply_op_impl(std::move(op), inputs);
}

// Validate inputs, snapshot their descriptors under the mutex, then route
// the op either to the inline default-CPU path or to the async kernel path.
SmallVector<Handle> ChannelImpl::apply_op_impl(
        std::shared_ptr<OpDef> op,
        const SmallVector<Handle>& inputs) {
    auto& state = get_channel_state();
    for (auto i : inputs) {
        mgb_assert(m_valid_handle.find(i) != m_valid_handle.end(),
                "invalid handle: %p", i);
    }
    SmallVector<TensorInfo*> input_infos;
    input_infos.reserve(inputs.size());
    SmallVector<LogicalTensorDesc> input_descs;
    input_descs.reserve(inputs.size());
    {
        // m_mutex guards info->invalid / info->desc against worker updates.
        MGB_LOCK_GUARD(m_mutex);
        for (auto i : inputs) {
            auto info = reinterpret_cast<TensorInfo*>(i);
            mgb_assert(!info->invalid, "an input tensor is unusable due to previous error");
            input_infos.push_back(info);
            input_descs.push_back(info->desc);
        }
    }

    SmallVector<Handle> outputs;
    // Host compute is opt-in; otherwise everything goes to the kernel path.
    DispatchMode dispatch_mode = state.options.enable_host_compute
            ? OpDef::decide_dispatch_mode(*op, input_descs)
            : DispatchMode::KERNEL;
    switch (dispatch_mode) {
        case DEFAULT_CPU: {
            dispatch_default_cpu(op, input_infos, input_descs, &outputs);
            break;
        }
        case KERNEL: {
            dispatch_kernel(op, input_infos, input_descs, &outputs);
            break;
        }
    }
    return outputs;
}

398
// Fetch the host value of a tensor, blocking until the worker has produced
// it and the host copy is available.
HostTensorND ChannelImpl::get_value(Handle handle) {
    MGB_LOCK_GUARD(m_spin);
    mgb_assert(check_available(), "Channel already closed");
    mgb_assert(m_valid_handle.find(handle) != m_valid_handle.end(),
               "invalid handle: %p", handle);
    auto info = reinterpret_cast<TensorInfo*>(handle);
    // do not use info->value_fetched, it's unsafe
    mgb_assert(!info->invalid, "tensor is unusable due to previous error");
    return wait_tensor(info, TensorProp::HostValue)->get_value();
}

409
// Shape query: answered immediately from the cached descriptor when the
// layout is already known, otherwise blocks on the worker.
TensorShape ChannelImpl::get_shape(Handle handle) {
    MGB_LOCK_GUARD(m_spin);
    mgb_assert(check_available(), "Channel already closed");
    mgb_assert(m_valid_handle.find(handle) != m_valid_handle.end(),
               "invalid handle: %p", handle);
    auto info = reinterpret_cast<TensorInfo*>(handle);
    if (info->desc.layout.ndim != 0) {
        // fast path: shape inference already determined the layout
        return info->desc.layout;
    }
    TensorShape ret = wait_tensor(info, TensorProp::Shape)->layout();
    mgb_assert(ret.ndim != 0);
    return ret;
}

423
// Dtype query: answered directly from the cached descriptor — no wait on
// the worker is needed.
DType ChannelImpl::get_dtype(Handle handle) {
    MGB_LOCK_GUARD(m_spin);
    mgb_assert(check_available(), "Channel already closed");
    mgb_assert(m_valid_handle.find(handle) != m_valid_handle.end(),
               "invalid handle: %p", handle);
    auto info = reinterpret_cast<TensorInfo*>(handle);
    RECORD_EVENT(TensorGetPropEvent, info->id, TensorProp::DType);
    auto dtype = info->desc.layout.dtype;
    mgb_assert(dtype.valid());
    return dtype;
}

435
// Device query: answered directly from the cached descriptor — no wait on
// the worker is needed.
CompNode ChannelImpl::get_device(Handle handle) {
    MGB_LOCK_GUARD(m_spin);
    mgb_assert(check_available(), "Channel already closed");
    mgb_assert(m_valid_handle.find(handle) != m_valid_handle.end(),
               "invalid handle: %p", handle);
    auto info = reinterpret_cast<TensorInfo*>(handle);
    RECORD_EVENT(TensorGetPropEvent, info->id, TensorProp::Device);
    auto cn = info->desc.comp_node;
    mgb_assert(cn.valid());
    return cn;
}

447
// Fetch the underlying device tensor, blocking until the worker has
// produced it.
DeviceTensorND ChannelImpl::get_dev_tensor(Handle handle) {
    MGB_LOCK_GUARD(m_spin);
    mgb_assert(check_available(), "Channel already closed");
    mgb_assert(m_valid_handle.find(handle) != m_valid_handle.end(),
               "invalid handle: %p", handle);
    auto info = reinterpret_cast<TensorInfo*>(handle);
    return wait_tensor(info, TensorProp::DevValue)->dev_tensor();
}

// Public sync: drain the command buffer and wait for the worker.
void ChannelImpl::sync() {
    MGB_LOCK_GUARD(m_spin);
    mgb_assert(check_available(), "Channel already closed");
    sync_impl();
}

// Flush pending commands, wait until the worker has processed them all,
// then re-raise any exception the worker recorded.
void ChannelImpl::sync_impl() {
    m_buffer.flush();
    m_worker.wait_all_task_finish();
    MGB_LOCK_GUARD(m_mutex);
    check_worker_exc_unsafe();
}

// Close the channel: delete every remaining handle, drain the worker, and
// mark the channel unusable. Idempotent — a second close is a no-op.
void ChannelImpl::close() {
    MGB_LOCK_GUARD(m_spin);
    if (!check_available()) {
        return;
    }
    // Snapshot first: del_impl mutates m_valid_handle while we iterate.
    std::vector<Handle> valid_handles(m_valid_handle.begin(), m_valid_handle.end());
    for (auto* handle: valid_handles) {
        del_impl(handle);
    }
    mgb_assert(m_valid_handle.empty());
    mgb_log_debug("%ld tensor exists before channel close", (long)valid_handles.size());
    sync_impl();
    m_closed = true;
}

484
// Read a named channel option from the channel-side option set.
size_t ChannelImpl::get_option(std::string name) {
    MGB_LOCK_GUARD(m_spin);
    mgb_assert(check_available(), "Channel already closed");
    auto& state = get_channel_state();
    return state.options.get_option(name);
}

491
// Set a named option on the channel side and forward it to the worker via
// the command buffer so both sides stay consistent.
void ChannelImpl::set_option(std::string name, size_t value) {
    MGB_LOCK_GUARD(m_spin);
    mgb_assert(check_available(), "Channel already closed");
    auto& state = get_channel_state();
    state.options.set_option(name, value);
    m_buffer.enqueue(SetOption{name, value});
}

// Allocate a fresh TensorInfo from the pool and assign it a profiler id
// (and, when profiling, a human-readable name derived from the call stack).
TensorInfo* ChannelImpl::alloc() {
    auto& state = get_channel_state();
    // Only the pool allocation needs the mutex; scope the lock tightly via
    // an immediately-invoked lambda.
    auto info = [this]{
        MGB_LOCK_GUARD(m_mutex);
        return m_pool.alloc();
    }();
    info->id = Profiler::next_id();
    if (Profiler::is_profiling()) {
        size_t tensor_id = state.stack_manager.current()->next_id("tensor");
        info->name = state.stack_manager.dump().to_string() + ssprintf(":%zu", tensor_id);
    }
    return info;
}

513 514 515 516 517
// Register a freshly allocated TensorInfo as a valid handle and seed its
// logical and memory descriptors from `desc`.
void ChannelImpl::init(TensorInfo* info, LogicalTensorDesc desc) {
    m_valid_handle.insert(info);
    RECORD_EVENT(TensorDeclareEvent, info->id, info->name);
    info->status = TensorInfo::Allocated;
    info->desc = std::move(desc);
    // memory descriptor mirrors the logical descriptor at declaration time
    info->mem_desc.layout = info->desc.layout;
    info->mem_desc.cn = info->desc.comp_node;
    info->mem_desc.offset = 0;
}

523 524 525 526 527 528 529 530 531 532 533 534

// Drop a tensor's storage (DTR): only possible when it has a recorded
// producer to recompute from. `user` marks a user-initiated drop, which
// warrants a warning when the drop must be ignored.
void ChannelImpl::do_drop(TensorInfo* ptr, bool user=false) {
    if (!ptr->producer) {
        if (user) {
            mgb_log_warn("the input that produced tensor %p has been deleted, this drop operation will be ignored", ptr);
        }
        return;
    }
    if (ptr->evict_type != EvictType::NONE) {
        // already swapped out or dropped — nothing to do
        return;
    }
    ptr->evict_type = EvictType::DROP;
    ptr->status = TensorInfo::Dropped;
    release_tensor(ptr);
}

539
// Free a tensor on the worker side. Under DTR auto-drop, freeing is
// deferred while other tensors still reference this one as a recompute
// input (ref_cnt > 0), in which case it is merely dropped.
void ChannelImpl::free(TensorInfo* ptr) {
    auto& state = get_worker_state();
    if (state.options.enable_dtr_auto_drop) {
        // Evicting a tensor, rather than freeing it, can avoid pinning
        // potentially exploding amounts of memory and allow us to save
        // more memory.
        ptr->allow_delete = true;
        if (!ptr->ref_cnt) {
            recursive_free(ptr);
        } else {
            do_drop(ptr);
        }
    } else {
        real_free(ptr);
    }
}

// Free `ptr` and then cascade to any recompute inputs whose reference count
// drops to zero and that are marked deletable.
void ChannelImpl::recursive_free(TensorInfo* ptr) {
    // Capture the id up front: real_free() returns `ptr` to m_pool, so
    // dereferencing it afterwards (as the previous code did for the finish
    // event) is a use-after-free.
    uint64_t ptr_id = ptr->id;
    RECORD_EVENT(TensorCommandEvent, ptr_id, TensorCommandKind::RecFree);
    SmallVector<TensorInfo*> inps;
    if (ptr->producer) {
        // Collect producer inputs that lose their last reference here.
        for (auto i : ptr->producer->inputs) {
            if (i && --i->ref_cnt == 0) {
                inps.push_back(i);
            }
        }
    }
    real_free(ptr);
    for (auto i : inps) {
        if (i->allow_delete) {
            recursive_free(i);
        }
    }
    RECORD_EVENT(TensorCommandFinishEvent, ptr_id, TensorCommandKind::RecFree);
}

// Immediately and unconditionally free a TensorInfo: unhook it from the
// DTR candidate set and the producer/user graph, emit profiler events, and
// return it to the pool.
void ChannelImpl::real_free(TensorInfo* ptr) {
    auto& state = get_worker_state();
    if (ptr->size_exceeds_thd(state.options.dtr_evictee_minimum_size)) {
        m_dtr.erase_candidate(ptr);
    }
    detach_users(ptr);
    ptr->detach_producer();
    bool has_value = ptr->ptr != nullptr;
    if (has_value) {
        RECORD_EVENT(TensorReleaseEvent, ptr->id);
    }
    RECORD_EVENT(TensorEraseEvent, ptr->id, ptr->ptr_use_count);
    ptr->status = TensorInfo::Deleted;
    // Only the pool operation itself needs the mutex.
    MGB_LOCK_GUARD(m_mutex);
    m_pool.free(ptr);
}

592
// The worker queue and command buffer each keep a back-pointer to the channel.
ChannelImpl::ChannelImpl() : m_worker(this), m_buffer(this){}
593

594 595 596
// Destructor delegates to close(), which is a no-op if already closed.
ChannelImpl::~ChannelImpl() {
    close();
}
597

598
// Worker-side: install a computed tensor value into `dest`, refresh its
// descriptors, register it as a DTR eviction candidate when large enough,
// and wake any channel-side waiter.
void ChannelImpl::produce_tensor(TensorInfo* dest, TensorPtr ptr) {
    auto& state = get_worker_state();
    MGB_LOCK_GUARD(m_mutex);
    m_dtr.update_used_time(dest);
    RECORD_EVENT(TensorProduceEvent, dest->id, ptr->layout(), ptr->comp_node(), ptr->dev_tensor().raw_ptr());
    // update tensor desc for static infer
    dest->desc.layout = ptr->layout();
    dest->desc.comp_node = ptr->comp_node();
    dest->memory = ptr->blob()->size();
    dest->ptr = std::move(ptr);
    dest->evict_type = EvictType::NONE;
    dest->status = TensorInfo::Produced;
    if (dest->size_exceeds_thd(state.options.dtr_evictee_minimum_size)) {
        m_dtr.insert_candidate(dest);
    }
    // m_mutex is held, hence the _unsafe variant.
    notify_tensor_unsafe(dest);
}

616
// Release only the tensor's storage (the TensorInfo itself survives, e.g.
// for later regeneration after a drop).
void ChannelImpl::release_tensor(TensorInfo* dest) {
    RECORD_EVENT(TensorReleaseEvent, dest->id);
    MGB_LOCK_GUARD(m_mutex);
    dest->ptr.reset();
}

622
// Rebuild an evicted tensor's value: dropped tensors are recomputed via the
// recorded producer op (pushed onto the apply stack), swapped-out tensors
// are restored from their host copy.
void ChannelImpl::regenerate(TensorInfo* dest) {
    if (dest->evict_type == EvictType::DROP) {
        auto &&path = dest->producer;
        m_apply_stack.push({ApplyOp{path->id, path->op, path->inputs, path->outputs, {}}, 0, dest});
        // If called from within flush_apply_stack (m_applying), the op was
        // only pushed; the outer loop will pick it up.
        if (!m_applying) flush_apply_stack();
    } else if (dest->evict_type == EvictType::SWAP) {
        RECORD_EVENT(TensorCommandEvent, dest->id, TensorCommandKind::ReGen);
        produce_tensor(dest, Tensor::make(dest->h_value));
        RECORD_EVENT(TensorCommandFinishEvent, dest->id, TensorCommandKind::ReGen);
    }
}

634 635 636
// Worker-side execution of one ApplyOp command: gather input tensors,
// optionally auto-evict under DTR pressure, run the op (recursing through
// make_forward_graph when the op expands to a subgraph), process fused Del
// commands, emit profiling events, and publish the outputs.
void ChannelImpl::do_apply_op(const ApplyOp& cmd) {
    using namespace ranges;
    using namespace ranges::views;
    auto& state = get_worker_state();
    bool profiling_device = Profiler::is_profiling() && Profiler::get_option("profile_device", 0);
    uint64_t apply_id = cmd.id;
    // Pair each input/output tensor with its memory descriptor.
    struct TensorWithDesc {
        TensorPtr tensor;
        MemoryDesc desc;
    };
    SmallVector<TensorWithDesc> inputs;
    inputs.reserve(cmd.inputs.size());
    // refcnt == 1, owners: [TensorInfo::ptr]
    for (auto i : cmd.inputs) {
        mgb_assert(i->ptr, "Invalid input tensor ptr!");
        // refcnt ++, owners: [i->ptr, tensor_inputs]
        // tensor_inputs.push_back(i->ptr);
        inputs.push_back({i->ptr, i->mem_desc});
    }
    if (state.options.enable_dtr_auto_drop && state.options.dtr_eviction_threshold > 0) {
        // best-effort eviction down to the configured threshold
        auto_evict(0);
    }
    // Recursive executor: `self` is passed explicitly so the lambda can
    // recurse into subgraphs produced by make_forward_graph.
    auto apply_on_physical_tensor = [&](auto&& self, const OpDef& def, SmallVector<TensorWithDesc> inputs) -> SmallVector<TensorWithDesc> {
        auto apply_functor = [&](std::shared_ptr<OpDef> op, SmallVector<TensorWithDesc> inputs, size_t nr_outputs) -> SmallVector<TensorWithDesc> {
            auto opname = op->trait()->make_name(*op);
            imperative_log_profile_begin(opname.c_str());
            auto outputs = self(self, *op, inputs);
            imperative_log_profile_end(opname.c_str());
            return outputs;
        };
        auto const_functor = [&](TensorPtr value) -> TensorWithDesc {
            return {value, MemoryDesc{value->layout(), 0, value->comp_node(), StorageIdentifier::make()}};
        };
        if (def.trait()->make_forward_graph) {
            // apply recursively
            SmallVector<LogicalTensorDesc> input_descs;
            for (auto&& input: inputs) {
                input_descs.push_back({{{}, input.tensor->dtype()}, input.tensor->comp_node()});
            }
            auto forward_graph = OpDef::make_forward_graph(def, input_descs);
            auto outputs = forward_graph.apply(inputs, apply_functor, const_functor);
            return outputs;
        }
        SmallVector<TensorPtr> input_tensors;
        SmallVector<MemoryDesc> input_descs;
        for (auto&& input: inputs) {
            input_tensors.push_back(input.tensor);
            input_descs.push_back(input.desc);
        }
        auto [output_descs, output_tensors, workspaces] = init_output_and_workspace(def, input_tensors, input_descs);
        if (!output_descs.empty()) {
            // outputs were pre-allocated: execute in place
            OpDef::execute(def, input_tensors, output_tensors, workspaces);
        } else {
            // fall back to letting the op allocate its own outputs
            output_tensors = OpDef::apply_on_physical_tensor(def, input_tensors);
            for (auto&& output_tensor: output_tensors) {
                output_descs.push_back(MemoryDesc{output_tensor->layout(), 0, output_tensor->comp_node(), StorageIdentifier::make()});
            }
        }
        SmallVector<TensorWithDesc> outputs;
        for (auto&& [output_tensor, output_desc]: ranges::zip_view(output_tensors, output_descs)) {
            outputs.push_back({output_tensor, output_desc});
        }
        return outputs;
    };
    RECORD_EVENT(OpExecuteEvent, apply_id);
    // Begin profiling operator
    SmallVector<std::pair<CompNode, uint64_t>> kernels;
    if (profiling_device) {
        // Collecting devices
        SmallVector<CompNode> devices;
        for (auto&& i : concat(cmd.inputs, cmd.outputs)) {
            if (i != nullptr && count(devices, i->desc.comp_node) == 0) {
                devices.push_back(i->desc.comp_node);
                kernels.push_back({i->desc.comp_node, Profiler::next_id()});
            }
        }
    }
    for (auto* input: cmd.inputs) {
        auto input_id = input->id;
        RECORD_EVENT(OpInputEvent, input_id);
        RECORD_EVENT(TensorUsageEvent, input_id);
        RECORD_EVENT(OpInputFinishEvent, input_id);
    }
    // Fused by command buffer. @see: CommandBuffer::fuse_del
    // Now if dest is inplacable, it's refcnt would be decreased to 1 and owned by tensor_inputs after Del.
    // Note for exprs like 'y = x op x', inplace is unsupported yet but Del would be also fused.
    for (auto* del : cmd.dels) {
        // refcnt --, owners: [tensor_inputs]
        // if it's decreased to 1, would be detected at @see: proxy_graph_detail::apply_on_physical_tensor
        uint64_t del_id = del->id;
        RECORD_EVENT(TensorCommandEvent, del_id, TensorCommandKind::Del);
        free(del);
        RECORD_EVENT(TensorCommandFinishEvent, del_id, TensorCommandKind::Del);
    }
    // Before wait
    //TODO: split operator wait and execute so that OpWait could be corrected recorded.
    // Before execute
    for (auto&& [device, kernel_id]: kernels) {
        RECORD_EVENT(KernelLaunchEvent, apply_id, kernel_id, device);
        RECORD_EVENT(RecordDeviceEvent, Timer::record_device(device));
    }
    // Apply op
    // Here std::move is REQUIRED for removing duplicated references.
    auto outputs = apply_on_physical_tensor(apply_on_physical_tensor, *cmd.op, inputs);
    // After execute
    for (auto&& [device, kernel_id]: kernels) {
        RECORD_EVENT(RecordDeviceEvent, Timer::record_device(device));
        RECORD_EVENT(KernelLaunchFinishEvent, apply_id, kernel_id, device);
    }
    // End profiling operator
    mgb_assert(outputs.size() == cmd.outputs.size());
    for (size_t i = 0; i < outputs.size(); ++i) {
        auto output = cmd.outputs[i];
        if (output == nullptr) {
            // output slot was dropped by the command buffer
            RECORD_EVENT(OpOutputEvent, 0);
            RECORD_EVENT(OpOutputFinishEvent, 0);
        } else if (output->ptr != nullptr) {
            // output already has a value (e.g. in-place) — do not overwrite
            RECORD_EVENT(OpOutputEvent, output->id);
            RECORD_EVENT(OpOutputFinishEvent, output->id);
        } else {
            RECORD_EVENT(OpOutputEvent, output->id);
            produce_tensor(output, outputs[i].tensor);
            output->mem_desc = outputs[i].desc;
            RECORD_EVENT(OpOutputFinishEvent, output->id);
            sample_on_device(output->desc.comp_node, false);
        }
    }

    if (state.options.enable_dtr_auto_drop) {
        // Crude compute-cost model: total bytes touched, scaled by 1e8.
        double estimate_compute_time = 0;
        for (auto i : cmd.inputs) {
            estimate_compute_time += i->memory;
        }
        for (auto i : outputs) {
            estimate_compute_time += i.tensor->blob()->size();
        }
        m_dtr.estimate_timestamp += estimate_compute_time / 1e8;
        for (auto i : cmd.outputs) {
            if (i != nullptr) {
                i->compute_time = estimate_compute_time;
            }
        }
        // inputs were pinned in flush_apply_stack; release them now
        m_dtr.unpin(cmd.inputs);
    }
    RECORD_EVENT(OpExecuteFinishEvent, apply_id);
    // End profiling operator
}
781

782 783
// Drain m_apply_stack: execute each pending ApplyOp, recursively regenerating
// any dropped inputs first (regenerate() pushes further ApplyOps onto the
// stack instead of recursing, so deep recompute chains do not recurse in C++).
void ChannelImpl::flush_apply_stack() {
    // Guard so that regenerate() knows we are already inside the loop.
    m_applying = true;
    auto& state = get_worker_state();
    while (!m_apply_stack.empty()) {
        auto& [cmd, idx, recomp] = m_apply_stack.top(); // cmd.inputs[0~idx-1] is in memory
        if (idx == 0) {
            if (state.options.enable_dtr_auto_drop) {
                // pin inputs so auto-eviction cannot drop them mid-op
                m_dtr.pin(cmd.inputs);
            }
            if (recomp) {
                RECORD_EVENT(TensorCommandEvent, recomp->id, TensorCommandKind::ReGen);
            }
        }
        bool regen = false;
        for (size_t i = idx; i < cmd.inputs.size(); i ++) {
            auto&& p = cmd.inputs[i];
            if (state.options.enable_dtr_auto_drop) {
                m_dtr.update_used_time(p);
            }
            if (!p->ptr && p->evict_type != EvictType::NONE) {
                // resume from i+1 once the regenerated input is available
                idx = i + 1;
                regenerate(p); // add ApplyOp to the stack
                regen = true;
                break;
            }
        }
        if (regen) continue;
        // the required input tensors are already in memory
        // Copy before pop: do_apply_op may push to the stack and invalidate
        // the structured-binding references into top().
        auto cmd_backup = cmd;
        auto recomp_backup = recomp;
        m_apply_stack.pop();
        do_apply_op(cmd_backup);
        if (recomp_backup) {
            RECORD_EVENT(TensorCommandFinishEvent, recomp_backup->id, TensorCommandKind::ReGen);
            for (auto o : cmd_backup.outputs) {
                if (o) {
                    m_dtr.update_dsu_after_recompute(o);
                }
            }
        }
    }
    m_applying = false;
}

826
bool ChannelImpl::auto_evict(size_t force_num) {
827
    auto& state = get_worker_state();
828
    if (!m_dtr.comp_node.valid()) {
829
        return false;
830 831
    }
    size_t current_memory = m_dtr.comp_node.get_used_memory();
832 833
    size_t flag = false;
    while ((state.options.dtr_eviction_threshold > 0 && current_memory > state.options.dtr_eviction_threshold) || force_num > 0) {
834
        RECORD_EVENT(AutoEvictEvent);
835
        sample_on_device(m_dtr.comp_node, false);
836
        auto best = m_dtr.find_best_tensor(state.options.enable_dtr_sqrt_sampling && !force_num);
837 838 839 840 841
        if (!best) {
            break;
        }
        if (best->ptr.unique() && best->ptr->blob().unique()) {
            current_memory -= best->memory;
842 843 844 845
            if (force_num > 0) {
                force_num --;
            }
            flag = true;
846 847 848 849
        }
        do_drop(best);
        if (best->evict_type == EvictType::DROP) {
            m_dtr.update_dsu_after_evict(best);
850
        }
851
        sample_on_device(m_dtr.comp_node, false);
852
        RECORD_EVENT(AutoEvictFinishEvent);
853
    }
854
    return flag;
855 856
}

857 858 859
void ChannelImpl::detach_users(TensorInfo* dest) {
    SmallVector<TensorInfo::ComputePath*> users = dest->users;
    for (auto* user: users) {
860 861 862
        SmallVector<TensorInfo*> outputs = user->outputs;
        SmallVector<TensorInfo*> inputs = user->inputs;
        for (auto* output: outputs) {
863 864 865 866
        // When a `ComputePath` is detach from it's input,
        // there is no need to reserve it,
        // so we detach all output of this path
        // to decrease it's `ref_cnt` to zero.
867 868 869 870 871
            if (output == nullptr) {
                continue;
            }
            regenerate(output);
            output->detach_producer();
872 873 874
            for (auto* input: inputs) {
                input->ref_cnt --;
            }
875
        }
876
        // now user is dead
877
    }
878
    mgb_assert(dest->users.empty(), "ComputePath leaking");
879 880
}

881 882 883 884
// Whether the channel can still accept commands; false once close() has run.
bool ChannelImpl::check_available() {
    return !m_closed;
}

885 886 887 888 889 890 891 892
TensorPtr ChannelImpl::wait_tensor(TensorInfo* info, TensorProp prop) {
    m_buffer.flush();
    std::unique_lock<decltype(m_mutex)> lock(m_mutex);
    mgb_assert(!m_waitee, "duplicate waitee");
    m_waitee = info;
    m_waitee_id = Profiler::next_id();
    RECORD_EVENT(TensorWaitPropEvent, info->id, m_waitee_id, prop);
    bool require_host = prop == TensorProp::HostValue;
893 894 895 896 897 898 899 900 901 902
    auto host_available = [&]{
        return info->ptr && info->ptr->value_fetched();
    };
    if (require_host && !host_available()) {
        // avoid dead lock
        lock.unlock();
        m_buffer.enqueue(GetValue{info});
        m_buffer.flush();
        lock.lock();
    }
903 904
    m_cv.wait(lock, [&]() {
        check_worker_exc_unsafe();
905
        return require_host ? host_available() : static_cast<bool>(info->ptr);
906
    });
907
    RECORD_EVENT(TensorWaitPropFinishEvent, info->id, m_waitee_id, prop);
908
    m_waitee = nullptr;
909 910 911 912 913 914 915
    return info->ptr;
}

// Wake threads blocked in wait_tensor() if they wait on exactly this tensor.
// Caller must hold m_mutex ("unsafe").
void ChannelImpl::notify_tensor_unsafe(TensorInfo* info) {
    if (info != m_waitee) {
        return;
    }
    RECORD_EVENT(TensorNotifyPropEvent, info->id);
    m_cv.notify_all();
}

std::unordered_set<TensorInfo*> ChannelImpl::collect_valid_tensors() {
    std::unordered_set<TensorInfo*> valid_tensors;
    for (auto* handle: m_valid_handle) {
        auto* info = reinterpret_cast<TensorInfo*>(handle);
        valid_tensors.insert(info);
924
    }
925
    return valid_tensors;
926 927
}

928
void ChannelImpl::alloc_tensor_with_evict(Blob* x) {
929 930 931 932 933 934 935 936 937 938 939
    auto reserve_size = [&](size_t size) {
        if (!m_dtr.comp_node.valid()) {
            return false;
        }
        while (size > m_dtr.comp_node.get_max_block_size_available()) {
            bool evict_suc = auto_evict(1);
            if (!evict_suc) return false;
        }
        return true;
    };
    auto pre_level = set_log_level(LogLevel::NO_LOG);
940 941
    reserve_size(x->size());
    MGB_TRY { BlobManager::inst()->alloc_direct(x, x->size()); }
942 943 944 945 946 947
    MGB_CATCH(MemAllocError&, {
        bool suc = false;
        while (!suc) {
            if (!auto_evict(1)) {
                break;
            }
948
            MGB_TRY { BlobManager::inst()->alloc_direct(x, x->size()); }
949 950 951 952 953 954 955
            MGB_CATCH(MemAllocError&, { continue; });
            suc = true;
        }
        if (!suc) {
            set_log_level(pre_level);
            mgb_log_warn("reallocating all cuda memory to alleviate fragmentation, the performance may be affected");
            set_log_level(LogLevel::NO_LOG);
956
            BlobManager::inst()->defrag(x->comp_node());
957
            BlobManager::inst()->alloc_direct(x, x->size());
958 959 960 961 962
        }
    });
    set_log_level(pre_level);
}

963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979
std::tuple<SmallVector<MemoryDesc>, SmallVector<TensorPtr>, SmallVector<TensorPtr>> ChannelImpl::init_output_and_workspace(
        const OpDef& def,
        SmallVector<TensorPtr> inputs,
        SmallVector<MemoryDesc> inputs_mem_desc) {

    auto [outputs_desc, workspaces_desc] = OpDef::infer_output_mem_desc(def, inputs, inputs_mem_desc);
    if (!outputs_desc.size()) {
        // failed to infer memplan
        return {{}, {}, {}};
    }
    // refine storage id to make it unique
    for (auto&& desc : outputs_desc) {
        if (desc.id->is_sys_alloc()) {
            // TODO: there may be some outputs sharing the same storage id
            desc.id->id = ++ m_storage_id;
        }
    }
980
    auto& state = get_worker_state();
981 982 983 984 985
    auto alloc_storage = [&](SmallVector<MemoryDesc>& desc) {
        SmallVector<TensorPtr> tensors;
        for (size_t i = 0; i < desc.size(); i ++) {
            if (desc[i].id->is_sys_alloc()) {
                tensors.push_back(Tensor::make(desc[i].layout, desc[i].cn));
986
                if (state.options.enable_dtr_auto_drop && !desc[i].layout.is_empty()) {
987
                    alloc_tensor_with_evict(tensors.back()->blob().get());
988
                }
989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003
            } else if (desc[i].id->is_from_other()) {
                for (size_t j = 0; j < inputs_mem_desc.size();j ++) {
                    if (inputs_mem_desc[j].id->desc == desc[i].id->desc) {
                        tensors.push_back(inputs[j]->sub(desc[i].offset, desc[i].layout));
                        break;
                    }
                }
            } else if (desc[i].id->is_device_ptr()) {
                tensors.push_back(desc[i].id->ptr);
            } else {
                mgb_assert(0, "not implemented");
            }
        }
        return tensors;
    };
1004

1005 1006 1007
    return {outputs_desc, alloc_storage(outputs_desc), alloc_storage(workspaces_desc)};
}

1008
void ChannelImpl::process_one_task(Command& icmd) {
1009 1010
    using namespace ranges;
    using namespace ranges::views;
1011
    auto& state = get_worker_state();
1012
    auto& options = state.options;
1013
    //TODO: remove std::visit for support osx 10.12
1014 1015
    auto cmd_visitor = [&](const auto& cmd) {
            using T = std::decay_t<decltype(cmd)>;
1016
            if constexpr (std::is_same_v<T, Put>) {
1017 1018
                RECORD_EVENT(TensorCommandEvent, cmd.dest->id, TensorCommandKind::Put);
                RECORD_EVENT(RecordDeviceEvent, Timer::record_device(cmd.value.comp_node()));
1019
                auto value = cmd.no_cache ? std::make_shared<Tensor>(cmd.value) : Tensor::make(cmd.value);
1020
                RECORD_EVENT(RecordDeviceEvent, Timer::record_device(cmd.value.comp_node()));
1021
                produce_tensor(cmd.dest, std::move(value));
1022
                RECORD_EVENT(TensorCommandFinishEvent, cmd.dest->id, TensorCommandKind::Put);
1023
                sample_on_device(cmd.dest->desc.comp_node, false);
1024
            } else if constexpr (std::is_same_v<T, ApplyOp>) {
1025 1026 1027 1028 1029 1030 1031 1032 1033
                for (auto& i : cmd.inputs) {
                    if (i->invalid) {
                        MGB_LOCK_GUARD(m_mutex);
                        for (auto& i : cmd.outputs) {
                            i->invalid = true;
                        }
                        return;
                    }
                }
1034 1035
                m_apply_stack.push({cmd, 0, nullptr});
                flush_apply_stack();
1036 1037 1038
                for (size_t i = 0; i < cmd.outputs.size(); ++i) {
                    auto output = cmd.outputs[i];
                    if (output == nullptr) {
1039 1040
                        continue;
                    }
1041
                    if (state.options.enable_dtr_auto_drop) {
1042
                        output->dsu_ptr = std::make_shared<DsuNode>(output->compute_time);
1043 1044
                    }
                }
1045 1046 1047 1048 1049 1050
                if (state.options.enable_drop && state.options.record_computing_path) {
                    auto is_inplace = [](std::tuple<TensorInfo*, TensorInfo*> tuple2) {
                        auto& input = std::get<0>(tuple2);
                        auto& output = std::get<1>(tuple2);
                        if (!input->ptr || !output->ptr) {
                            return false;
1051
                        }
1052 1053
                        return input->ptr->blob()->storage() == output->ptr->blob()->storage();
                    };
1054 1055 1056 1057 1058 1059 1060
                    // FIXME: do not use opname as identifier
                    auto get_name = [](const OpDef& opdef) {
                        if (auto attr = opdef.try_cast_final<OprAttr>()) {
                            return attr->type.c_str();
                        }
                        return opdef.dyn_typeinfo()->name;
                    };
1061 1062 1063 1064 1065 1066 1067

                    auto is_cross_cn = [comp_node=m_dtr.comp_node](TensorInfo* info){
                        return info->desc.comp_node != comp_node;
                    };

                    bool cross_cn = any_of(concat(cmd.inputs, cmd.outputs), is_cross_cn);
                    bool inplace = any_of(cartesian_product(cmd.inputs, cmd.outputs), is_inplace);
1068

1069 1070
                    if (!inplace && !cross_cn && !m_dtr.is_bad_op(get_name(*cmd.op))) {
                        TensorInfo::ComputePath::make(cmd.id, cmd.op, cmd.inputs, cmd.outputs);
1071
                        size_t detach_cnt = 0;
1072 1073 1074 1075 1076 1077 1078
                        if (!strcmp(get_name(*cmd.op), "BatchNorm") && cmd.outputs.size() == 5) {
                            cmd.outputs[0]->detach_producer(); // detach running_mean
                            cmd.outputs[1]->detach_producer(); // detach running_var
                            for (auto input : cmd.inputs) {
                                input->ref_cnt -= 2;
                            }
                        }
1079
                        for (auto output : cmd.outputs) {
1080
                            if (output->producer && !output->size_exceeds_thd(state.options.dtr_evictee_minimum_size)) {
1081 1082 1083 1084 1085 1086 1087 1088
                                output->detach_producer();
                                detach_cnt ++;
                            }
                        }
                        for (auto input : cmd.inputs) {
                            input->ref_cnt -= detach_cnt;
                        }
                    }
1089 1090
                }
            } else if constexpr (std::is_same_v<T, Del>) {
1091
                RECORD_EVENT(TensorCommandEvent, cmd.dest->id, TensorCommandKind::Del);
1092 1093
                CompNode device = cmd.dest->desc.comp_node;
                uint64_t tensor_id = cmd.dest->id;
1094
                free(cmd.dest);
1095
                RECORD_EVENT(TensorCommandFinishEvent, tensor_id, TensorCommandKind::Del);
1096
                sample_on_device(device, false);
1097
            } else if constexpr (std::is_same_v<T, GetValue>) {
1098
                if (cmd.dest->invalid) return;
1099
                imperative_log_profile_begin("GetValue");
1100 1101 1102
                if (!cmd.dest->ptr && cmd.dest->evict_type != EvictType::NONE) {
                    regenerate(cmd.dest);
                }
1103 1104
                cmd.dest->ptr->fetch_value();
                MGB_LOCK_GUARD(m_mutex);
1105
                notify_tensor_unsafe(cmd.dest);
1106
                imperative_log_profile_end("GetValue");
1107
            } else if constexpr (std::is_same_v<T, SwapIn>) {
1108
                if (cmd.dest->invalid) return;
1109
                RECORD_EVENT(TensorCommandEvent, cmd.dest->id, TensorCommandKind::SwapIn);
1110
                produce_tensor(cmd.dest, Tensor::make(cmd.dest->h_value));
1111
                RECORD_EVENT(TensorCommandFinishEvent, cmd.dest->id, TensorCommandKind::SwapIn);
1112
                sample_on_device(cmd.dest->desc.comp_node, false);
1113
            } else if constexpr (std::is_same_v<T, SwapOut>) {
1114
                if (cmd.dest->invalid) return;
1115
                RECORD_EVENT(TensorCommandEvent, cmd.dest->id, TensorCommandKind::SwapOut);
1116
                cmd.dest->h_value = cmd.dest->ptr->get_value();
1117 1118
                if (cmd.dest->evict_type == EvictType::NONE) {
                    cmd.dest->evict_type = EvictType::SWAP;
1119 1120
                    cmd.dest->status = TensorInfo::Swapped;
                    release_tensor(cmd.dest);
1121
                }
1122
                RECORD_EVENT(TensorCommandFinishEvent, cmd.dest->id, TensorCommandKind::SwapOut);
1123
                sample_on_device(cmd.dest->desc.comp_node, false);
1124
            } else if constexpr (std::is_same_v<T, Drop>) {
1125
                if (cmd.dest->invalid) return;
1126
                RECORD_EVENT(TensorCommandEvent, cmd.dest->id, TensorCommandKind::Drop);
1127
                do_drop(cmd.dest, true);
1128
                RECORD_EVENT(TensorCommandFinishEvent, cmd.dest->id, TensorCommandKind::Drop);
1129
            } else if constexpr (std::is_same_v<T, SetOption>) {
1130
                options.set_option(cmd.key, cmd.value);
1131
            } else if constexpr (std::is_same_v<T, StartProfile>) {
1132
                RECORD_EVENT(StartProfileEvent);
1133
                CompNode::sync_all();
1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144
                for (auto* info: cmd.capture_tensors) {
                    RECORD_EVENT(TensorDeclareEvent, info->id, info->name);
                    if (info->status == TensorInfo::Produced) {
                        // TODO: handle swap/drop
                        RECORD_EVENT(TensorProduceEvent, info->id, info->desc.layout, info->desc.comp_node, info->ptr->dev_tensor().raw_ptr());
                    }
                }
                CompNode::foreach([&](CompNode device){
                    if (Profiler::get_option("sample_rate", 0)) {
                        sample_on_device(device, true);
                    }
1145
                    RECORD_EVENT(RecordDeviceEvent, Timer::record_device(device));
1146 1147
                });
                RECORD_EVENT(StartProfileFinishEvent);
1148
            } else if constexpr (std::is_same_v<T, StopProfile>) {
1149 1150 1151 1152 1153 1154 1155
                RECORD_EVENT(StopProfileEvent);
                for (auto* info: cmd.escape_tensors) {
                    bool has_value = info->status == TensorInfo::Produced;
                    if (has_value) {
                        RECORD_EVENT(TensorReleaseEvent, info->id);
                    }
                    RECORD_EVENT(TensorEraseEvent, info->id);
1156
                }
1157 1158 1159
                CompNode::foreach([&](CompNode device){
                    if (Profiler::get_option("sample_rate", 0)) {
                        sample_on_device(device, true);
1160
                    }
1161 1162
                });
                RECORD_EVENT(StopProfileFinishEvent);
1163
            } else if constexpr (std::is_same_v<T, PushScope>) {
1164
                RECORD_EVENT(ScopeEvent, cmd.scope_name);
1165
            } else if constexpr (std::is_same_v<T, PopScope>) {
1166
                RECORD_EVENT(ScopeFinishEvent, cmd.scope_name);
1167
            } else {
1168
                static_assert(!std::is_same_v<T, T>);
1169
            }
1170
    };
1171
    std::visit([&](const auto& cmd){
1172
        using T = std::decay_t<decltype(cmd)>;
1173
        if (!options.catch_worker_execption) {
1174 1175 1176 1177 1178
            cmd_visitor(cmd);
            return;
        }
        try {
            cmd_visitor(cmd);
1179 1180
        } catch (...) {
            MGB_LOCK_GUARD(m_mutex);
1181 1182 1183 1184 1185 1186 1187
            if constexpr (std::is_same_v<T, ApplyOp>) {
                for (auto oup : cmd.outputs) {
                    oup->invalid = true;
                }
            } else if constexpr (std::is_same_v<T, Put>) {
                cmd.dest->invalid = true;
            }
1188
            m_worker_exc = std::current_exception();
1189 1190 1191 1192
            RECORD_EVENT(WorkerExceptionEvent);
            if (m_waitee) {
                notify_tensor_unsafe(m_waitee);
            }
1193
        }
1194
    }, icmd.data);
1195 1196 1197 1198
}

void ChannelImpl::check_worker_exc_unsafe() {
    if (m_worker_exc) {
1199 1200
        // for reuse interpreter_for_py after some exception tests
        m_waitee = nullptr;
1201 1202
        std::exception_ptr exc;
        std::swap(exc, m_worker_exc);
1203 1204 1205 1206 1207
        try {
            std::rethrow_exception(exc);
        } catch (...) {
            throw AsyncError();
        }
1208 1209
    }
}
1210

1211 1212
void ChannelImpl::CommandBuffer::enqueue(CommandData cmd) {
    auto& state = m_owner->get_channel_state();
1213 1214 1215
    if (std::get_if<Del>(&cmd) && fuse_del(std::get<Del>(cmd))) {
        return;
    }
1216
    // mgb_log_debug("%s Enqueued", to_string(cmd).c_str());
1217
    m_commands.push_back({Profiler::next_id(), std::move(cmd), state.stack_manager.dump()});
1218 1219 1220 1221
    auto flush_pos = flush_pos_for(m_commands.back());
    flush(flush_pos);
}

1222 1223 1224 1225
void ChannelImpl::CommandBuffer::flush() {
    flush(m_commands.end());
}

1226 1227
void ChannelImpl::CommandBuffer::flush(Handle pos) {
    for (auto iter = m_commands.begin(); iter != pos; ++iter) {
1228 1229 1230
        if (Profiler::is_profiling()) {
            mgb_log_debug("%s Flushed", to_string(*iter).c_str());
        }
1231
        m_owner->m_worker.add_task(std::move(*iter));
1232 1233 1234 1235 1236
    }
    m_commands.erase(m_commands.begin(), pos);
}

// Decide how much of the buffer must be flushed after appending `cmd`:
// everything for communication/callback ops and GetValue, otherwise only
// enough to keep at most `buffer_length` commands pending.
auto ChannelImpl::CommandBuffer::flush_pos_for(const Command& cmd) -> Handle {
    auto& state = m_owner->get_channel_state();
    return std::visit([this, &state](const auto& cmd) {
        using T = std::decay_t<decltype(cmd)>;
        if constexpr (std::is_same_v<T, ApplyOp>) {
            // these ops have cross-process side effects and must not linger
            auto* op_type = cmd.op->dyn_typeinfo();
            if (op_type == RemoteRecv::typeinfo() ||
                op_type == RemoteSend::typeinfo() ||
                op_type == CollectiveComm::typeinfo() ||
                op_type == opr::InputCallback::typeinfo() ||
                op_type == opr::OutputCallback::typeinfo()) {
                return m_commands.end();
            }
        } else if constexpr (std::is_same_v<T, GetValue>) {
            // someone is about to wait on this value
            return m_commands.end();
        }
        size_t buffer_length = state.options.buffer_length;
        if (m_commands.size() > buffer_length) {
            return m_commands.begin() + (m_commands.size() - buffer_length);
        }
        return m_commands.begin();
    }, cmd.data);
}

/**
 * 1. Find ApplyOp(dest) in buffered commands
 * 2. Check if there are other usages between ApplyOp and Del, return false if not
 * 3. Fuse Del into ApplyOp, return true
 */
bool ChannelImpl::CommandBuffer::fuse_del(const Del& cmd) {
    auto* dest = cmd.dest;
    // TODO: eliminate Puts
    auto begin = m_commands.begin(), end = m_commands.end();
    auto apply_iter = std::find_if(begin, end, [dest](const Command& cmd){
1270
        if (auto* apply = std::get_if<ApplyOp>(&cmd.data)) {
1271 1272 1273 1274 1275 1276 1277
            return std::count(apply->inputs.begin(), apply->inputs.end(), dest) > 0;
        }
        return false;
    });
    if (apply_iter == end || find_last_usage(dest, {apply_iter+1, end}) != end) {
        return false;
    }
1278
    // mgb_log_debug("%s Fused", to_string(Command{cmd}).c_str());
1279
    std::get<ApplyOp>(apply_iter->data).dels.push_back(dest);
1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305
    return true;
}

// Scan [range[0], range[1]) and return the last buffered command that still
// uses `dest`, or range[1] when there is none.
auto ChannelImpl::CommandBuffer::find_last_usage(TensorInfo* dest, Range range)
        -> Handle {
    auto found = range[1];
    for (auto iter = range[0]; iter != range[1]; ++iter) {
        std::visit([&](const auto& cmd) {
            using T = std::decay_t<decltype(cmd)>;
            if constexpr (std::is_same_v<T, ApplyOp>) {
                if (std::count(cmd.inputs.begin(), cmd.inputs.end(),
                               dest) > 0) {
                    found = iter;
                }
            } else if constexpr (std::is_same_v<T, GetValue>) {
                if (cmd.dest == dest) {
                    found = iter;
                }
            } else if constexpr (std::is_same_v<T, SwapIn> ||
                    std::is_same_v<T, SwapOut> ||
                    std::is_same_v<T, Drop>) {
                // TODO: ignore swap-like commands, just remove them from buffer
                if (cmd.dest == dest) {
                    found = iter;
                }
            }
        }, iter->data);
    }
    return found;
}

// Return the first buffered command in [range[0], range[1]) that produces
// `dest` (an ApplyOp listing it as output, or a Put targeting it).
auto ChannelImpl::CommandBuffer::find_produce(TensorInfo* dest, Range range)
        -> Handle {
    return std::find_if(range[0], range[1], [dest](auto& entry) {
        return std::visit([dest](const auto& cmd){
            using T = std::decay_t<decltype(cmd)>;
            if constexpr (std::is_same_v<T, ApplyOp>) {
                return std::count(cmd.outputs.begin(), cmd.outputs.end(), dest) > 0;
            } else if constexpr (std::is_same_v<T, Put>) {
                return cmd.dest == dest;
            }
            return false;
        }, entry.data);
    });
}
1325

1326
void ChannelImpl::start_profile() {
1327
    MGB_LOCK_GUARD(m_spin);
1328
    mgb_assert(check_available(), "Channel already closed");
1329 1330 1331 1332
    auto capture_tensors = collect_valid_tensors();
    if (capture_tensors.size() > 0) {
        m_buffer.enqueue(StartProfile{std::move(capture_tensors)});
    }
1333 1334
}

1335
void ChannelImpl::stop_profile() {
1336
    MGB_LOCK_GUARD(m_spin);
1337
    mgb_assert(check_available(), "Channel already closed");
1338
    m_buffer.flush();
1339 1340 1341 1342
    auto escape_tensors = collect_valid_tensors();
    if (escape_tensors.size() > 0) {
        m_buffer.enqueue(StopProfile{std::move(escape_tensors)});
    }
1343 1344 1345
}

void ChannelImpl::push_scope(std::string name) {
1346
    MGB_LOCK_GUARD(m_spin);
1347
    mgb_assert(check_available(), "Channel already closed");
1348
    auto& state = get_channel_state();
1349
    state.stack_manager.enter(name);
1350
    RECORD_EVENT(ScopeEvent, name);
1351
    m_buffer.enqueue(PushScope{name});
1352 1353 1354
}

void ChannelImpl::pop_scope(std::string name) {
1355
    MGB_LOCK_GUARD(m_spin);
1356
    mgb_assert(check_available(), "Channel already closed");
1357
    auto& state = get_channel_state();
1358
    state.stack_manager.exit(name);
1359
    RECORD_EVENT(ScopeFinishEvent, name);
1360
    m_buffer.enqueue(PopScope{name});
1361 1362
}

1363 1364 1365 1366 1367 1368 1369 1370
void ChannelImpl::assert_in_channel() {
    mgb_assert(get_worker_tid() != std::this_thread::get_id(), "this method cannot be called in worker thread");
}

// Guard: the calling thread must be the worker thread.
void ChannelImpl::assert_in_worker() {
    mgb_assert(get_worker_tid() == std::this_thread::get_id(), "this method can only be called in worker thread");
}

1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383
void ChannelImpl::sample_on_device(CompNode device, bool force) {
    if (!force) {
        thread_local int last_sample_id = 0;
        int sample_rate = Profiler::is_profiling() ? Profiler::get_option("sample_rate", 0) : 0;
        if (!sample_rate || ((++last_sample_id) % sample_rate != 0)) {
            return;
        }
    }
    RECORD_EVENT(SampleDeviceEvent, device);
    auto [total, free] = device.get_mem_status_bytes();
    RECORD_EVENT(SampleDeviceFinishEvent, device, total, free);
}

1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438
void ChannelImpl::DynamicSublinear::pin(const SmallVector<TensorInfo*>& vec) {
    for (auto i : vec) {
        i->pin();
    }
}

// Undo pin(): make the tensors evictable again.
void ChannelImpl::DynamicSublinear::unpin(const SmallVector<TensorInfo*>& vec) {
    for (auto* info : vec) {
        info->unpin();
    }
}

// A recomputed tensor leaves its eviction cluster: subtract its compute time
// from the cluster root and reset it to a singleton DSU node.
void ChannelImpl::DynamicSublinear::update_dsu_after_recompute(TensorInfo* ptr) {
    auto&& dsu_fa = find_father(ptr->dsu_ptr);
    dsu_fa->t -= ptr->compute_time;
    ptr->dsu_ptr->parent.reset();
    ptr->dsu_ptr->t = ptr->compute_time;
}

// An evicted tensor joins the eviction clusters of its dropped neighbors
// (producer inputs and sibling outputs), so their recompute costs aggregate.
void ChannelImpl::DynamicSublinear::update_dsu_after_evict(TensorInfo* ptr) {
    for (auto i : ptr->producer->inputs) {
        if (i->evict_type == EvictType::DROP) {
            merge(i->dsu_ptr, ptr->dsu_ptr);
        }
    }
    for (auto i : ptr->producer->outputs) {
        if (i && i->evict_type == EvictType::DROP) {
            merge(ptr->dsu_ptr, i->dsu_ptr);
        }
    }
}

// Estimated recompute cost of evicting `ptr`: sum of the cluster costs of its
// already-dropped neighbors (each at least that neighbor's own compute time).
double ChannelImpl::DynamicSublinear::estimate_neighbor_cost(TensorInfo* ptr) {
    double cost = 0;
    auto accumulate = [&](TensorInfo* info) {
        double t = find_father(info->dsu_ptr)->t;
        if (t < info->compute_time) {
            t = info->compute_time;
        }
        cost += t;
    };
    for (auto i : ptr->producer->inputs) {
        if (i->evict_type == EvictType::DROP) {
            accumulate(i);
        }
    }
    for (auto i : ptr->producer->outputs) {
        if (i && i->evict_type == EvictType::DROP) {
            accumulate(i);
        }
    }
    return cost;
}

1439
TensorInfo* ChannelImpl::DynamicSublinear::find_best_tensor(bool enable_dtr_sqrt_sampling=false) {
1440 1441
    double min_msps = -1;
    TensorInfo* best = nullptr;
1442 1443 1444 1445 1446 1447
    size_t sz = 1;
    if (enable_dtr_sqrt_sampling) {
        while (sz * sz <= candidates.size()) sz ++;
    } else {
        sz = candidates.size();
    }
1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459
    for (auto i : candidates) {
        if (i->producer && i->ptr && !i->pinned && i->evict_type == EvictType::NONE) {
            double neighbor_cost = estimate_neighbor_cost(i);
            size_t begin_ptr = reinterpret_cast<size_t>(i->ptr->blob()->storage().get());
            auto side_info = i->ptr->comp_node().get_free_left_and_right(begin_ptr, begin_ptr + i->ptr->blob()->size());
            double free_mem = side_info.first + side_info.second;
            double msps = i->eval_func(neighbor_cost, free_mem, estimate_timestamp, 1.0, 1.0, 1.0, 1.0001);
            if (min_msps < 0 || msps < min_msps) {
                min_msps = msps;
                best = i;
            }
        }
1460
        if (--sz == 0) break;
1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497
    }
    return best;
}

// Union the DSU sets of x and y: attach x's root under y's root and move the
// accumulated recompute time onto the surviving root. No-op if already merged.
void ChannelImpl::DynamicSublinear::merge(std::shared_ptr<DsuNode> &x, std::shared_ptr<DsuNode> &y) {
    auto&& f_x = find_father(x);
    auto&& f_y = find_father(y);
    if (f_x.get() == f_y.get()) {
        return;
    }
    f_y->t += f_x->t;
    f_x->parent = f_y;
}

// Find the DSU root of x, compressing the path on the way back up so later
// lookups are O(1) amortized.
std::shared_ptr<DsuNode> ChannelImpl::DynamicSublinear::find_father(std::shared_ptr<DsuNode>& x) {
    if (x->is_root()) {
        return x;
    } else {
        auto&& fa = find_father(x->parent);
        return x->parent = fa;
    }
}

// Register an eviction candidate; the comp_node of the first candidate ever
// seen defines the device this DTR instance manages.
void ChannelImpl::DynamicSublinear::insert_candidate(TensorInfo* ptr) {
    candidates.insert(ptr);
    if (!comp_node.valid()) {
        comp_node = ptr->ptr->comp_node();
    }
}

// Remove a tensor from the eviction candidate set (e.g. when it is freed).
void ChannelImpl::DynamicSublinear::erase_candidate(TensorInfo* ptr) {
    candidates.erase(ptr);
}

// Stamp the tensor with the current estimated timestamp; recency feeds the
// eviction heuristic in find_best_tensor().
void ChannelImpl::DynamicSublinear::update_used_time(TensorInfo* ptr) {
    ptr->last_used_time = estimate_timestamp;
}