/**
 * \file lite/load_and_run/src/options/io_options.cpp
 *
 * This file is part of MegEngine, a deep learning framework developed by
 * Megvii.
 *
 * \copyright Copyright (c) 2020-2021 Megvii Inc. All rights reserved.
 */

#include <map>

#include "helpers/data_parser.h"
#include "misc.h"
#include "models/model_lite.h"
#include "models/model_mdl.h"

#include "io_options.h"
namespace lar {
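//! Options are applied in stages (see RuntimeParam::stage): BEFORE_MODEL_LOAD
//! feeds the raw --input arguments to the data parser and registers the input
//! names; AFTER_MODEL_LOAD binds the parsed tensors to the loaded network's IO.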
template <>
void InputOption::config_model_internel<ModelLite>(
        RuntimeParam& runtime_param, std::shared_ptr<ModelLite> model) {
    if (runtime_param.stage == RunStage::BEFORE_MODEL_LOAD) {
        auto parser = model->get_input_parser();
        auto io = model->get_networkIO();
        for (size_t idx = 0; idx < data_path.size(); ++idx) {
            parser.feed(data_path[idx].c_str());
        }

        auto inputs = parser.inputs;
        bool is_host = true;
        for (auto& i : inputs) {
            io.inputs.push_back({i.first, is_host});
        }
    } else if (runtime_param.stage == RunStage::AFTER_MODEL_LOAD) {
        auto config = model->get_config();
        auto parser = model->get_input_parser();
        auto network = model->get_lite_network();

        //! data type map from MegBrain data type to Lite data type
        std::map<megdnn::DTypeEnum, LiteDataType> type_map = {
                {megdnn::DTypeEnum::Float32, LiteDataType::LITE_FLOAT},
                {megdnn::DTypeEnum::Int32, LiteDataType::LITE_INT},
                {megdnn::DTypeEnum::Int8, LiteDataType::LITE_INT8},
                {megdnn::DTypeEnum::Uint8, LiteDataType::LITE_UINT8}};
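        //! NOTE: type_map[...] below uses std::map::operator[], which
        //! default-constructs an entry for any dtype not listed above; only
        //! these four dtypes are handled correctly here.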

        for (auto& i : parser.inputs) {
            //! get tensor information from data parser
            auto tensor = i.second;
            auto data_type = tensor.dtype();
            auto tensor_shape = tensor.shape();
            mgb::dt_byte* src = tensor.raw_ptr();

            //! set lite layout
            lite::Layout layout;
            layout.ndim = tensor_shape.ndim;
            for (size_t idx = 0; idx < tensor_shape.ndim; idx++) {
                layout.shapes[idx] = tensor_shape[idx];
            }
            layout.data_type = type_map[data_type.enumv()];

            //! set network input tensor
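            //! reset() points the lite tensor at the parser's buffer rather
            //! than copying it, so the parsed data must outlive inference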
            std::shared_ptr<lite::Tensor> input_tensor =
                    network->get_io_tensor(i.first);
            input_tensor->reset(src, layout);
        }
    }
}

template <>
void InputOption::config_model_internel<ModelMdl>(
        RuntimeParam& runtime_param, std::shared_ptr<ModelMdl> model) {
    if (runtime_param.stage == RunStage::BEFORE_MODEL_LOAD) {
        auto parser = model->get_input_parser();
        for (size_t idx = 0; idx < data_path.size(); ++idx) {
            parser.feed(data_path[idx].c_str());
        }
    } else if (runtime_param.stage == RunStage::AFTER_MODEL_LOAD) {
        auto parser = model->get_input_parser();
        auto network = model->get_mdl_load_result();
        auto tensormap = network.tensor_map;
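        //! unlike the Lite path above, which resets the IO tensor to reference
        //! the parser's data, the MDL path copies each parsed host tensor into
        //! the graph's tensor map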
        for (auto& i : parser.inputs) {
            mgb_assert(
                    tensormap.find(i.first) != tensormap.end(),
                    "can't find tesnor named %s", i.first.c_str());
            auto& in = tensormap.find(i.first)->second;
            in->copy_from(i.second);
        }
    }
}

template <>
void IOdumpOption::config_model_internel<ModelLite>(
        RuntimeParam& runtime_param, std::shared_ptr<ModelLite> model) {
    if (runtime_param.stage == RunStage::AFTER_MODEL_LOAD) {
        if (enable_io_dump) {
            LITE_WARN("enable text io dump");
            lite::Runtime::enable_io_txt_dump(model->get_lite_network(), dump_path);
        }
        if (enable_bin_io_dump) {
            LITE_WARN("enable binary io dump");
            lite::Runtime::enable_io_bin_dump(model->get_lite_network(), dump_path);
        }
        //! FIXME: route these through Lite once it exposes the corresponding API
        if (enable_io_dump_stdout || enable_io_dump_stderr) {
            LITE_THROW("lite model doesn't support stdout or stderr io dump");
        }
        if (enable_bin_out_dump) {
            LITE_THROW("lite model doesn't support binary output dump");
        }
        if (enable_copy_to_host) {
            LITE_WARN("lite model copies outputs to host by default");
        }
    }
}

template <>
void IOdumpOption::config_model_internel<ModelMdl>(
        RuntimeParam& runtime_param, std::shared_ptr<ModelMdl> model) {
    if (runtime_param.stage == RunStage::BEFORE_MODEL_LOAD) {
        if (enable_io_dump) {
            mgb_log_warn("enable text io dump");
            auto iodump = std::make_unique<mgb::TextOprIODump>(
                    model->get_mdl_config().comp_graph.get(), dump_path.c_str());
            iodump->print_addr(false);
            io_dumper = std::move(iodump);
        }

        if (enable_io_dump_stdout) {
            mgb_log_warn("enable text io dump to stdout");
            std::shared_ptr<FILE> std_out(stdout, [](FILE*) {});
            auto iodump = std::make_unique<mgb::TextOprIODump>(
                    model->get_mdl_config().comp_graph.get(), std_out);
            iodump->print_addr(false);
            io_dumper = std::move(iodump);
        }

        if (enable_io_dump_stderr) {
            mgb_log_warn("enable text io dump to stderr");
            std::shared_ptr<FILE> std_err(stderr, [](FILE*) {});
            auto iodump = std::make_unique<mgb::TextOprIODump>(
                    model->get_mdl_config().comp_graph.get(), std_err);
            iodump->print_addr(false);
            io_dumper = std::move(iodump);
        }

        if (enable_bin_io_dump) {
            mgb_log_warn("enable binary io dump");
            auto iodump = std::make_unique<mgb::BinaryOprIODump>(
                    model->get_mdl_config().comp_graph.get(), dump_path);
            io_dumper = std::move(iodump);
        }

        if (enable_bin_out_dump) {
            mgb_log_warn("enable binary output dump");
            out_dumper = std::make_unique<OutputDumper>(dump_path.c_str());
        }
    } else if (runtime_param.stage == RunStage::AFTER_MODEL_LOAD) {
        if (enable_bin_out_dump) {
            auto load_result = model->get_mdl_load_result();
            out_dumper->set(load_result.output_var_list);

            std::vector<mgb::ComputingGraph::Callback> cb;
            for (size_t i = 0; i < load_result.output_var_list.size(); i++) {
                cb.push_back(out_dumper->bind());
            }
            model->set_output_callback(cb);
        }
        if (enable_copy_to_host) {
            auto load_result = model->get_mdl_load_result();

            std::vector<mgb::ComputingGraph::Callback> cb;
            for (size_t i = 0; i < load_result.output_var_list.size(); i++) {
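                //! each callback captures its own HostTensorND by value
                //! (mutable), so every output var gets a device-to-host copy
                //! when the graph runs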
                mgb::HostTensorND val;
                auto callback = [val](const mgb::DeviceTensorND& dv) mutable {
                    val.copy_from(dv);
                };
                cb.push_back(callback);
            }
            model->set_output_callback(cb);
        }
    } else if (runtime_param.stage == RunStage::AFTER_RUNNING_WAIT) {
        if (enable_bin_out_dump) {
            out_dumper->write_to_file();
        }
    }
}

}  // namespace lar

////////////////////// Input options ////////////////////////
using namespace lar;

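//! split FLAGS_input on ";" into individual input items, e.g.
//! --input "data0.npy;data1.json" (file names here are illustrative)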
InputOption::InputOption() {
    m_option_name = "input";
    size_t start = 0;
    auto end = FLAGS_input.find(";", start);
    while (end != std::string::npos) {
        std::string path = FLAGS_input.substr(start, end - start);
        data_path.emplace_back(path);
        start = end + 1;
        end = FLAGS_input.find(";", start);
    }
    data_path.emplace_back(FLAGS_input.substr(start));
}

std::shared_ptr<lar::OptionBase> lar::InputOption::create_option() {
    static std::shared_ptr<InputOption> m_option(new InputOption);
    if (InputOption::is_valid()) {
        return std::static_pointer_cast<OptionBase>(m_option);
    } else {
        return nullptr;
    }
}

void InputOption::config_model(
        RuntimeParam& runtime_param, std::shared_ptr<ModelBase> model) {
    CONFIG_MODEL_FUN;
}

////////////////////// OprIOdump options ////////////////////////

IOdumpOption::IOdumpOption() {
    m_option_name = "iodump";
    size_t valid_flag = 0;
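    //! each enabled dump option sets one bit; checked below to detect
    //! conflicting options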
    if (!FLAGS_io_dump.empty()) {
        dump_path = FLAGS_io_dump;
        enable_io_dump = true;
        valid_flag = valid_flag | (1 << 0);
    }
    if (!FLAGS_bin_io_dump.empty()) {
        dump_path = FLAGS_bin_io_dump;
        enable_bin_io_dump = true;
        valid_flag = valid_flag | (1 << 1);
    }
    if (!FLAGS_bin_out_dump.empty()) {
        dump_path = FLAGS_bin_out_dump;
        enable_bin_out_dump = true;
        valid_flag = valid_flag | (1 << 2);
    }
    if (FLAGS_io_dump_stdout) {
        enable_io_dump_stdout = FLAGS_io_dump_stdout;
        valid_flag = valid_flag | (1 << 3);
    }
    if (FLAGS_io_dump_stderr) {
        enable_io_dump_stderr = FLAGS_io_dump_stderr;
        valid_flag = valid_flag | (1 << 4);
    }
    // valid_flag & (valid_flag - 1) clears the lowest set bit, so a nonzero
    // result means more than one dump option was enabled
    if (valid_flag && (valid_flag & (valid_flag - 1))) {
        mgb_log_warn(
                "ONLY the last io dump option is validate and others is "
                "skipped!!!");
    }

    enable_copy_to_host = FLAGS_copy_to_host;
}

bool IOdumpOption::is_valid() {
    bool ret = !FLAGS_io_dump.empty();
    ret = ret || FLAGS_io_dump_stdout;
    ret = ret || FLAGS_io_dump_stderr;
    ret = ret || !FLAGS_bin_io_dump.empty();
    ret = ret || !FLAGS_bin_out_dump.empty();
    ret = ret || FLAGS_copy_to_host;
    return ret;
}

std::shared_ptr<OptionBase> IOdumpOption::create_option() {
    static std::shared_ptr<IOdumpOption> option(new IOdumpOption);
    if (IOdumpOption::is_valid()) {
        return std::static_pointer_cast<OptionBase>(option);
    } else {
        return nullptr;
    }
}

void IOdumpOption::config_model(
        RuntimeParam& runtime_param, std::shared_ptr<ModelBase> model) {
    CONFIG_MODEL_FUN;
}
////////////////////// Input gflags ////////////////////////
DEFINE_string(
        input, "", "Set up inputs data for model --input [ file_path | data_string]");

////////////////////// OprIOdump gflags ////////////////////////

DEFINE_string(io_dump, "", "set the io dump file path in text format");
DEFINE_bool(io_dump_stdout, false, "dump io opr to stdout in text format");
DEFINE_bool(io_dump_stderr, false, "dump io opr to stderr in text format");
DEFINE_string(bin_io_dump, "", "set the io dump file path in binary format");
DEFINE_string(bin_out_dump, "", "set the out dump file path in binary format");
DEFINE_bool(copy_to_host, false, "copy device data to host");
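
// Hypothetical invocation (executable and file names are illustrative):
//   load_and_run model.mge --input="data.npy" --io_dump=io.txt
//   load_and_run model.mge --bin_out_dump=out_dir --copy_to_host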

REGIST_OPTION_CREATOR(input, lar::InputOption::create_option);
REGIST_OPTION_CREATOR(iodump, lar::IOdumpOption::create_option);