提交 81915f6b 编写于 作者: M Megvii Engine Team 提交者: Xinran Xu

feat(sdk/load_and_run): support npy/json/ppm/pgm as input

GitOrigin-RevId: f0aa7abe85f38325db1a34967e966d41bd4b638a
上级 df8931b6
......@@ -25,6 +25,9 @@ Copyright (c) 2017-2020 ARM Software
6. maskrcnn-benchmark
Copyright (c) 2018 Facebook
7. libnpy
Copyright (c) 2017 Leon Merten Lohse
Terms of the MIT License:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
......
......@@ -4,3 +4,8 @@ add_executable (load_and_run ${SOURCES})
target_link_libraries (load_and_run megbrain)
install (TARGETS load_and_run RUNTIME DESTINATION bin)
if(MGE_WITH_TEST)
add_executable(json_loader_test test/json_loader_test.cpp src/json_loader.h src/json_loader.cpp)
target_link_libraries (json_loader_test megbrain)
endif()
......@@ -65,3 +65,80 @@ python3 dump_with_testcase_mge.py --help
```
load_and_run xornet.mge
```
## `load_and_run --input` the dumped mge file
You can also use `--input` to set mge file's input, this argument support these 4 formats:
1. PPM/PGM image file.
PPM/PGM is supported by OpenCV and simple to parse, you can easily use `cv::imwrite` to generate one.
```
load_and_run model.mge --input "data:image.ppm"
```
`data` is blob name and `image.ppm` is file path, we use `:` to separate key and value. Please note that `"` is necessary in terminal.
2. npy file.
npy is `Numpy` file format, here is a Python example
```
import numpy as np
import cv2
mat = cv2.imread('file.jpg')
np.save('image.npy', mat)
arr = np.array([[[1.1, 1.2],[100, 200.0]]], dtype=np.float32)
np.save('bbox.npy', arr)
```
then `load_and_run` the model
```
load_and_run model.mge --input data:image.npy;bbox.npy
```
3. json format.
For json format, you have to identify data type and blob shape. Here is a Python example
```
import numpy as np
import json
import cv2
bbox = np.array([[[1.1, 1.2],[100, 200.0]]], dtype=np.float32)
obj = dict()
obj['shape'] = bbox.shape
obj['raw'] = bbox.flatten().tolist()
obj['type'] = str(bbox.dtype)
json_object = dict()
json_object['bbox'] = obj
json_str = json.dumps(json_object)
with open('bbox.json', 'w') as f:
f.write(json_str)
f.flush()
f.close()
```
The json loader in `load_and_run` does not fully implement [RFC7159](https://tools.ietf.org/html/rfc7159); it does not support `boolean` values or `utf` string escapes, which are not needed during inference.
Now let's `load-and-run` the model with json file
```
load_and_run model.mge --input "data:image.npy;bbox:bbox.json"
```
Multiple key-value pairs could be separated with `;`.
4. plain string format.
Also, you can give the value directly
```
load_and_run model.mge --input data:image.ppm --input "bbox:[0,0],[200.0,200.0]" --input "batchid:0"
```
1. `bbox` shape is `[1,2,2]` for `[0,0],[200.0,200.0]`. In order to facilitate user experience, the string parser would add an extra axis for input, thus `bbox:0` corresponds to `[1]` and `bbox:[0]` means that the shape is `[1,1]`
2. Since we can only identify `int32` and `float32` from this format, don't forget `.` for float number.
/**
* \file sdk/load-and-run/src/json_loader.cpp
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied.
*/
#include "json_loader.h"
using namespace mgb;
// Downcast this Value to the concrete subclass T.
// NOTE(review): a C-style cast of `this` can never yield nullptr here, so
// the check below is effectively dead; kept to preserve the original
// diagnostics. Callers assert m_type before casting, which is what makes
// the cast safe.
template <typename T>
T* JsonLoader::Value::safe_cast() {
    T* ptr = (T*)(this);
    if (nullptr == ptr) {
        fprintf(stderr, "cast ptr is null\n");
    }
    return ptr;
}
// Key lookup; only valid on OBJECT nodes (asserted).
std::unique_ptr<JsonLoader::Value>& JsonLoader::Value::operator[](
        const std::string& key) {
    mgb_assert(Type::OBJECT == m_type);
    auto t = safe_cast<JsonLoader::ObjectValue>();
    return t->m_obj.at(key);  // std::map::at throws for a missing key
}
// Index lookup; only valid on ARRAY nodes (asserted). No bounds check on
// `index` beyond what SmallVector itself performs.
std::unique_ptr<JsonLoader::Value>& JsonLoader::Value::operator[](
        const size_t index) {
    mgb_assert(Type::ARRAY == m_type);
    auto t = safe_cast<JsonLoader::ArrayValue>();
    return t->m_obj[index];
}
// All key/value pairs of an OBJECT node (asserted).
std::map<std::string, std::unique_ptr<JsonLoader::Value>>&
JsonLoader::Value::objects() {
    mgb_assert(Type::OBJECT == m_type);
    auto t = safe_cast<JsonLoader::ObjectValue>();
    return t->m_obj;
}
// Element count of a container node; non-container nodes report 0.
size_t JsonLoader::Value::len() {
    switch (m_type) {
        case Type::ARRAY:
            return safe_cast<JsonLoader::ArrayValue>()->m_obj.size();
        case Type::OBJECT:
            return safe_cast<JsonLoader::ObjectValue>()->m_obj.size();
        default:
            return 0;
    }
}
// All elements of an ARRAY node (asserted).
megdnn::SmallVector<std::unique_ptr<JsonLoader::Value>>&
JsonLoader::Value::array() {
    mgb_assert(Type::ARRAY == m_type);
    auto t = safe_cast<JsonLoader::ArrayValue>();
    return t->m_obj;
}
// Numeric payload of a NUMBER node (asserted).
double JsonLoader::Value::number() {
    mgb_assert(Type::NUMBER == m_type);
    auto t = safe_cast<JsonLoader::NumberValue>();
    return t->value();
}
// String payload of a STRING node; any other node type yields "" rather
// than asserting (parse_object relies on this for non-string values).
std::string JsonLoader::Value::str() {
    if (Type::STRING != m_type) {
        return std::string();
    }
    return safe_cast<StringValue>()->value();
}
// Consume exactly one expected character; asserts on mismatch.
void JsonLoader::expect(char c) {
    mgb_assert(c == (*m_buf));
    m_buf++;
}
// Advance the cursor past spaces, tabs and newlines.
void JsonLoader::skip_whitespace() {
    while (*m_buf == ' ' || *m_buf == '\t' || *m_buf == '\n' ||
           *m_buf == '\r') {
        ++m_buf;
    }
}
std::unique_ptr<JsonLoader::Value> JsonLoader::parse_object() {
expect('{');
skip_whitespace();
std::unique_ptr<JsonLoader::Value> ret;
JsonLoader::ObjectValue* pObject = new JsonLoader::ObjectValue();
if ('}' == *m_buf) {
m_buf = m_buf + 1;
ret.reset((JsonLoader::Value*)(pObject));
return ret;
}
while (true) {
std::unique_ptr<JsonLoader::Value> key = parse_string();
if (m_state != State::OK) {
return ret;
}
skip_whitespace();
if (':' != (*m_buf)) {
m_state = State::MISS_COLON;
return ret;
}
m_buf++;
skip_whitespace();
std::unique_ptr<JsonLoader::Value> pVal = parse_value();
if (m_state != State::OK) {
return ret;
}
if (pObject->m_obj.find(pVal->str()) != pObject->m_obj.end()) {
m_state = State::KEY_NOT_UNIQUE;
return ret;
}
pObject->m_obj.insert(std::make_pair(key->str(), std::move(pVal)));
skip_whitespace();
if (',' == (*m_buf)) {
m_buf++;
skip_whitespace();
} else if ('}' == (*m_buf)) {
m_buf++;
break;
} else {
m_state = State::MISS_BRACE;
break;
}
}
ret.reset((JsonLoader::Value*)(pObject));
return ret;
}
std::unique_ptr<JsonLoader::Value> JsonLoader::parse_array() {
expect('[');
skip_whitespace();
std::unique_ptr<JsonLoader::Value> ret;
JsonLoader::ArrayValue* pArray = new JsonLoader::ArrayValue();
if (']' == *m_buf) {
m_buf = m_buf + 1;
ret.reset((JsonLoader::Value*)(pArray));
return ret;
}
while (true) {
std::unique_ptr<JsonLoader::Value> pVal = parse_value();
if (m_state != State::OK) {
mgb_assert(0, "parse value failed during pase array");
return ret;
}
pArray->m_obj.emplace_back(pVal.get());
pVal.release();
skip_whitespace();
if (',' == *m_buf) {
m_buf++;
skip_whitespace();
} else if (']' == *m_buf) {
m_buf++;
break;
} else {
m_state = State::BAD_ARRAY;
return ret;
}
}
ret.reset((JsonLoader::Value*)(pArray));
return ret;
}
std::unique_ptr<JsonLoader::Value> JsonLoader::parse_string() {
expect('\"');
std::unique_ptr<JsonLoader::Value> ret;
JsonLoader::StringValue* pStr = new JsonLoader::StringValue();
const char* p = m_buf;
while (true) {
if (*p == '\"') {
p++;
break;
} else {
pStr->m_value += (*p);
p++;
}
}
m_buf = p;
ret.reset((JsonLoader::Value*)(pStr));
return ret;
}
// Parse a json number: [-] int [frac] [exp]. The scan only validates the
// token and finds its end; the actual conversion is done by strtod below.
// NOTE(review): when loop_digit flags BAD_DIGIT, parsing still continues
// and a NumberValue is returned -- callers are expected to check m_state.
std::unique_ptr<JsonLoader::Value> JsonLoader::parse_number() {
    const char* p = m_buf;

    // consume one run of digits; flags BAD_DIGIT when the run is empty
    auto loop_digit = [this](const char*& p) {
        if (not std::isdigit(*p)) {
            m_state = State::BAD_DIGIT;
            return;
        }
        while (std::isdigit(*p)) {
            p++;
        }
        return;
    };

    if (*p == '-')
        p++;
    if (*p == '0')
        p++;
    else {
        loop_digit(std::ref(p));
    }
    if (*p == '.') {
        p++;
        loop_digit(std::ref(p));
    }

    if (*p == 'e' || *p == 'E') {
        p++;
        if (*p == '+' || *p == '-')
            p++;
        loop_digit(std::ref(p));
    }

    JsonLoader::NumberValue* pNum = new JsonLoader::NumberValue();
    pNum->m_value = strtod(m_buf, nullptr);  // re-parses from token start

    m_buf = p;  // advance past the validated token

    std::unique_ptr<JsonLoader::Value> ret;
    ret.reset((JsonLoader::Value*)(pNum));
    return ret;
}
// Dispatch to the right parser based on the first character of the token.
std::unique_ptr<JsonLoader::Value> JsonLoader::parse_value() {
    const char head = *m_buf;
    if (head == '[') {
        return parse_array();
    }
    if (head == '{') {
        return parse_object();
    }
    if (head == '\"') {
        return parse_string();
    }
    if (head == '\0') {
        // premature end of buffer
        m_state = State::BAD_TYPE;
        return nullptr;
    }
    return parse_number();
}
// Parse an in-memory, NUL-terminated json buffer of `size` bytes.
// Returns nullptr when the parser flagged an error.
std::unique_ptr<JsonLoader::Value> JsonLoader::load(const char* content,
                                                    const size_t size) {
    m_buf = content;
    skip_whitespace();
    std::unique_ptr<JsonLoader::Value> value = parse_value();
    skip_whitespace();

    if (m_state != State::OK) {
        return nullptr;
    }
    // the whole buffer must have been consumed exactly
    mgb_assert(size == static_cast<size_t>(m_buf - content));
    return value;
}
// Read the whole file at `path` into memory and parse it.
// Asserts on I/O failure (open / ftell / short read).
std::unique_ptr<JsonLoader::Value> JsonLoader::load(const char* path) {
    // RAII handle: the FILE* is closed on every exit path
    std::unique_ptr<std::FILE, void (*)(std::FILE*)> fin(
            std::fopen(path, "rb"), [](std::FILE* fp) { std::fclose(fp); });
    mgb_assert(fin.get(), "failed to open %s: %s", path, strerror(errno));

    std::fseek(fin.get(), 0, SEEK_END);
    const long file_size = std::ftell(fin.get());
    mgb_assert(file_size >= 0, "ftell failed on %s: %s", path,
               strerror(errno));
    const size_t size = static_cast<size_t>(file_size);
    std::fseek(fin.get(), 0, SEEK_SET);

    // BUGFIX: the buffer was malloc()'d but owned by unique_ptr<char>,
    // whose default deleter calls `delete` -- mismatched allocator (UB).
    // new[] paired with unique_ptr<char[]> keeps alloc/free consistent.
    std::unique_ptr<char[]> buf(new char[size]);
    auto nr = std::fread(buf.get(), 1, size, fin.get());
    mgb_assert(nr == size);
    return load(buf.get(), size);
}
/**
* \file sdk/load-and-run/src/json_loader.h
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied.
*/
#pragma once
#include <cctype>
#include <fstream>
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include "megbrain/common.h"
#include "megdnn/thin/small_vector.h"
namespace mgb {
/**
 * Minimal recursive-descent JSON parser used by load_and_run to read
 * "--input" json payloads. Not a full RFC7159 implementation: booleans,
 * null and string escapes are not handled.
 */
class JsonLoader {
public:
    //! Base class of all parsed nodes; the concrete kind is tagged by m_type.
    class Value {
    protected:
        enum struct Type : uint8_t { UNKNOWN, NUMBER, STRING, OBJECT, ARRAY };
        Type m_type;

    public:
        //! downcast to a concrete subclass; logs if the pointer is null
        template <typename T>
        T* safe_cast();

        Value() { m_type = Type::UNKNOWN; }

        Value(Type type) : m_type(type) {}

        virtual ~Value() {}

        bool is_array() { return Type::ARRAY == m_type; }

        bool is_object() { return Type::OBJECT == m_type; }

        bool is_number() { return Type::NUMBER == m_type; }

        bool is_str() { return Type::STRING == m_type; }

        //! key lookup; asserts the node is an OBJECT
        std::unique_ptr<Value>& operator[](const std::string& key);

        //! index lookup; asserts the node is an ARRAY
        std::unique_ptr<Value>& operator[](const size_t index);

        //! all key/value pairs; asserts the node is an OBJECT
        std::map<std::string, std::unique_ptr<Value>>& objects();

        //! element count for ARRAY/OBJECT nodes; 0 for any other type
        size_t len();

        //! all elements; asserts the node is an ARRAY
        megdnn::SmallVector<std::unique_ptr<Value>>& array();

        //! numeric payload; asserts the node is a NUMBER
        double number();

        //! string payload for STRING nodes; empty string otherwise
        std::string str();
    };

    void expect(char c);        // consume one expected character (asserted)
    void skip_whitespace();     // advance m_buf past blanks/newlines

    std::unique_ptr<Value> parse_object();
    std::unique_ptr<Value> parse_array();
    std::unique_ptr<Value> parse_string();
    std::unique_ptr<Value> parse_number();
    std::unique_ptr<Value> parse_value();

    //! parser status; anything but OK means the input was rejected
    enum struct State : uint8_t {
        OK = 0,
        BAD_TYPE,
        BAD_DIGIT,
        BAD_ARRAY,
        MISS_COLON,
        MISS_BRACE,
        KEY_NOT_UNIQUE
    };

    JsonLoader() { m_state = State::OK; }

    //! parse an in-memory buffer; returns nullptr on malformed input
    std::unique_ptr<Value> load(const char* content, const size_t size);

    //! read and parse a file; asserts on I/O failure
    std::unique_ptr<Value> load(const char* path);

    class NumberValue final : public Value {
        friend std::unique_ptr<Value> JsonLoader::parse_number();
        double m_value;

    public:
        NumberValue() : Value(Type::NUMBER) {}

        double value() { return m_value; }
    };

    class StringValue final : public Value {
        std::string m_value;

    public:
        StringValue() : Value(Type::STRING) {}

        std::string value() { return m_value; }

        friend std::unique_ptr<Value> JsonLoader::parse_string();
    };

    class ArrayValue final : public Value {
        megdnn::SmallVector<std::unique_ptr<Value>> m_obj;

    public:
        ArrayValue() : Value(Type::ARRAY) {}

        // NOTE(review): this "copy" constructor actually *steals* the
        // elements from `arr` (each item is released from the source), so
        // it behaves like a move -- confirm no caller relies on a real copy.
        ArrayValue(ArrayValue& arr) : Value(arr) {
            m_obj.clear();
            for (auto& item : arr.m_obj) {
                m_obj.emplace_back(item.get());
                item.release();
            }
        }

        ArrayValue(ArrayValue&& arr) : Value(arr) {
            m_obj.clear();
            for (auto& item : arr.m_obj) {
                m_obj.emplace_back(item.get());
                item.release();
            }
        }

        friend std::unique_ptr<Value> JsonLoader::parse_array();
        friend std::unique_ptr<JsonLoader::Value>& JsonLoader::Value::
                operator[](const size_t index);
        friend megdnn::SmallVector<std::unique_ptr<JsonLoader::Value>>&
        JsonLoader::Value::array();
        friend size_t JsonLoader::Value::len();
    };

    class ObjectValue final : public Value {
        std::map<std::string, std::unique_ptr<Value>> m_obj;

    public:
        ObjectValue() : Value(Type::OBJECT) {}

        // NOTE(review): like ArrayValue, "copying" moves the children out
        // of the source object.
        ObjectValue(ObjectValue& arr) : Value(arr) {
            m_obj.clear();
            for (auto itra = arr.m_obj.begin(); itra != arr.m_obj.end();
                 ++itra) {
                m_obj.emplace(
                        std::make_pair(itra->first, std::move(itra->second)));
            }
        }

        ObjectValue(ObjectValue&& arr) : Value(arr) {
            m_obj.clear();
            for (auto itra = arr.m_obj.begin(); itra != arr.m_obj.end();
                 ++itra) {
                m_obj.emplace(
                        std::make_pair(itra->first, std::move(itra->second)));
            }
        }

        friend std::unique_ptr<Value> JsonLoader::parse_object();
        friend std::unique_ptr<JsonLoader::Value>& JsonLoader::Value::
                operator[](const std::string&);
        friend std::map<std::string, std::unique_ptr<JsonLoader::Value>>&
        JsonLoader::Value::objects();
        friend size_t JsonLoader::Value::len();
    };

private:
    const char* m_buf;  // cursor into the buffer being parsed
    State m_state;      // parser status, set by the parse_* helpers
};
} // namespace mgb
......@@ -11,6 +11,8 @@
#include "./mgblar.h"
#include "./infile_persistent_cache.h"
#include "./json_loader.h"
#include "./npy.h"
#include "megbrain/utils/debug.h"
#include "megbrain/serialization/serializer.h"
......@@ -33,6 +35,8 @@
#include <cstring>
#include <cerrno>
#include <cstdio>
#include <cctype>
#include <numeric>
#include <sstream>
#if defined(_WIN32)
......@@ -78,6 +82,11 @@ R"__usage__(
profiling device time, which may cause additional overhead and make it
hard to profile host time. Use --profile-host to focus on host time
profiling.
--input [ filepath | string]
Set up inputs for megbrain model. for example: --data image.ppm --data
param.json --data bbox:bbox.npy@batchid:b.npy --data rect:[0,0,227,227];
batchid:0,1,2,3. --io-dump or --bin-io-dump
should be enabled at the same time.
--io-dump <output> | --bin-io-dump <output dir>
Dump input/output values of all internal variables to output file or
directory, in text or binary format. The binary file can be parsed by
......@@ -183,6 +192,273 @@ R"__usage__(
;
/**
 * Parser for "--input" items. Each item is "[name:]payload"; the payload
 * format is chosen by suffix (.ppm/.pgm image, .json, .npy) and falls back
 * to a plain string literal. Parsed tensors are collected in `inputs`.
 */
struct DataParser {
    //! Node of the brace-nesting tree built for plain-string payloads;
    //! tree depth/fanout determines the tensor shape.
    struct Brace {
        Brace() { parent = nullptr; }
        std::shared_ptr<Brace> parent;
        std::vector<std::shared_ptr<Brace>> children;
    };

    //! Dispatch one "--input" item to the right parser by file suffix.
    void feed(const std::string& path) {
        std::string blob_name = "data", blob_string = path;
        size_t sep = path.find(":");
        if (sep != std::string::npos) {
            blob_name = path.substr(0, sep);
            blob_string = path.substr(sep + 1);
        }

        auto endWith = [&blob_string](const std::string& suffix) -> bool {
            // BUGFIX: the old `rfind(suffix) == length() - suffix.length()`
            // underflowed (size_t) when the payload was shorter than the
            // suffix, so e.g. a 3-char name could falsely match ".ppm".
            return blob_string.length() >= suffix.length() &&
                   blob_string.compare(blob_string.length() - suffix.length(),
                                       suffix.length(), suffix) == 0;
        };

        if (endWith(".ppm") || endWith(".pgm")) {
            parse_image(blob_name, blob_string);
        } else if (endWith(".json")) {
            parse_json(blob_string);
        } else if (endWith(".npy")) {
            parse_npy(blob_name, blob_string);
        } else {
            parse_string(blob_name, blob_string);
        }
    }

    //! parsed tensors, keyed by blob name
    std::map<std::string, HostTensorND> inputs;

private:
    //! Load a json file of the form
    //! {"blob": {"shape": [...], "type": "...", "raw": [...]}, ...};
    //! one tensor is inserted per top-level key.
    void parse_json(const std::string& path) {
        JsonLoader json;
        std::shared_ptr<JsonLoader::Value> root = json.load(path.c_str());
        mgb_assert(root != nullptr, "parse json %s fail", path.c_str());

        const std::string SHAPE = "shape", TYPE = "type", RAW = "raw";
        for (auto& item : root->objects()) {
            auto&& value = *item.second;
            auto&& shape = value[SHAPE];
            mgb_assert(shape->is_array());
            auto&& type = value[TYPE];
            mgb_assert(type->is_str());
            auto&& raw = value[RAW];
            mgb_assert(raw->is_array());

            megdnn::SmallVector<size_t> data_shape;
            for (auto&& shape_ptr : shape->array()) {
                data_shape.append(
                        {static_cast<size_t>(std::round(shape_ptr->number()))});
            }

            // map the json "type" string onto a megdnn dtype
            const std::map<std::string, megdnn::DType> type_map = {
                    {"float32", dtype::Float32()}, {"float", dtype::Float32()},
                    {"int32", dtype::Int32()},     {"int", dtype::Int32()},
                    {"int8", dtype::Int8()},       {"uint8", dtype::Uint8()}};
            const std::string& type_str = type->str();
            mgb_assert(type_map.find(type_str) != type_map.end(),
                       "unknown json data type for --input");
            DType datatype = type_map.at(type_str);

            HostTensorND hv;
            hv.comp_node(mgb::CompNode::default_cpu(), true)
                    .dtype(datatype)
                    .resize(data_shape);
            dt_byte* raw_ptr = hv.raw_ptr();
            size_t elem_size = datatype.size();

            // copy the flattened "raw" array element by element
            const size_t array_size = raw->len();
            for (size_t idx = 0; idx < array_size; ++idx) {
                double tmp = (*raw)[idx]->number();
                switch (datatype.enumv()) {
                    case megdnn::DTypeEnum::Int32: {
                        int32_t ival = std::round(tmp);
                        memcpy(raw_ptr + idx * elem_size, &ival, elem_size);
                    } break;
                    case megdnn::DTypeEnum::Uint8:
                    case megdnn::DTypeEnum::Int8: {
                        int8_t cval = std::round(tmp);
                        memcpy(raw_ptr + idx, &cval, sizeof(int8_t));
                    } break;
                    case megdnn::DTypeEnum::Float32: {
                        float fval = tmp;
                        memcpy(raw_ptr + idx * elem_size, &fval, elem_size);
                    } break;
                    default:
                        break;
                }
            }

            inputs.insert(std::make_pair(item.first, std::move(hv)));
        }
    }

    //! Load a binary PPM (P6, 3 channels) or PGM (P5, 1 channel) image as
    //! an uint8 tensor of shape {1, h, w, channel}.
    void parse_image(const std::string& name, const std::string& path) {
        std::ifstream fin;
        fin.open(path, std::ifstream::binary | std::ifstream::in);
        mgb_assert(fin.is_open(), "open file %s failed for --input",
                   path.c_str());

        size_t w = 0, h = 0, channel = 0;
        char buf[128] = {0};

        fin.getline(buf, 128);  // magic line: "P5" => gray, "P6" => rgb
        if ('5' == buf[1]) {
            channel = 1;
        } else if ('6' == buf[1]) {
            channel = 3;
        } else {
            mgb_assert(0, "not a formal ppm/pgm");
        }

        // skip OCV comment, check
        // https://github.com/opencv/opencv/pull/17006
        while (fin.getline(buf, 128)) {
            if (buf[0] == '#') {
                continue;
            }
            break;
        }
        std::stringstream ss;
        ss << std::string(buf);
        ss >> w;
        ss >> h;
        mgb_assert(w > 0 and h > 0);

        // BUGFIX: consume the maxval line (e.g. "255") that precedes the
        // raster; the old code read it as the first pixel bytes, shifting
        // the whole image.
        fin.getline(buf, 128);

        HostTensorND hv;
        hv.comp_node(mgb::CompNode::default_cpu(), true)
                .dtype(dtype::Uint8())
                .resize({1, h, w, channel});
        fin.read((char*)(hv.raw_ptr()), hv.layout().total_nr_elems());
        fin.close();
        inputs.insert(std::make_pair(name, std::move(hv)));
    }

    //! Load a .npy file via libnpy; dtype is deduced from the numpy
    //! typestring (f4 / i4 / i1 / u1).
    void parse_npy(const std::string& name, const std::string& path) {
        std::string type_str;
        std::vector<npy::ndarray_len_t> stl_shape;
        std::vector<int8_t> raw;
        npy::LoadArrayFromNumpy(path, type_str, stl_shape, raw);

        megdnn::SmallVector<size_t> shape;
        for (auto val : stl_shape) {
            shape.append({static_cast<size_t>(val)});
        }

        const std::map<std::string, megdnn::DType> type_map = {
                {"f4", dtype::Float32()},
                {"i4", dtype::Int32()},
                {"i1", dtype::Int8()},
                {"u1", dtype::Uint8()}};

        megdnn::DType hv_type;
        bool type_matched = false;
        for (auto& item : type_map) {
            if (type_str.find(item.first) != std::string::npos) {
                hv_type = item.second;
                type_matched = true;
                break;
            }
        }
        // previously an unknown dtype silently produced an invalid tensor
        mgb_assert(type_matched, "unsupported npy dtype %s for --input",
                   type_str.c_str());

        HostTensorND hv;
        hv.comp_node(mgb::CompNode::default_cpu(), true)
                .dtype(hv_type)
                .resize(shape);
        dt_byte* raw_ptr = hv.raw_ptr();
        memcpy(raw_ptr, raw.data(), raw.size());

        inputs.insert(std::make_pair(name, std::move(hv)));
    }

    //! Parse a literal like "[0,0],[200.0,200.0]". Any '.' makes the dtype
    //! float32, otherwise int32; an extra leading axis is always added, so
    //! "0" gets shape {1} and "[0]" gets shape {1, 1}.
    void parse_string(const std::string name, const std::string& str) {
        // decide the data type (the original duplicated this condition)
        megdnn::DType data_type = dtype::Int32();
        if (str.find(".") != std::string::npos) {
            data_type = dtype::Float32();
        }

        // derive the shape from the brace nesting: depth = rank, fanout of
        // the first child at each level = extent of that dimension
        size_t number_cnt = 0;
        std::shared_ptr<Brace> brace_root = std::make_shared<Brace>();
        std::shared_ptr<Brace> cur = brace_root;
        for (size_t i = 0; i < str.size(); ++i) {
            char c = str[i];
            if (c == '[') {
                std::shared_ptr<Brace> child = std::make_shared<Brace>();
                child->parent = cur;
                cur->children.emplace_back(child);
                cur = child;
            } else if (c == ']') {
                // a stray ']' used to dereference a null parent
                mgb_assert(cur->parent != nullptr,
                           "braces not closed for --input");
                cur = cur->parent;
            } else if (c == ',') {
                number_cnt++;
            }
        }
        ++number_cnt;  // n separators => n + 1 numbers

        mgb_assert(cur == brace_root, "braces not closed for --input");
        megdnn::SmallVector<size_t> shape;
        cur = brace_root;
        while (not cur->children.empty()) {
            shape.append({cur->children.size()});
            number_cnt /= cur->children.size();
            cur = cur->children[0];
        }
        mgb_assert(number_cnt > 0);
        shape.append({number_cnt});

        // strip braces and let the json parser handle the raw numbers
        std::string json_arr;
        for (size_t i = 0; i < str.size(); ++i) {
            char c = str[i];
            if (c != '[' and c != ']') {
                json_arr += c;
            }
        }
        json_arr = "[" + json_arr + "]";

        JsonLoader json;
        std::shared_ptr<JsonLoader::Value> json_root =
                json.load(json_arr.data(), json_arr.size());
        mgb_assert(json_root != nullptr, "parse json fail in parse_string");

        HostTensorND hv;
        hv.comp_node(mgb::CompNode::default_cpu(), true)
                .dtype(data_type)
                .resize(shape);
        dt_byte* raw_ptr = hv.raw_ptr();

        const size_t array_len = json_root->len();
        const size_t elem_size = data_type.size();
        for (size_t idx = 0; idx < array_len; ++idx) {
            double tmp = json_root->array()[idx]->number();
            switch (data_type.enumv()) {
                case megdnn::DTypeEnum::Int32: {
                    int32_t ival = std::round(tmp);
                    memcpy(raw_ptr + idx * elem_size, &ival, elem_size);
                } break;
                case megdnn::DTypeEnum::Float32: {
                    float fval = tmp;
                    memcpy(raw_ptr + idx * elem_size, &fval, elem_size);
                } break;
                default:
                    break;
            }
        }

        inputs.insert(std::make_pair(name, std::move(hv)));
    }
};
struct Args {
int args_parse_ret = 0;
......@@ -200,6 +476,7 @@ struct Args {
int nr_thread = 1;
int multithread_number = 1;
size_t workspace_limit = SIZE_MAX;
std::vector<std::string> data_files;
serialization::GraphLoader::LoadResult load_ret;
#if MGB_ENABLE_JSON
std::unique_ptr<GraphProfiler> profiler;
......@@ -481,6 +758,32 @@ void run_test_st(Args &env) {
}
printf("=== total time: %.3fms\n", tot_time);
} else if (not env.data_files.empty()) {
auto& tensormap = env.load_ret.tensor_map;
DataParser parser;
for (auto path : env.data_files) {
parser.feed(path);
}
auto inputs = parser.inputs;
for (auto& i : inputs) {
if (tensormap.find(i.first) == tensormap.end()) {
continue;
}
auto& in = tensormap.find(i.first)->second;
in->copy_from(i.second);
}
timer.reset();
func->execute();
auto exec_time = timer.get_msecs();
func->wait();
output_dumper.write_to_file();
auto cur = timer.get_msecs();
printf("%.3fms %.3fms (device=%.3f)\n", cur, exec_time,
func->get_prev_exec_time() * 1e3);
} else {
// run speed test for a raw mgb graph
mgb_assert(env.load_ret.tensor_map.empty(),
......@@ -607,7 +910,7 @@ Args Args::from_argv(int argc, char **argv) {
ret.multithread_number](CompNode::Locator& loc) {
loc.type = CompNode::DeviceType::MULTITHREAD;
loc.device = 0;
loc.nr_threads = nr_threads;
loc.stream = nr_threads;
};
continue;
}
......@@ -692,6 +995,25 @@ Args Args::from_argv(int argc, char **argv) {
continue;
}
#endif
if (!strcmp(argv[i], "--input")) {
++i;
mgb_assert(i < argc, "input file not given for --input");
size_t start = 0;
std::string cmd = argv[i];
while (true) {
auto end = cmd.find(";", start);
if (end == std::string::npos) {
ret.data_files.emplace_back(cmd.substr(start));
break;
}
std::string substr = cmd.substr(start, end);
ret.data_files.emplace_back(substr);
start = end + 1;
}
continue;
}
if (!strcmp(argv[i], "--io-dump")) {
mgb_log_warn("enable opr io dump");
++ i;
......@@ -712,7 +1034,7 @@ Args Args::from_argv(int argc, char **argv) {
continue;
}
if (!strcmp(argv[i], "--bin-out-dump")) {
++i;
++ i;
mgb_assert(i < argc,
"output directory not given for --bin-out-dump");
ret.bin_out_dump = argv[i];
......
/*
Copyright 2017 Leon Merten Lohse
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef NPY_H
#define NPY_H
#include <algorithm>
#include <complex>
#include <cstdint>
#include <cstring>
#include <fstream>
#include <iostream>
#include <regex>
#include <sstream>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <vector>
namespace npy {
/* Compile-time test for byte order.
If your compiler does not define these per default, you may want to define
one of these constants manually.
Defaults to little endian order. */
#if defined(__BYTE_ORDER) && __BYTE_ORDER == __BIG_ENDIAN || \
defined(__BIG_ENDIAN__) || defined(__ARMEB__) || \
defined(__THUMBEB__) || defined(__AARCH64EB__) || defined(_MIBSEB) || \
defined(__MIBSEB) || defined(__MIBSEB__)
const bool big_endian = true;
#else
const bool big_endian = false;
#endif
const char magic_string[] = "\x93NUMPY";
const size_t magic_string_length = 6;
const char little_endian_char = '<';
const char big_endian_char = '>';
const char no_endian_char = '|';
constexpr char host_endian_char =
(big_endian ? big_endian_char : little_endian_char);
/* npy array length */
typedef unsigned long int ndarray_len_t;
// Write the 8-byte npy prelude: 6-byte magic string + major/minor version.
inline void write_magic(std::ostream& ostream, unsigned char v_major = 1,
                        unsigned char v_minor = 0) {
    ostream.write(magic_string, magic_string_length);
    ostream.put(v_major);
    ostream.put(v_minor);
}
// Read and validate the 8-byte npy prelude; outputs the version bytes.
// NOTE(review): errors are only reported on stderr (the upstream libnpy
// throws); parsing continues with whatever bytes were read.
inline void read_magic(std::istream& istream, unsigned char& v_major,
                       unsigned char& v_minor) {
    char buf[magic_string_length + 2];
    istream.read(buf, magic_string_length + 2);

    if (!istream) {
        fprintf(stderr, "io error: failed reading file");
    }

    if (0 != std::memcmp(buf, magic_string, magic_string_length)) {
        fprintf(stderr, "this file does not have a valid npy format.");
    }

    v_major = buf[magic_string_length];
    v_minor = buf[magic_string_length + 1];
}
// typestring magic
//! Builds a numpy typestring (e.g. "<f4") for a supported element type.
struct Typestring {
private:
    char c_endian;  // '<', '>' or '|' (endianness irrelevant)
    char c_type;    // numpy kind: f / i / u / c
    int len;        // element size in bytes

public:
    //! render as a numpy typestring, e.g. "<f4"
    inline std::string str() {
        const size_t max_buflen = 16;
        char buf[max_buflen];
        // snprintf never overruns buf (sprintf could); %d matches `int len`
        std::snprintf(buf, max_buflen, "%c%c%d", c_endian, c_type, len);
        return std::string(buf);
    }

    Typestring(const std::vector<float>&)
            : c_endian{host_endian_char}, c_type{'f'}, len{sizeof(float)} {}
    Typestring(const std::vector<double>&)
            : c_endian{host_endian_char}, c_type{'f'}, len{sizeof(double)} {}
    Typestring(const std::vector<long double>&)
            : c_endian{host_endian_char},
              c_type{'f'},
              len{sizeof(long double)} {}

    Typestring(const std::vector<char>&)
            : c_endian{no_endian_char}, c_type{'i'}, len{sizeof(char)} {}
    Typestring(const std::vector<short>&)
            : c_endian{host_endian_char}, c_type{'i'}, len{sizeof(short)} {}
    Typestring(const std::vector<int>&)
            : c_endian{host_endian_char}, c_type{'i'}, len{sizeof(int)} {}
    Typestring(const std::vector<long>&)
            : c_endian{host_endian_char}, c_type{'i'}, len{sizeof(long)} {}
    Typestring(const std::vector<long long>&)
            : c_endian{host_endian_char}, c_type{'i'}, len{sizeof(long long)} {}

    Typestring(const std::vector<unsigned char>&)
            : c_endian{no_endian_char},
              c_type{'u'},
              len{sizeof(unsigned char)} {}
    Typestring(const std::vector<unsigned short>&)
            : c_endian{host_endian_char},
              c_type{'u'},
              len{sizeof(unsigned short)} {}
    Typestring(const std::vector<unsigned int>&)
            : c_endian{host_endian_char},
              c_type{'u'},
              len{sizeof(unsigned int)} {}
    Typestring(const std::vector<unsigned long>&)
            : c_endian{host_endian_char},
              c_type{'u'},
              len{sizeof(unsigned long)} {}
    Typestring(const std::vector<unsigned long long>&)
            : c_endian{host_endian_char},
              c_type{'u'},
              len{sizeof(unsigned long long)} {}

    Typestring(const std::vector<std::complex<float>>&)
            : c_endian{host_endian_char},
              c_type{'c'},
              len{sizeof(std::complex<float>)} {}
    Typestring(const std::vector<std::complex<double>>&)
            : c_endian{host_endian_char},
              c_type{'c'},
              len{sizeof(std::complex<double>)} {}
    Typestring(const std::vector<std::complex<long double>>&)
            : c_endian{host_endian_char},
              c_type{'c'},
              len{sizeof(std::complex<long double>)} {}
};
// Validate a typestring of the form '<f4' / '>i8' / '|u1' (endian char,
// kind char, decimal byte width); malformed input is reported on stderr.
inline void parse_typestring(std::string typestring) {
    std::regex pattern("'([<>|])([ifuc])(\\d+)'");
    std::smatch match;
    const bool ok =
            std::regex_match(typestring, match, pattern) && match.size() == 4;
    if (!ok) {
        fprintf(stderr, "invalid typestring");
    }
}
namespace pyparse {
/**
Removes leading and trailing whitespaces
*/
inline std::string trim(const std::string& str) {
    // strip spaces and tabs from both ends; all-whitespace input yields ""
    const char* const kWhitespace = " \t";
    const auto first = str.find_first_not_of(kWhitespace);
    if (first == std::string::npos) {
        return "";
    }
    const auto last = str.find_last_not_of(kWhitespace);
    return str.substr(first, last - first + 1);
}
// Extract the (trimmed) text after the first ':' of a "key: value" entry;
// an entry without a colon yields "".
inline std::string get_value_from_map(const std::string& mapstr) {
    const auto colon_pos = mapstr.find_first_of(":");
    if (colon_pos == std::string::npos)
        return "";
    return trim(mapstr.substr(colon_pos + 1));
}
/**
Parses the string representation of a Python dict
The keys need to be known and may not appear anywhere else in the data.
*/
inline std::unordered_map<std::string, std::string> parse_dict(
        std::string in, std::vector<std::string>& keys) {
    std::unordered_map<std::string, std::string> map;

    if (keys.size() == 0)
        return map;

    in = trim(in);

    // unwrap dictionary
    if ((in.front() == '{') && (in.back() == '}'))
        in = in.substr(1, in.length() - 2);
    else {
        fprintf(stderr, "Not a Python dictionary.");
    }

    // locate each key; a value is the span between consecutive keys
    std::vector<std::pair<size_t, std::string>> positions;

    for (auto const& value : keys) {
        size_t pos = in.find("'" + value + "'");

        // NOTE(review): a missing key is only logged; `pos` remains npos
        // and the substr below would then throw -- confirm callers always
        // pass keys that are present in the header.
        if (pos == std::string::npos) {
            fprintf(stderr, "Missing %s key.", value.c_str());
        }

        std::pair<size_t, std::string> position_pair{pos, value};
        positions.push_back(position_pair);
    }

    // sort by position in dict
    std::sort(positions.begin(), positions.end());

    for (size_t i = 0; i < positions.size(); ++i) {
        std::string raw_value;
        size_t begin{positions[i].first};
        size_t end{std::string::npos};

        std::string key = positions[i].second;

        if (i + 1 < positions.size())
            end = positions[i + 1].first;

        raw_value = in.substr(begin, end - begin);
        raw_value = trim(raw_value);

        // drop the trailing comma separating dict entries
        if (raw_value.back() == ',')
            raw_value.pop_back();

        map[key] = get_value_from_map(raw_value);
    }
    return map;
}
/**
Parses the string representation of a Python boolean
*/
// Parse a Python boolean literal; anything other than "True"/"False" is
// reported on stderr and treated as false.
inline bool parse_bool(const std::string& in) {
    if (in == "True")
        return true;
    if (in == "False")
        return false;
    // typo fix: "boolan" -> "boolean"
    fprintf(stderr, "Invalid python boolean.");
    return false;
}
/**
Parses the string representation of a Python str
*/
// Strip the single quotes from a Python string literal; malformed input is
// reported on stderr and yields "".
inline std::string parse_str(const std::string& in) {
    // length guard first: front()/back() on an empty string is UB
    if (in.length() >= 2 && (in.front() == '\'') && (in.back() == '\''))
        return in.substr(1, in.length() - 2);
    fprintf(stderr, "Invalid python string.");
    return "";
}
/**
Parses the string representation of a Python tuple into a vector of its items
*/
// Split the string form of a Python tuple "(a, b, ...)" into its raw
// (untrimmed) items; malformed input is reported on stderr and split as-is.
inline std::vector<std::string> parse_tuple(std::string in) {
    std::vector<std::string> v;
    const char separator = ',';  // typo fix: was "seperator"

    in = trim(in);

    // length guard first: front()/back() on an empty string is UB
    if (in.length() >= 2 && (in.front() == '(') && (in.back() == ')'))
        in = in.substr(1, in.length() - 2);
    else {
        fprintf(stderr, "Invalid Python tuple.");
    }

    std::istringstream iss(in);

    for (std::string token; std::getline(iss, token, separator);) {
        v.push_back(token);
    }

    return v;
}
// Render a vector as the string form of a Python tuple; a one-element
// vector gets the trailing comma "(x,)", an empty vector yields "".
template <typename T>
inline std::string write_tuple(const std::vector<T>& v) {
    if (v.size() == 0)
        return "";

    std::ostringstream ss;

    if (v.size() == 1) {
        ss << "(" << v.front() << ",)";
    } else {
        // plain loop instead of std::ostream_iterator: <iterator> is not
        // among this header's includes, so the old code relied on a
        // transitive include
        ss << "(";
        for (size_t i = 0; i + 1 < v.size(); ++i)
            ss << v[i] << ", ";
        ss << v.back();
        ss << ")";
    }

    return ss.str();
}
// Render a bool as the capitalized Python literal.
inline std::string write_boolean(bool b) {
    return b ? "True" : "False";
}
} // namespace pyparse
// Parse an npy header that only carries the "descr" key; outputs the bare
// typestring (quotes stripped) into `descr`.
inline void parse_header(std::string header, std::string& descr) {
    /*
    The first 6 bytes are a magic string: exactly "x93NUMPY".
    The next 1 byte is an unsigned byte: the major version number of the file
    format, e.g. x01. The next 1 byte is an unsigned byte: the minor version
    number of the file format, e.g. x00. Note: the version of the file format
    is not tied to the version of the numpy package. The next 2 bytes form a
    little-endian unsigned short int: the length of the header data
    HEADER_LEN. The next HEADER_LEN bytes form the header data describing the
    array's format. It is an ASCII string which contains a Python literal
    expression of a dictionary. It is terminated by a newline ('n') and
    padded with spaces
    ('x20') to make the total length of the magic string + 4 + HEADER_LEN be
    evenly divisible by 16 for alignment purposes. The dictionary contains
    three keys:
    "descr" : dtype.descr
    An object that can be passed as an argument to the numpy.dtype()
    constructor to create the array's dtype. For repeatability and
    readability, this dictionary is formatted using pprint.pformat() so the
    keys are in alphabetic order.
    */

    // remove trailing newline
    if (header.back() != '\n')
        fprintf(stderr, "invalid header");
    header.pop_back();

    // parse the dictionary
    std::vector<std::string> keys{"descr"};
    auto dict_map = npy::pyparse::parse_dict(header, keys);

    if (dict_map.size() == 0)
        fprintf(stderr, "invalid dictionary in header");

    std::string descr_s = dict_map["descr"];
    parse_typestring(descr_s);  // validity check only; result is discarded
    // remove the surrounding quotes
    descr = npy::pyparse::parse_str(descr_s);
    return;
}
inline void parse_header(std::string header, std::string& descr,
bool& fortran_order,
std::vector<ndarray_len_t>& shape) {
/*
The first 6 bytes are a magic string: exactly "x93NUMPY".
The next 1 byte is an unsigned byte: the major version number of the file
format, e.g. x01. The next 1 byte is an unsigned byte: the minor version
number of the file format, e.g. x00. Note: the version of the file format
is not tied to the version of the numpy package. The next 2 bytes form a
little-endian unsigned short int: the length of the header data
HEADER_LEN. The next HEADER_LEN bytes form the header data describing the
array's format. It is an ASCII string which contains a Python literal
expression of a dictionary. It is terminated by a newline ('n') and
padded with spaces
('x20') to make the total length of the magic string + 4 + HEADER_LEN be
evenly divisible by 16 for alignment purposes. The dictionary contains
three keys:
"descr" : dtype.descr
An object that can be passed as an argument to the numpy.dtype()
constructor to create the array's dtype. "fortran_order" : bool Whether
the array data is Fortran-contiguous or not. Since Fortran-contiguous
arrays are a common form of non-C-contiguity, we allow them to be written
directly to disk for efficiency. "shape" : tuple of int The shape of the
array. For repeatability and readability, this dictionary is formatted
using pprint.pformat() so the keys are in alphabetic order.
*/
// remove trailing newline
if (header.back() != '\n')
fprintf(stderr, "invalid header");
header.pop_back();
// parse the dictionary
std::vector<std::string> keys{"descr", "fortran_order", "shape"};
auto dict_map = npy::pyparse::parse_dict(header, keys);
if (dict_map.size() == 0)
fprintf(stderr, "invalid dictionary in header");
std::string descr_s = dict_map["descr"];
std::string fortran_s = dict_map["fortran_order"];
std::string shape_s = dict_map["shape"];
// TODO: extract info from typestring
parse_typestring(descr_s);
// remove
descr = npy::pyparse::parse_str(descr_s);
// convert literal Python bool to C++ bool
fortran_order = npy::pyparse::parse_bool(fortran_s);
// parse the shape tuple
auto shape_v = npy::pyparse::parse_tuple(shape_s);
if (shape_v.size() == 0)
fprintf(stderr, "invalid shape tuple in header");
for (auto item : shape_v) {
ndarray_len_t dim = static_cast<ndarray_len_t>(std::stoul(item));
shape.push_back(dim);
}
}
inline std::string write_header_dict(const std::string& descr,
                                     bool fortran_order,
                                     const std::vector<ndarray_len_t>& shape) {
    // Assemble the Python dict literal stored in the npy header. The
    // trailing ", }" matches numpy's own pprint formatting.
    std::ostringstream out;
    out << "{'descr': '" << descr
        << "', 'fortran_order': " << npy::pyparse::write_boolean(fortran_order)
        << ", 'shape': " << npy::pyparse::write_tuple(shape) << ", }";
    return out.str();
}
inline void write_header(std::ostream& out, const std::string& descr,
                         bool fortran_order,
                         const std::vector<ndarray_len_t>& shape_v) {
    // Serialize the full npy header: magic bytes, version, little-endian
    // header-length field, the dict literal, space padding, and the final
    // newline.
    std::string header_dict = write_header_dict(descr, fortran_order, shape_v);
    // total length assuming the 2-byte (version 1.0) length field
    size_t length = magic_string_length + 2 + 2 + header_dict.length() + 1;
    unsigned char version[2] = {1, 0};
    if (length >= 255 * 255) {
        // too large for a 16-bit length field: switch to version 2.0, which
        // stores the header length in 4 bytes
        length = magic_string_length + 2 + 4 + header_dict.length() + 1;
        version[0] = 2;
        version[1] = 0;
    }
    // pad with spaces so the total header size is a multiple of 16
    size_t padding_len = 16 - length % 16;
    std::string padding(padding_len, ' ');
    write_magic(out, version[0], version[1]);
    // the length field covers dict + padding + the trailing '\n'
    size_t payload_len = header_dict.length() + padding.length() + 1;
    if (version[0] == 1 && version[1] == 0) {
        char le16[2];
        uint16_t header_len = static_cast<uint16_t>(payload_len);
        le16[0] = (header_len >> 0) & 0xff;
        le16[1] = (header_len >> 8) & 0xff;
        out.write(le16, 2);
    } else {
        char le32[4];
        uint32_t header_len = static_cast<uint32_t>(payload_len);
        for (int i = 0; i < 4; ++i)
            le32[i] = (header_len >> (8 * i)) & 0xff;
        out.write(le32, 4);
    }
    out << header_dict << padding << '\n';
}
inline std::string read_header(std::istream& istream) {
    // Read and return the raw header dict string that follows the magic
    // bytes and the two version bytes.
    // check magic bytes and version number
    unsigned char v_major, v_minor;
    read_magic(istream, v_major, v_minor);
    uint32_t header_length = 0;
    if (v_major == 1 && v_minor == 0) {
        // version 1.0: 2-byte little-endian header length.
        // Read into unsigned chars: plain `char` may be signed, and a byte
        // >= 0x80 would sign-extend when shifted, corrupting the length.
        unsigned char header_len_le16[2];
        istream.read(reinterpret_cast<char*>(header_len_le16), 2);
        header_length = (header_len_le16[0] << 0) | (header_len_le16[1] << 8);
        if ((magic_string_length + 2 + 2 + header_length) % 16 != 0) {
            // TODO: display warning
        }
    } else if (v_major == 2 && v_minor == 0) {
        // version 2.0: 4-byte little-endian header length
        unsigned char header_len_le32[4];
        istream.read(reinterpret_cast<char*>(header_len_le32), 4);
        header_length = (static_cast<uint32_t>(header_len_le32[0]) << 0) |
                        (static_cast<uint32_t>(header_len_le32[1]) << 8) |
                        (static_cast<uint32_t>(header_len_le32[2]) << 16) |
                        (static_cast<uint32_t>(header_len_le32[3]) << 24);
        if ((magic_string_length + 2 + 4 + header_length) % 16 != 0) {
            // TODO: display warning
        }
    } else {
        fprintf(stderr, "unsupported file format version");
    }
    // Size the buffer before reading: the previous reserve() + read into
    // data() wrote past the vector's size(), which is undefined behavior.
    std::string header(header_length, '\0');
    istream.read(&header[0], header_length);
    return header;
}
inline ndarray_len_t comp_size(const std::vector<ndarray_len_t>& shape) {
    // Total element count: the product of all dimension lengths
    // (1 for an empty/scalar shape).
    ndarray_len_t total = 1;
    for (const auto& dim : shape)
        total *= dim;
    return total;
}
template <typename Scalar>
inline void SaveArrayAsNumpy(const std::string& filename, bool fortran_order,
unsigned int n_dims, const unsigned long shape[],
const std::vector<Scalar>& data) {
Typestring typestring_o(data);
std::string typestring = typestring_o.str();
std::ofstream stream(filename, std::ofstream::binary);
if (!stream) {
fprintf(stderr, "io error: failed to open a file.");
}
std::vector<ndarray_len_t> shape_v(shape, shape + n_dims);
write_header(stream, typestring, fortran_order, shape_v);
auto size = static_cast<size_t>(comp_size(shape_v));
stream.write(reinterpret_cast<const char*>(data.data()),
sizeof(Scalar) * size);
}
template <typename Scalar>
inline void LoadArrayFromNumpy(const std::string& filename,
                               std::vector<unsigned long>& shape,
                               std::vector<Scalar>& data) {
    // Convenience overload that discards the fortran_order flag.
    bool fortran_order = false;
    LoadArrayFromNumpy<Scalar>(filename, shape, fortran_order, data);
}
template <typename Scalar>
inline void LoadArrayFromNumpy(const std::string& filename,
std::vector<unsigned long>& shape,
bool& fortran_order, std::vector<Scalar>& data) {
std::ifstream stream(filename, std::ifstream::binary);
if (!stream) {
fprintf(stderr, "io error: failed to open a file.");
}
std::string header = read_header(stream);
// parse header
std::string typestr;
parse_header(header, typestr, fortran_order, shape);
// check if the typestring matches the given one
Typestring typestring_o{data};
std::string expect_typestr = typestring_o.str();
if (typestr != expect_typestr) {
fprintf(stderr, "formatting error: typestrings not matching");
}
// compute the data size based on the shape
auto size = static_cast<size_t>(comp_size(shape));
data.resize(size);
// read the data
stream.read(reinterpret_cast<char*>(data.data()), sizeof(Scalar) * size);
}
inline void LoadArrayFromNumpy(const std::string& filename,
                               std::string& type_str,
                               std::vector<ndarray_len_t>& shape,
                               std::vector<int8_t>& data) {
    // Type-erased loader: read any npy file into a raw byte buffer, reporting
    // the dtype typestring and the shape and leaving interpretation of the
    // elements to the caller.
    std::ifstream stream(filename, std::ifstream::binary);
    if (!stream) {
        fprintf(stderr, "io error: failed to open a file.");
    }
    std::string header = read_header(stream);
    bool fortran_order;
    // parse header
    parse_header(header, type_str, fortran_order, shape);
    // The element size is the full numeric suffix of the typestring, e.g.
    // 4 in "<f4". The previous code looked at only the final character,
    // which breaks for multi-digit sizes such as "<c16" or "<S32".
    size_t digits_pos = type_str.find_first_of("0123456789");
    size_t elem_size = 0;  // 0 (nothing read) if no size digits are present
    if (digits_pos != std::string::npos)
        elem_size = std::stoul(type_str.substr(digits_pos));
    // compute the data size based on the shape
    auto byte_size = elem_size * static_cast<size_t>(comp_size(shape));
    data.resize(byte_size);
    // read the data
    stream.read(reinterpret_cast<char*>(data.data()), byte_size);
}
} // namespace npy
#endif // NPY_H
/**
 * \file sdk/load-and-run/test/json_loader_test.cpp
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include <cfloat>
#include <cstdint>
#include <cstdio>
#include <cmath>
#include "../src/json_loader.h"
using namespace mgb;
void test_number(double real, std::string str) {
JsonLoader json;
auto root = json.load(str.data(), str.size());
mgb_assert(root->is_number());
mgb_assert(std::fabs(real - root->number()) <= DBL_EPSILON);
}
void test_string(std::string str, std::string json_str) {
JsonLoader json;
auto root = json.load(json_str.data(), json_str.size());
mgb_assert(root->is_str());
mgb_assert(str == root->str());
}
void test_array(size_t num, std::string str) {
JsonLoader json;
auto root = json.load(str.data(), str.size());
mgb_assert(root->is_array());
mgb_assert(root->len() == num);
}
void test_object(size_t num, std::string str) {
JsonLoader json;
auto root = json.load(str.data(), str.size());
mgb_assert(root->is_object());
mgb_assert(root->len() == num);
}
int main() {
    // Smoke tests for JsonLoader, grouped by JSON value kind. Each helper
    // aborts via mgb_assert on failure, so reaching the final printf means
    // every case passed.
    // numbers: plain, scientific notation, upper/lower-case exponents
    test_number(1.0, "1.0");
    test_number(1e10, "1e10");
    test_number(0.2345678, "0.02345678e1");
    test_number(-10086, "-1.0086E4");
    test_number(1.7976931348623157e+308,
                "1.7976931348623157e+308");  // max double
    // strings, including a backslash escape sequence
    test_string("a", "\"a\"");
    test_string("\\table", "\"\\table\"");
    // arrays: empty, flat, mixed element types, and nesting
    test_array(0, " [ ] ");
    test_array(4, " [ 0.1, 0.2,0.3, 1990 ] ");
    test_array(2, " [ 0.1, \"hello-world\"]");
    test_array(3, " [ 0.1, \"hello-world\", [2.0, 33]]");
    test_array(1, " [ [ [ [2020] ], [2021], [[2022]] ] ]");
    // objects: empty, single key, and nested objects (len() counts only
    // top-level keys, hence 1 for the nested cases)
    test_object(0, " { } ");
    test_object(1, "{\"key1\": 2023}");
    test_object(1,
                "{\"key1\": { \"key2\": { "
                "\"key3\": \"value\" } } }");
    test_object(1, "{\"key1\":{\"key2\":{}}}");
    printf("test passed\n");
    return 0;
}
\ No newline at end of file
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册