提交 42160b31 编写于 作者: Y yejianwu

load model in pb

上级 3b11a1b2
......@@ -16,6 +16,7 @@ cc_library(
srcs = ["statistics.cc"],
hdrs = ["statistics.h"],
linkstatic = 1,
copts = ["-Werror"],
deps = [
"//mace/kernels",
"//mace/utils",
......@@ -28,7 +29,7 @@ cc_binary(
"benchmark_model.cc",
],
copts = if_android(["-DMACE_ENABLE_OPENCL"]),
linkopts = if_openmp_enabled(["-fopenmp"]),
linkopts = ["-Werror"] + if_openmp_enabled(["-fopenmp"]),
linkstatic = 1,
deps = [
":statistics",
......@@ -40,6 +41,7 @@ cc_binary(
cc_library(
name = "libmace_merged",
copts = ["-Werror"],
srcs = [
"libmace_merged.a",
],
......@@ -51,6 +53,7 @@ cc_binary(
srcs = ["model_throughput_test.cc"],
linkopts = if_openmp_enabled(["-fopenmp"]),
linkstatic = 1,
copts = ["-Werror"],
deps = [
":libmace_merged",
"//external:gflags_nothreads",
......
......@@ -24,6 +24,7 @@
#include "mace/public/mace.h"
#include "mace/public/mace_runtime.h"
#include "mace/utils/logging.h"
#include "mace/utils/utils.h"
#include "mace/benchmark/statistics.h"
#include "mace/codegen/engine/mace_engine_factory.h"
......@@ -189,6 +190,8 @@ DEFINE_string(max_time, "10.0", "length to run max");
DEFINE_int32(warmup_runs, 1, "how many runs to initialize model");
DEFINE_string(model_data_file, "",
"model data file name, used when EMBED_MODEL_DATA set to 0");
DEFINE_string(model_file, "",
"model file name, used when load mace model in pb");
DEFINE_int32(gpu_perf_hint, 3, "0:DEFAULT/1:LOW/2:NORMAL/3:HIGH");
DEFINE_int32(gpu_priority_hint, 3, "0:DEFAULT/1:LOW/2:NORMAL/3:HIGH");
DEFINE_int32(omp_num_threads, -1, "num of openmp threads");
......@@ -202,6 +205,7 @@ int Main(int argc, char **argv) {
gflags::ParseCommandLineFlags(&argc, &argv, true);
LOG(INFO) << "Model name: [" << FLAGS_model_name << "]";
LOG(INFO) << "Model_file: " << FLAGS_model_file;
LOG(INFO) << "Device: [" << FLAGS_device << "]";
LOG(INFO) << "gpu_perf_hint: [" << FLAGS_gpu_perf_hint << "]";
LOG(INFO) << "gpu_priority_hint: [" << FLAGS_gpu_priority_hint << "]";
......@@ -268,18 +272,25 @@ int Main(int argc, char **argv) {
std::shared_ptr<mace::MaceEngine> engine;
MaceStatus create_engine_status;
// Create Engine
if (FLAGS_model_data_file.empty()) {
const char *model_data_file_ptr =
FLAGS_model_data_file.empty() ? nullptr : FLAGS_model_data_file.c_str();
if (FLAGS_model_file != "") {
std::vector<unsigned char> model_pb_data;
if (!mace::ReadBinaryFile(&model_pb_data, FLAGS_model_file)) {
LOG(FATAL) << "Failed to read file: " << FLAGS_model_file;
}
create_engine_status =
CreateMaceEngine(FLAGS_model_name.c_str(),
nullptr,
model_data_file_ptr,
input_names,
output_names,
device_type,
&engine);
&engine,
model_pb_data);
} else {
create_engine_status =
CreateMaceEngine(FLAGS_model_name.c_str(),
FLAGS_model_data_file.c_str(),
model_data_file_ptr,
input_names,
output_names,
device_type,
......
......@@ -46,7 +46,7 @@ extern const unsigned char *LoadModelData(const char *model_data_file);
extern void UnloadModelData(const unsigned char *model_data);
extern NetDef CreateNet(const unsigned char *model_data);
extern NetDef CreateNet(const std::vector<unsigned char> &model_pb = {});
extern const std::string ModelChecksum();
......@@ -60,7 +60,7 @@ extern const unsigned char *LoadModelData(const char *model_data_file);
extern void UnloadModelData(const unsigned char *model_data);
extern NetDef CreateNet(const unsigned char *model_data);
extern NetDef CreateNet(const std::vector<unsigned char> &model_pb = {});
extern const std::string ModelChecksum();
......@@ -74,7 +74,7 @@ extern const unsigned char *LoadModelData(const char *model_data_file);
extern void UnloadModelData(const unsigned char *model_data);
extern NetDef CreateNet(const unsigned char *model_data);
extern NetDef CreateNet(const std::vector<unsigned char> &model_pb = {});
extern const std::string ModelChecksum();
......@@ -255,10 +255,10 @@ int Main(int argc, char **argv) {
const unsigned char *cpu_model_data =
mace::MACE_CPU_MODEL_TAG::LoadModelData(
FLAGS_cpu_model_data_file.c_str());
NetDef cpu_net_def = mace::MACE_CPU_MODEL_TAG::CreateNet(cpu_model_data);
NetDef cpu_net_def = mace::MACE_CPU_MODEL_TAG::CreateNet();
mace::MaceEngine cpu_engine(&cpu_net_def, DeviceType::CPU, input_names,
output_names);
output_names, cpu_model_data);
LOG(INFO) << "CPU Warm up run";
t0 = NowMicros();
......@@ -273,10 +273,10 @@ int Main(int argc, char **argv) {
const unsigned char *gpu_model_data =
mace::MACE_GPU_MODEL_TAG::LoadModelData(
FLAGS_gpu_model_data_file.c_str());
NetDef gpu_net_def = mace::MACE_GPU_MODEL_TAG::CreateNet(gpu_model_data);
NetDef gpu_net_def = mace::MACE_GPU_MODEL_TAG::CreateNet();
mace::MaceEngine gpu_engine(&gpu_net_def, DeviceType::GPU, input_names,
output_names);
output_names, gpu_model_data);
mace::MACE_GPU_MODEL_TAG::UnloadModelData(gpu_model_data);
LOG(INFO) << "GPU Warm up run";
......@@ -292,10 +292,10 @@ int Main(int argc, char **argv) {
const unsigned char *dsp_model_data =
mace::MACE_DSP_MODEL_TAG::LoadModelData(
FLAGS_dsp_model_data_file.c_str());
NetDef dsp_net_def = mace::MACE_DSP_MODEL_TAG::CreateNet(dsp_model_data);
NetDef dsp_net_def = mace::MACE_DSP_MODEL_TAG::CreateNet();
mace::MaceEngine dsp_engine(&dsp_net_def, DeviceType::HEXAGON, input_names,
output_names);
output_names, dsp_model_data);
mace::MACE_DSP_MODEL_TAG::UnloadModelData(dsp_model_data);
LOG(INFO) << "DSP Warm up run";
......
......@@ -17,7 +17,7 @@
#include <set>
#include "mace/kernels/conv_pool_2d_util.h"
#include "mace/public/mace_types.h"
#include "mace/proto/mace.pb.h"
#include "mace/utils/logging.h"
#include "mace/utils/string_util.h"
......@@ -59,10 +59,10 @@ std::string ShapeToString(const std::vector<OutputShape> &output_shape) {
std::stringstream stream;
stream << "[";
for (size_t i = 0; i < output_shape.size(); ++i) {
const std::vector<index_t> &dims = output_shape[i].dims();
for (size_t j = 0; j < dims.size(); ++j) {
stream << dims[j];
if (j != dims.size() - 1) {
size_t dims_size = output_shape[i].dims_size();
for (size_t j = 0; j < dims_size; ++j) {
stream << output_shape[i].dims(j);
if (j != dims_size - 1) {
stream << ",";
}
}
......
......@@ -10,6 +10,7 @@ cc_library(
srcs = glob(["models/*/*.cc"]),
hdrs = glob(["models/*/*.h"]),
linkstatic = 1,
copts = ["-Werror"],
deps = [
"//mace/core",
"//mace/ops",
......@@ -19,24 +20,28 @@ cc_library(
cc_library(
name = "generated_opencl",
srcs = glob(["opencl/*.cc"]),
copts = ["-Werror"],
linkstatic = 1,
)
cc_library(
name = "generated_tuning_params",
srcs = ["tuning/tuning_params.cc"],
copts = ["-Werror"],
linkstatic = 1,
)
cc_library(
name = "generated_version",
srcs = ["version/version.cc"],
copts = ["-Werror"],
linkstatic = 1,
)
cc_library(
name = "generated_mace_engine_factory",
hdrs = ["engine/mace_engine_factory.h"],
copts = ["-Werror"],
deps = [
"//mace/public",
],
......
......@@ -42,7 +42,9 @@ cc_library(
"runtime/opencl/*.h",
],
)) + if_hexagon_enabled(glob(["runtime/hexagon/*.h"])),
copts = if_openmp_enabled([
copts = [
"-Werror",
] + if_openmp_enabled([
"-fopenmp",
"-DMACE_ENABLE_OPENMP",
]) + if_android([
......@@ -53,8 +55,9 @@ cc_library(
"-lm",
]),
deps = [
"//mace/utils",
"//mace/codegen:generated_version",
"//mace/proto:mace_cc",
"//mace/utils",
] + if_android([
":opencl_headers",
"//mace/codegen:generated_opencl",
......@@ -74,6 +77,7 @@ cc_library(
hdrs = glob([
"runtime/opencl/*.h",
]),
copts = ["-Werror"],
deps = [
"@opencl_clhpp//:opencl_clhpp",
"@opencl_headers//:opencl20_headers",
......@@ -90,6 +94,7 @@ cc_library(
hdrs = [
"testing/test_benchmark.h",
],
copts = ["-Werror"],
deps = [
":core",
"//external:gflags_nothreads",
......
......@@ -19,8 +19,8 @@
#include <vector>
#include <map>
#include "mace/proto/mace.pb.h"
#include "mace/public/mace.h"
#include "mace/public/mace_types.h"
namespace mace {
......
......@@ -85,7 +85,8 @@ class MaceEngine::Impl {
explicit Impl(const NetDef *net_def,
DeviceType device_type,
const std::vector<std::string> &input_nodes,
const std::vector<std::string> &output_nodes);
const std::vector<std::string> &output_nodes,
const unsigned char *model_data);
~Impl();
MaceStatus Run(const std::map<std::string, MaceTensor> &inputs,
......@@ -107,7 +108,8 @@ class MaceEngine::Impl {
MaceEngine::Impl::Impl(const NetDef *net_def,
DeviceType device_type,
const std::vector<std::string> &input_nodes,
const std::vector<std::string> &output_nodes)
const std::vector<std::string> &output_nodes,
const unsigned char *model_data)
: op_registry_(new OperatorRegistry()),
device_type_(device_type),
ws_(new Workspace()),
......@@ -136,14 +138,14 @@ MaceEngine::Impl::Impl(const NetDef *net_def,
int dsp_mode =
ArgumentHelper::GetSingleArgument<NetDef, int>(*net_def, "dsp_mode", 0);
hexagon_controller_->SetGraphMode(dsp_mode);
MACE_CHECK(hexagon_controller_->SetupGraph(*net_def),
MACE_CHECK(hexagon_controller_->SetupGraph(*net_def, model_data),
"hexagon setup graph error");
if (VLOG_IS_ON(2)) {
hexagon_controller_->PrintGraph();
}
} else {
#endif
ws_->LoadModelTensor(*net_def, device_type);
ws_->LoadModelTensor(*net_def, device_type, model_data);
// Init model
auto net = CreateNet(op_registry_, *net_def, ws_.get(), device_type,
......@@ -247,9 +249,11 @@ MaceStatus MaceEngine::Impl::Run(
MaceEngine::MaceEngine(const NetDef *net_def,
DeviceType device_type,
const std::vector<std::string> &input_nodes,
const std::vector<std::string> &output_nodes) {
const std::vector<std::string> &output_nodes,
const unsigned char *model_data) {
impl_ = std::unique_ptr<MaceEngine::Impl>(
new MaceEngine::Impl(net_def, device_type, input_nodes, output_nodes));
new MaceEngine::Impl(net_def, device_type, input_nodes, output_nodes,
model_data));
}
MaceEngine::~MaceEngine() = default;
......
// Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <algorithm>
#include <functional>
#include <memory>
#include <numeric>
#include <utility>

#include "mace/public/mace_types.h"
#include "mace/utils/logging.h"
namespace mace {
// ConstTensor: a non-owning view of one weight tensor inside the model
// data blob.  `data` is borrowed — the caller keeps the backing memory
// alive for the lifetime of this object.
//
// data_size_ is the element count (not bytes): the product of all dims,
// or 1 for an empty dims list.
ConstTensor::ConstTensor(const std::string &name,
                         const unsigned char *data,
                         const std::vector<int64_t> &dims,
                         const DataType data_type,
                         uint32_t node_id)
    : name_(name),
      data_(data),
      // Seed the product with an int64_t: the original used the int
      // literal 1, so std::accumulate carried the running product in an
      // int and truncated it at every step for large tensors.
      data_size_(std::accumulate(dims.begin(), dims.end(),
                                 static_cast<int64_t>(1),
                                 std::multiplies<int64_t>())),
      dims_(dims.begin(), dims.end()),
      data_type_(data_type),
      node_id_(node_id) {}
// Overload taking the data type as a raw int (e.g. as decoded from a
// serialized model); it is cast to the DataType enum unchecked — the
// caller is responsible for passing a valid enum value.
ConstTensor::ConstTensor(const std::string &name,
                         const unsigned char *data,
                         const std::vector<int64_t> &dims,
                         const int data_type,
                         uint32_t node_id)
    : name_(name),
      data_(data),
      // int64_t seed — the original's int literal 1 made std::accumulate
      // truncate the running product to int (see the DataType overload).
      data_size_(std::accumulate(dims.begin(), dims.end(),
                                 static_cast<int64_t>(1),
                                 std::multiplies<int64_t>())),
      dims_(dims.begin(), dims.end()),
      data_type_(static_cast<DataType>(data_type)),
      node_id_(node_id) {}
// ---- ConstTensor trivial accessors ----
const std::string &ConstTensor::name() const { return name_; }
// Borrowed pointer into the model data blob (see constructor).
const unsigned char *ConstTensor::data() const { return data_; }
// Number of elements, not bytes.
int64_t ConstTensor::data_size() const { return data_size_; }
const std::vector<int64_t> &ConstTensor::dims() const { return dims_; }
DataType ConstTensor::data_type() const { return data_type_; }
// Associated graph node id (0 if unused — TODO confirm against callers).
uint32_t ConstTensor::node_id() const { return node_id_; }
Argument::Argument() : has_bits_(0) {}
void Argument::CopyFrom(const Argument &from) {
this->name_ = from.name();
this->f_ = from.f();
this->i_ = from.i();
this->s_ = from.s();
auto floats = from.floats();
this->floats_.resize(floats.size());
std::copy(floats.begin(), floats.end(), this->floats_.begin());
auto ints = from.ints();
this->ints_.resize(ints.size());
std::copy(ints.begin(), ints.end(), this->ints_.begin());
auto strings = from.floats();
this->strings_.resize(strings.size());
std::copy(floats.begin(), floats.end(), this->floats_.begin());
this->has_bits_ = from.has_bits_;
}
const std::string &Argument::name() const { return name_; }
void Argument::set_name(const std::string &value) { name_ = value; }
bool Argument::has_f() const { return (has_bits_ & 0x00000001u) != 0; }
void Argument::set_has_f() { has_bits_ |= 0x00000001u; }
float Argument::f() const { return f_; }
void Argument::set_f(float value) {
set_has_f();
f_ = value;
}
bool Argument::has_i() const { return (has_bits_ & 0x00000002u) != 0; }
void Argument::set_has_i() { has_bits_ |= 0x00000002u; }
int64_t Argument::i() const { return i_; }
void Argument::set_i(int64_t value) {
set_has_i();
i_ = value;
}
bool Argument::has_s() const { return (has_bits_ & 0x00000004u) != 0; }
void Argument::set_has_s() { has_bits_ |= 0x00000004u; }
std::string Argument::s() const { return s_; }
void Argument::set_s(const std::string &value) {
set_has_s();
s_ = value;
}
const std::vector<float> &Argument::floats() const { return floats_; }
void Argument::add_floats(float value) { floats_.push_back(value); }
void Argument::set_floats(const std::vector<float> &value) {
floats_.resize(value.size());
std::copy(value.begin(), value.end(), floats_.begin());
}
const std::vector<int64_t> &Argument::ints() const { return ints_; }
void Argument::add_ints(int64_t value) { ints_.push_back(value); }
void Argument::set_ints(const std::vector<int64_t> &value) {
ints_.resize(value.size());
std::copy(value.begin(), value.end(), ints_.begin());
}
const std::vector<std::string> &Argument::strings() const { return strings_; }
void Argument::add_strings(const ::std::string &value) {
strings_.push_back(value);
}
void Argument::set_strings(const std::vector<std::string> &value) {
strings_.resize(value.size());
std::copy(value.begin(), value.end(), strings_.begin());
}
// NodeInput: identifies one input edge of a graph node — the producing
// node's id plus which of its output ports to read.
NodeInput::NodeInput(int node_id, int output_port)
    : node_id_(node_id), output_port_(output_port) {}

void NodeInput::CopyFrom(const NodeInput &from) {
  node_id_ = from.node_id();
  output_port_ = from.output_port();
}

int NodeInput::node_id() const { return node_id_; }
void NodeInput::set_node_id(int node_id) { node_id_ = node_id; }

int NodeInput::output_port() const { return output_port_; }
void NodeInput::set_output_port(int output_port) { output_port_ = output_port; }
// OutputShape: a list of dimension sizes for one operator output.
OutputShape::OutputShape() {}
OutputShape::OutputShape(const std::vector<int64_t> &dims)
    : dims_(dims.begin(), dims.end()) {}

void OutputShape::CopyFrom(const OutputShape &from) {
  // Single-step replacement of the whole dims list.
  dims_.assign(from.dims().begin(), from.dims().end());
}

const std::vector<int64_t> &OutputShape::dims() const { return dims_; }
// Operator Def

// Deep copy of every OperatorDef field, including the nnlib (DSP)
// specific ones and the has_* presence bits.
void OperatorDef::CopyFrom(const OperatorDef &from) {
  name_ = from.name();
  type_ = from.type();

  // Bind the source lists by const reference: the accessors return const
  // refs, and the original `auto` bindings silently copied each vector
  // once before copying it again into the member.
  const auto &from_input = from.input();
  input_.assign(from_input.begin(), from_input.end());
  const auto &from_output = from.output();
  output_.assign(from_output.begin(), from_output.end());

  const auto &from_arg = from.arg();
  arg_.resize(from_arg.size());
  for (size_t i = 0; i < from_arg.size(); ++i) {
    arg_[i].CopyFrom(from_arg[i]);
  }

  const auto &from_output_shape = from.output_shape();
  output_shape_.resize(from_output_shape.size());
  for (size_t i = 0; i < from_output_shape.size(); ++i) {
    output_shape_[i].CopyFrom(from_output_shape[i]);
  }

  const auto &from_data_type = from.output_type();
  output_type_.assign(from_data_type.begin(), from_data_type.end());

  const auto &mem_ids = from.mem_id();
  mem_id_.assign(mem_ids.begin(), mem_ids.end());

  // nnlib
  node_id_ = from.node_id();
  op_id_ = from.op_id();
  padding_ = from.padding();

  const auto &from_node_input = from.node_input();
  node_input_.resize(from_node_input.size());
  for (size_t i = 0; i < from_node_input.size(); ++i) {
    node_input_[i].CopyFrom(from_node_input[i]);
  }

  const auto &from_out_max_byte_size = from.out_max_byte_size();
  out_max_byte_size_.assign(from_out_max_byte_size.begin(),
                            from_out_max_byte_size.end());

  has_bits_ = from.has_bits_;
}
// ---- OperatorDef: name/type with presence bits, plus the DSP (nnlib)
// bookkeeping fields. ----

const std::string &OperatorDef::name() const { return name_; }
// (Parameter renamed from `name_`, which shadowed the member.)
void OperatorDef::set_name(const std::string &value) {
  set_has_name();
  name_ = value;
}
bool OperatorDef::has_name() const { return (has_bits_ & 0x00000001u) != 0; }
void OperatorDef::set_has_name() { has_bits_ |= 0x00000001u; }

const std::string &OperatorDef::type() const { return type_; }
void OperatorDef::set_type(const std::string &value) {
  set_has_type();
  type_ = value;
}
bool OperatorDef::has_type() const { return (has_bits_ & 0x00000002u) != 0; }
void OperatorDef::set_has_type() { has_bits_ |= 0x00000002u; }

const std::vector<int> &OperatorDef::mem_id() const { return mem_id_; }
void OperatorDef::set_mem_id(const std::vector<int> &value) {
  mem_id_.assign(value.begin(), value.end());
}

uint32_t OperatorDef::node_id() const { return node_id_; }
void OperatorDef::set_node_id(uint32_t node_id) { node_id_ = node_id; }
uint32_t OperatorDef::op_id() const { return op_id_; }
uint32_t OperatorDef::padding() const { return padding_; }
void OperatorDef::set_padding(uint32_t padding) { padding_ = padding; }

const std::vector<NodeInput> &OperatorDef::node_input() const {
  return node_input_;
}
void OperatorDef::add_node_input(const NodeInput &value) {
  node_input_.push_back(value);
}

const std::vector<int> &OperatorDef::out_max_byte_size() const {
  return out_max_byte_size_;
}
void OperatorDef::add_out_max_byte_size(int value) {
  out_max_byte_size_.push_back(value);
}
const std::vector<std::string> &OperatorDef::input() const { return input_; }

// Indexed access with a bounds check.
const std::string &OperatorDef::input(int index) const {
  // Bug fix: the original check allowed index == input_.size(), which
  // would read one element past the end of the vector.
  MACE_CHECK(0 <= index && index < static_cast<int>(input_.size()));
  return input_[index];
}

// Appends an empty slot and returns a pointer to it so the caller can
// fill it in place (protobuf-style mutator).
std::string *OperatorDef::add_input() {
  input_.push_back("");
  return &input_.back();
}

void OperatorDef::add_input(const ::std::string &value) {
  input_.push_back(value);
}

// Rvalue overload: actually move — the original push_back(value) copied,
// because a named rvalue reference is an lvalue.
void OperatorDef::add_input(::std::string &&value) {
  input_.push_back(std::move(value));
}

// Replaces the whole input list with a copy of `value`.
void OperatorDef::set_input(const std::vector<std::string> &value) {
  input_.assign(value.begin(), value.end());
}
const std::vector<std::string> &OperatorDef::output() const { return output_; }

// Indexed access with a bounds check.
const std::string &OperatorDef::output(int index) const {
  // Bug fix: the original check allowed index == output_.size(), which
  // would read one element past the end of the vector.
  MACE_CHECK(0 <= index && index < static_cast<int>(output_.size()));
  return output_[index];
}

// Appends an empty slot and returns a pointer to it so the caller can
// fill it in place (protobuf-style mutator).
std::string *OperatorDef::add_output() {
  output_.push_back("");
  return &output_.back();
}

void OperatorDef::add_output(const ::std::string &value) {
  output_.push_back(value);
}

// Rvalue overload: actually move — the original push_back(value) copied,
// because a named rvalue reference is an lvalue.
void OperatorDef::add_output(::std::string &&value) {
  output_.push_back(std::move(value));
}

// Replaces the whole output list with a copy of `value`.
void OperatorDef::set_output(const std::vector<std::string> &value) {
  output_.assign(value.begin(), value.end());
}
// ---- OperatorDef: repeated sub-message fields. ----

const std::vector<Argument> &OperatorDef::arg() const { return arg_; }
// Default-constructs a new Argument in place and hands back a pointer
// for the caller to populate.
Argument *OperatorDef::add_arg() {
  arg_.emplace_back();
  return &arg_.back();
}

const std::vector<OutputShape> &OperatorDef::output_shape() const {
  return output_shape_;
}
void OperatorDef::add_output_shape(const OutputShape &value) {
  output_shape_.push_back(value);
}

const std::vector<DataType> &OperatorDef::output_type() const {
  return output_type_;
}
// Replaces the whole output-type list with a copy of `value`.
void OperatorDef::set_output_type(const std::vector<DataType> &value) {
  output_type_.assign(value.begin(), value.end());
}
// MemoryBlock: one reusable buffer in the memory-optimization arena,
// identified by mem_id with a 2-D extent (x, y).
MemoryBlock::MemoryBlock(int mem_id, uint32_t x, uint32_t y)
    : mem_id_(mem_id), x_(x), y_(y) {}
int MemoryBlock::mem_id() const { return mem_id_; }
uint32_t MemoryBlock::x() const { return x_; }
uint32_t MemoryBlock::y() const { return y_; }

// MemoryArena: the collection of shared memory blocks.
const std::vector<MemoryBlock> &MemoryArena::mem_block() const {
  return mem_block_;
}
std::vector<MemoryBlock> &MemoryArena::mutable_mem_block() {
  return mem_block_;
}
int MemoryArena::mem_block_size() const {
  return static_cast<int>(mem_block_.size());
}
// InputInfo — read-only metadata describing one model input.
const std::string &InputInfo::name() const { return name_; }
int32_t InputInfo::node_id() const { return node_id_; }
int32_t InputInfo::max_byte_size() const { return max_byte_size_; }
DataType InputInfo::data_type() const { return data_type_; }
const std::vector<int32_t> &InputInfo::dims() const { return dims_; }
// OutputInfo — metadata describing one model output; data_type and dims
// are settable so the runtime can fill them in after the fact.
const std::string &OutputInfo::name() const { return name_; }
int32_t OutputInfo::node_id() const { return node_id_; }
int32_t OutputInfo::max_byte_size() const { return max_byte_size_; }
DataType OutputInfo::data_type() const { return data_type_; }
void OutputInfo::set_data_type(DataType data_type) { data_type_ = data_type; }
const std::vector<int32_t> &OutputInfo::dims() const { return dims_; }
void OutputInfo::set_dims(const std::vector<int32_t> &dims) { dims_ = dims; }
// NetDef: in-memory representation of a whole network — operators,
// global arguments, constant tensors, the memory arena and the
// input/output descriptions.  name/version/mem_arena track presence
// through has_bits_.
NetDef::NetDef() : has_bits_(0) {}

const std::string &NetDef::name() const { return name_; }
void NetDef::set_name(const std::string &value) {
  name_ = value;
  set_has_name();
}
bool NetDef::has_name() const { return (has_bits_ & 0x00000001u) != 0; }
void NetDef::set_has_name() { has_bits_ |= 0x00000001u; }

const std::string &NetDef::version() const { return version_; }
void NetDef::set_version(const std::string &value) {
  version_ = value;
  set_has_version();
}
bool NetDef::has_version() const { return (has_bits_ & 0x00000002u) != 0; }
void NetDef::set_has_version() { has_bits_ |= 0x00000002u; }

const std::vector<OperatorDef> &NetDef::op() const { return op_; }
// Default-constructs a new operator in place and returns a pointer for
// the caller to populate.
OperatorDef *NetDef::add_op() {
  op_.emplace_back();
  return &op_.back();
}
std::vector<OperatorDef> &NetDef::mutable_op() { return op_; }

const std::vector<Argument> &NetDef::arg() const { return arg_; }
Argument *NetDef::add_arg() {
  arg_.emplace_back();
  return &arg_.back();
}
std::vector<Argument> &NetDef::mutable_arg() { return arg_; }

const std::vector<ConstTensor> &NetDef::tensors() const { return tensors_; }
std::vector<ConstTensor> &NetDef::mutable_tensors() { return tensors_; }

const MemoryArena &NetDef::mem_arena() const { return mem_arena_; }
// Grabbing the mutable arena marks it as present.
MemoryArena &NetDef::mutable_mem_arena() {
  set_has_mem_arena();
  return mem_arena_;
}
bool NetDef::has_mem_arena() const { return (has_bits_ & 0x00000004u) != 0; }
void NetDef::set_has_mem_arena() { has_bits_ |= 0x00000004u; }

const std::vector<InputInfo> &NetDef::input_info() const { return input_info_; }
const std::vector<OutputInfo> &NetDef::output_info() const {
  return output_info_;
}
std::vector<OutputInfo> &NetDef::mutable_output_info() { return output_info_; }

int NetDef::op_size() const { return static_cast<int>(op_.size()); }
// Bounds-checked access to one operator.
const OperatorDef &NetDef::op(const int idx) const {
  MACE_CHECK(0 <= idx && idx < op_size());
  return op_[idx];
}
}; // namespace mace
......@@ -110,7 +110,8 @@ bool SerialNet::Run(RunMetadata *run_metadata) {
}
OperatorStats op_stats = {op->debug_def().name(), op->debug_def().type(),
op->debug_def().output_shape(),
{op->debug_def().output_shape().begin(),
op->debug_def().output_shape().end()},
{strides, padding_type, paddings, dilations,
kernels}, call_stats};
run_metadata->op_stats.emplace_back(op_stats);
......
......@@ -25,8 +25,8 @@
#include "mace/core/registry.h"
#include "mace/core/tensor.h"
#include "mace/core/workspace.h"
#include "mace/proto/mace.pb.h"
#include "mace/public/mace.h"
#include "mace/public/mace_types.h"
namespace mace {
......@@ -108,20 +108,20 @@ class Operator : public OperatorBase {
inputs_.push_back(tensor);
}
for (size_t i = 0; i < operator_def.output().size(); ++i) {
const std::string output_str = operator_def.output()[i];
for (size_t i = 0; i < (size_t)operator_def.output_size(); ++i) {
const std::string output_str = operator_def.output(i);
if (ws->HasTensor(output_str)) {
outputs_.push_back(ws->GetTensor(output_str));
} else {
MACE_CHECK(
operator_def.output_type().size() == 0
|| operator_def.output().size() == operator_def.output_type().size(),
operator_def.output_type_size() == 0
|| operator_def.output_size() == operator_def.output_type_size(),
"operator output size != operator output type size",
operator_def.output().size(),
operator_def.output_type().size());
operator_def.output_size(),
operator_def.output_type_size());
DataType output_type;
if (i < operator_def.output_type().size()) {
output_type = operator_def.output_type()[i];
if (i < (size_t)operator_def.output_type_size()) {
output_type = operator_def.output_type(i);
} else {
output_type = DataTypeToEnum<T>::v();
}
......
......@@ -68,7 +68,8 @@ bool HexagonControlWrapper::Finalize() {
return hexagon_controller_DeInitHexagon() == 0;
}
bool HexagonControlWrapper::SetupGraph(const NetDef &net_def) {
bool HexagonControlWrapper::SetupGraph(const NetDef &net_def,
unsigned const char *model_data) {
LOG(INFO) << "Hexagon setup graph";
int64_t t0 = NowMicros();
......@@ -96,7 +97,7 @@ bool HexagonControlWrapper::SetupGraph(const NetDef &net_def) {
const_node.tensor.dataLen = 0;
} else {
const_node.tensor.data =
const_cast<unsigned char *>(const_tensor.data());
const_cast<unsigned char *>(model_data + const_tensor.offset());
const_node.tensor.dataLen = const_tensor.data_size() *
GetEnumTypeSize(const_tensor.data_type());
}
......@@ -133,12 +134,12 @@ bool HexagonControlWrapper::SetupGraph(const NetDef &net_def) {
for (const OperatorDef &op : net_def.op()) {
int op_id = op_map.GetOpId(op.type());
inputs.resize(op.node_input().size());
for (size_t i = 0; i < op.node_input().size(); ++i) {
for (size_t i = 0; i < (size_t)op.node_input().size(); ++i) {
inputs[i].src_id = node_id(op.node_input()[i].node_id());
inputs[i].output_idx = op.node_input()[i].output_port();
}
outputs.resize(op.out_max_byte_size().size());
for (size_t i = 0; i < op.out_max_byte_size().size(); ++i) {
for (size_t i = 0; i < (size_t)op.out_max_byte_size().size(); ++i) {
outputs[i].max_size = op.out_max_byte_size()[i];
}
cached_inputs.push_back(inputs);
......
......@@ -31,7 +31,7 @@ class HexagonControlWrapper {
bool Config();
bool Init();
bool Finalize();
bool SetupGraph(const NetDef &net_def);
bool SetupGraph(const NetDef &net_def, const unsigned char *model_data);
bool ExecuteGraph(const Tensor &input_tensor, Tensor *output_tensor);
bool ExecuteGraphNew(const std::vector<Tensor> &input_tensors,
std::vector<Tensor> *output_tensors);
......
......@@ -18,7 +18,7 @@
#include <cstdint>
#include <string>
#include "mace/public/mace_types.h"
#include "mace/proto/mace.pb.h"
#ifdef MACE_ENABLE_OPENCL
#include "include/half.hpp"
#endif
......
......@@ -60,24 +60,17 @@ std::vector<std::string> Workspace::Tensors() const {
return names;
}
void Workspace::LoadModelTensor(const NetDef &net_def, DeviceType type) {
void Workspace::LoadModelTensor(const NetDef &net_def,
DeviceType type,
const unsigned char *model_data) {
MACE_LATENCY_LOGGER(1, "Load model tensors");
index_t model_data_size = 0;
unsigned char *model_data_ptr = nullptr;
for (auto &const_tensor : net_def.tensors()) {
if (model_data_ptr == nullptr ||
reinterpret_cast<int64_t>(const_tensor.data()) <
reinterpret_cast<int64_t>(model_data_ptr)) {
model_data_ptr = const_cast<unsigned char *>(const_tensor.data());
}
}
for (auto &const_tensor : net_def.tensors()) {
model_data_size = std::max(
model_data_size,
static_cast<index_t>((reinterpret_cast<int64_t>(const_tensor.data()) -
reinterpret_cast<int64_t>(model_data_ptr)) +
static_cast<index_t>(const_tensor.offset() +
const_tensor.data_size() *
GetEnumTypeSize(const_tensor.data_type())));
GetEnumTypeSize(const_tensor.data_type())));
}
VLOG(3) << "Model data size: " << model_data_size;
......@@ -85,13 +78,14 @@ void Workspace::LoadModelTensor(const NetDef &net_def, DeviceType type) {
if (type == DeviceType::CPU) {
tensor_buffer_ = std::unique_ptr<Buffer>(
new Buffer(GetDeviceAllocator(type),
model_data_ptr,
const_cast<unsigned char*>(model_data),
model_data_size));
} else {
tensor_buffer_ = std::unique_ptr<Buffer>(
new Buffer(GetDeviceAllocator(type), model_data_size));
tensor_buffer_->Map(nullptr);
tensor_buffer_->Copy(model_data_ptr, 0, model_data_size);
tensor_buffer_->Copy(const_cast<unsigned char*>(model_data),
0, model_data_size);
tensor_buffer_->UnMap();
}
}
......@@ -107,10 +101,8 @@ void Workspace::LoadModelTensor(const NetDef &net_def, DeviceType type) {
dims.push_back(d);
}
index_t offset = reinterpret_cast<int64_t>(const_tensor.data())
- reinterpret_cast<int64_t>(model_data_ptr);
std::unique_ptr<Tensor> tensor(
new Tensor(BufferSlice(tensor_buffer_.get(), offset,
new Tensor(BufferSlice(tensor_buffer_.get(), const_tensor.offset(),
const_tensor.data_size() *
GetEnumTypeSize(const_tensor.data_type())),
const_tensor.data_type()));
......
......@@ -47,7 +47,9 @@ class Workspace {
std::vector<std::string> Tensors() const;
void LoadModelTensor(const NetDef &net_def, DeviceType type);
void LoadModelTensor(const NetDef &net_def,
DeviceType type,
const unsigned char *model_data);
ScratchBuffer *GetScratchBuffer(DeviceType device_type);
......
......@@ -6,6 +6,7 @@ cc_binary(
srcs = ["example.cc"],
linkopts = if_openmp_enabled(["-fopenmp"]),
linkstatic = 1,
copts = ["-Werror"],
deps = [
"//external:gflags_nothreads",
"//mace/codegen:generated_models",
......
......@@ -38,7 +38,8 @@ cc_library(
"opencl/*.h",
"buffer_to_image.h",
])),
copts = if_openmp_enabled(["-fopenmp"]) +
copts = ["-Werror"] +
if_openmp_enabled(["-fopenmp"]) +
if_neon_enabled(["-DMACE_ENABLE_NEON"]) +
if_android_armv7(["-mfpu=neon"]) +
if_android_armv7(["-mfloat-abi=softfp"]) +
......@@ -61,7 +62,8 @@ cc_test(
"opencl/*_test.cc",
],
),
copts = if_openmp_enabled(["-fopenmp"]) +
copts = ["-Werror"] +
if_openmp_enabled(["-fopenmp"]) +
if_neon_enabled(["-DMACE_ENABLE_NEON"]) +
if_android_armv7(["-mfpu=neon"]) +
if_android_armv7(["-mfloat-abi=softfp"]) +
......
......@@ -37,6 +37,10 @@ struct ImageToBufferFunctor : ImageToBufferFunctorBase {
const BufferType type,
Tensor *output,
StatsFuture *future) {
MACE_UNUSED(input);
MACE_UNUSED(type);
MACE_UNUSED(output);
MACE_UNUSED(future);
MACE_NOT_IMPLEMENTED;
}
};
......
......@@ -111,6 +111,7 @@ struct WinogradInverseTransformFunctor : WinogradInverseTransformFunctorBase {
MACE_UNUSED(input);
MACE_UNUSED(bias);
MACE_UNUSED(output);
MACE_UNUSED(future);
MACE_NOT_IMPLEMENTED;
}
};
......
......@@ -15,6 +15,7 @@ cc_library(
hdrs = [
"ops_test_util.h",
],
copts = ["-Werror"],
deps = [
"//mace/core",
"@gtest//:gtest",
......@@ -40,7 +41,8 @@ cc_library(
["*.h"],
exclude = ["ops_test_util.h"],
),
copts = if_openmp_enabled(["-fopenmp"]) +
copts = ["-Werror"] +
if_openmp_enabled(["-fopenmp"]) +
if_neon_enabled(["-DMACE_ENABLE_NEON"]) +
if_android_armv7(["-mfpu=neon"]) +
if_android_armv7(["-mfloat-abi=softfp"]) +
......@@ -58,7 +60,8 @@ cc_test(
srcs = glob(
["*_test.cc"],
),
copts = if_openmp_enabled(["-fopenmp"]) +
copts = ["-Werror"] +
if_openmp_enabled(["-fopenmp"]) +
if_neon_enabled(["-DMACE_ENABLE_NEON"]) +
if_android_armv7(["-mfpu=neon"]) +
if_android_armv7(["-mfloat-abi=softfp"]) +
......@@ -77,7 +80,8 @@ cc_test(
name = "ops_benchmark",
testonly = 1,
srcs = glob(["*_benchmark.cc"]),
copts = if_openmp_enabled(["-fopenmp"]) +
copts = ["-Werror"] +
if_openmp_enabled(["-fopenmp"]) +
if_neon_enabled(["-DMACE_ENABLE_NEON"]) +
if_android_armv7(["-mfpu=neon"]) +
if_android_armv7(["-mfloat-abi=softfp"]) +
......
......@@ -53,7 +53,9 @@ class OpDefBuilder {
}
OpDefBuilder &OutputType(const std::vector<DataType> &output_type) {
op_def_.set_output_type(output_type);
for (auto out_t : output_type) {
op_def_.add_output_type(out_t);
}
return *this;
}
......
......@@ -8,7 +8,9 @@ package(
licenses(["notice"]) # Apache 2.0
load("@com_google_protobuf//:protobuf.bzl", "py_proto_library")
load("@com_google_protobuf//:protobuf.bzl",
"py_proto_library",
"cc_proto_library")
py_proto_library(
name = "mace_py",
......@@ -19,11 +21,9 @@ py_proto_library(
deps = ["@com_google_protobuf//:protobuf_python"],
)
py_proto_library(
name = "caffe_py",
srcs = ["caffe.proto"],
default_runtime = "@com_google_protobuf//:protobuf_python",
cc_proto_library(
name = "mace_cc",
srcs = ["mace.proto"],
default_runtime = "@com_google_protobuf//:protobuf_lite",
protoc = "@com_google_protobuf//:protoc",
srcs_version = "PY2AND3",
deps = ["@com_google_protobuf//:protobuf_python"],
)
......@@ -2,14 +2,16 @@ syntax = "proto2";
package mace;
option optimize_for = LITE_RUNTIME;
enum NetMode {
INIT = 0;
NORMAL = 1;
}
enum DeviceType {
CPU = 0; // In default, we will use CPU.
GPU = 2;
CPU = 0; // In default, we will use CPU.
GPU = 2;
HEXAGON = 3;
}
......@@ -32,7 +34,7 @@ enum DataType {
DT_UINT32 = 22;
}
message TensorProto {
message ConstTensor {
// The dimensions in the tensor.
repeated int64 dims = 1;
optional DataType data_type = 2 [default = DT_FLOAT];
......@@ -52,6 +54,8 @@ message TensorProto {
repeated int64 int64_data = 10 [packed = true];
// Optionally, a name for the tensor.
optional string name = 7;
optional int64 offset = 11;
optional int64 data_size = 12;
optional uint32 node_id = 100;
}
......@@ -126,7 +130,7 @@ message NetDef {
repeated OperatorDef op = 2;
optional string version = 3;
repeated Argument arg = 4;
repeated TensorProto tensors = 5;
repeated ConstTensor tensors = 5;
// for mem optimization
optional MemoryArena mem_arena = 10;
......
......@@ -12,6 +12,9 @@ cc_library(
hdrs = [
"mace.h",
"mace_runtime.h",
"mace_types.h",
],
copts = ["-Werror"],
deps = [
"//mace/proto:mace_cc",
],
)
......@@ -24,11 +24,39 @@
#include <string>
#include <vector>
#include "mace/proto/mace.pb.h"
namespace mace {
struct CallStats {
int64_t start_micros;
int64_t end_micros;
};
struct ConvPoolArgs {
std::vector<int> strides;
int padding_type;
std::vector<int> paddings;
std::vector<int> dilations;
std::vector<int64_t> kernels;
};
struct OperatorStats {
std::string operator_name;
std::string type;
std::vector<OutputShape> output_shape;
ConvPoolArgs args;
CallStats stats;
};
class RunMetadata {
public:
std::vector<OperatorStats> op_stats;
};
const char *MaceVersion();
enum DeviceType { CPU = 0, GPU = 2, HEXAGON = 3 };
// enum DeviceType { CPU = 0, GPU = 2, HEXAGON = 3 };
enum MaceStatus { MACE_SUCCESS = 0, MACE_INVALID_ARGS = 1 };
......@@ -56,15 +84,13 @@ class MaceTensor {
std::unique_ptr<Impl> impl_;
};
class NetDef;
class RunMetadata;
class MaceEngine {
public:
explicit MaceEngine(const NetDef *net_def,
DeviceType device_type,
const std::vector<std::string> &input_nodes,
const std::vector<std::string> &output_nodes);
const std::vector<std::string> &output_nodes,
const unsigned char *model_data);
~MaceEngine();
MaceStatus Run(const std::map<std::string, MaceTensor> &inputs,
......
// Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This file defines data types used by net creation and benchmark tools.
// These APIs are not stable and should only be used by advanced users.
#ifndef MACE_PUBLIC_MACE_TYPES_H_
#define MACE_PUBLIC_MACE_TYPES_H_
#include <string>
#include <vector>
namespace mace {
// Disable the copy and assignment operator for a class.
// Expands inside a class body; note it leaves the access level at
// `private:` for whatever follows the macro invocation.
#ifndef DISABLE_COPY_AND_ASSIGN
#define DISABLE_COPY_AND_ASSIGN(classname) \
 private:                                  \
  classname(const classname &) = delete;   \
  classname &operator=(const classname &) = delete
#endif
// Net execution mode. Mirrors the NetMode enum in mace/proto/mace.proto
// (INIT = one-time initialization net, NORMAL = regular inference net).
enum NetMode { INIT = 0, NORMAL = 1 };

// Element type of tensor data. Values match the DataType enum in
// mace/proto/mace.proto (note the non-contiguous values DT_HALF = 19 and
// DT_UINT32 = 22, kept for wire compatibility).
enum DataType {
  DT_INVALID = 0,
  DT_FLOAT = 1,
  DT_DOUBLE = 2,
  DT_INT32 = 3,
  DT_UINT8 = 4,
  DT_INT16 = 5,
  DT_INT8 = 6,
  DT_STRING = 7,
  DT_INT64 = 8,
  DT_UINT16 = 9,
  DT_BOOL = 10,
  DT_HALF = 19,
  DT_UINT32 = 22
};
// In-memory description of a constant (weight) tensor. Appears to mirror
// the ConstTensor message in mace/proto/mace.proto. The raw element data
// is NOT owned: `data` is a borrowed pointer into an externally managed
// model-data buffer, which the caller must keep alive for the lifetime
// of this object.
class ConstTensor {
 public:
  // `data_type` defaults to DT_FLOAT.
  ConstTensor(const std::string &name,
              const unsigned char *data,
              const std::vector<int64_t> &dims,
              const DataType data_type = DT_FLOAT,
              uint32_t node_id = 0);
  // Overload taking the data type as a raw int (e.g. a value read from a
  // serialized model) and casting it to DataType internally.
  ConstTensor(const std::string &name,
              const unsigned char *data,
              const std::vector<int64_t> &dims,
              const int data_type,
              uint32_t node_id = 0);

  const std::string &name() const;
  const unsigned char *data() const;
  // Size of the tensor data; presumably the element count derived from
  // dims (computed in the .cc) — confirm against the implementation.
  int64_t data_size() const;
  const std::vector<int64_t> &dims() const;
  DataType data_type() const;
  // Node id used by the hexagon (DSP) runtime graph.
  uint32_t node_id() const;

 private:
  const std::string name_;
  const unsigned char *data_;  // non-owning
  const int64_t data_size_;
  const std::vector<int64_t> dims_;
  const DataType data_type_;
  const uint32_t node_id_;
};
// Named operator/net argument holding exactly one kind of payload:
// a scalar float/int/string or a repeated floats/ints/strings list.
// Hand-written stand-in for the protobuf Argument message, including
// proto2-style has_*() presence bits for the scalar fields.
class Argument {
 public:
  Argument();
  void CopyFrom(const Argument &from);
 public:
  const std::string &name() const;
  void set_name(const std::string &value);
  // Scalar float field with explicit presence tracking.
  bool has_f() const;
  float f() const;
  void set_f(float value);
  // Scalar int64 field with explicit presence tracking.
  bool has_i() const;
  int64_t i() const;
  void set_i(int64_t value);
  // Scalar string field with explicit presence tracking. Note s() returns
  // by value, unlike name() which returns a const reference.
  bool has_s() const;
  std::string s() const;
  void set_s(const std::string &value);
  // Repeated fields: add_* appends one element, set_* replaces the list.
  const std::vector<float> &floats() const;
  void add_floats(float value);
  void set_floats(const std::vector<float> &value);
  const std::vector<int64_t> &ints() const;
  void add_ints(int64_t value);
  void set_ints(const std::vector<int64_t> &value);
  const std::vector<std::string> &strings() const;
  void add_strings(const ::std::string &value);
  void set_strings(const std::vector<std::string> &value);
 private:
  // Mark the corresponding scalar field as present in has_bits_.
  void set_has_f();
  void set_has_i();
  void set_has_s();
 private:
  std::string name_;
  float f_;
  int64_t i_;
  std::string s_;
  std::vector<float> floats_;
  std::vector<int64_t> ints_;
  std::vector<std::string> strings_;
  uint32_t has_bits_;  // presence bitmask for the scalar fields
};
// Edge descriptor for the hexagon (DSP) graph: identifies a producing
// node and which of its output ports feeds this input.
class NodeInput {
 public:
  NodeInput() {}
  NodeInput(int node_id, int output_port);
  void CopyFrom(const NodeInput &from);
 public:
  int node_id() const;
  void set_node_id(int node_id);
  int output_port() const;
  void set_output_port(int output_port);
 private:
  int node_id_;
  int output_port_;
};
// Shape of an operator output, as a list of dimension sizes.
class OutputShape {
 public:
  OutputShape();
  // Implicit conversion from a dims vector is intentional (used when
  // building shapes inline), hence the NOLINT.
  OutputShape(const std::vector<int64_t> &dims);  // NOLINT(runtime/explicit)
  void CopyFrom(const OutputShape &from);
 public:
  const std::vector<int64_t> &dims() const;
 private:
  std::vector<int64_t> dims_;
};
// Definition of one operator in a net: its name/type, input and output
// tensor names, arguments, output shapes/types, plus memory-optimization
// ids and hexagon (DSP) node wiring. Hand-written stand-in for the
// protobuf OperatorDef message, with proto2-style has_*() presence bits
// for name/type/mem_id.
class OperatorDef {
 public:
  void CopyFrom(const OperatorDef &from);
 public:
  const std::string &name() const;
  void set_name(const std::string &name_);
  bool has_name() const;
  const std::string &type() const;
  void set_type(const std::string &type_);
  bool has_type() const;
  // Pre-allocated memory-block ids used by the memory-arena optimization.
  const std::vector<int> &mem_id() const;
  void set_mem_id(const std::vector<int> &value);
  // --- hexagon (DSP) graph fields ---
  uint32_t node_id() const;
  void set_node_id(uint32_t node_id);
  uint32_t op_id() const;
  uint32_t padding() const;
  void set_padding(uint32_t padding);
  const std::vector<NodeInput> &node_input() const;
  void add_node_input(const NodeInput &value);
  const std::vector<int> &out_max_byte_size() const;
  void add_out_max_byte_size(int value);
  // --- input tensor names ---
  const std::vector<std::string> &input() const;
  const std::string &input(int index) const;
  // add_input() returns a pointer to a newly appended (empty) element,
  // protobuf-style; the other overloads append a given value.
  std::string *add_input();
  void add_input(const ::std::string &value);
  void add_input(::std::string &&value);
  void set_input(const std::vector<std::string> &value);
  // --- output tensor names ---
  const std::vector<std::string> &output() const;
  const std::string &output(int index) const;
  std::string *add_output();
  void add_output(const ::std::string &value);
  void add_output(::std::string &&value);
  void set_output(const std::vector<std::string> &value);
  // --- arguments and output metadata ---
  const std::vector<Argument> &arg() const;
  Argument *add_arg();
  const std::vector<OutputShape> &output_shape() const;
  void add_output_shape(const OutputShape &value);
  const std::vector<DataType> &output_type() const;
  void set_output_type(const std::vector<DataType> &value);
 private:
  // Mark the corresponding field as present in has_bits_.
  void set_has_name();
  void set_has_type();
  void set_has_mem_id();
 private:
  std::string name_;
  std::string type_;
  std::vector<std::string> input_;
  std::vector<std::string> output_;
  std::vector<Argument> arg_;
  std::vector<OutputShape> output_shape_;
  std::vector<DataType> output_type_;
  std::vector<int> mem_id_;
  // nnlib
  uint32_t node_id_;
  uint32_t op_id_;
  uint32_t padding_;
  std::vector<NodeInput> node_input_;
  std::vector<int> out_max_byte_size_;
  uint32_t has_bits_;  // presence bitmask for name/type/mem_id
};
// One pre-allocated memory block in the memory arena, identified by
// mem_id with a 2-D extent (x, y) — presumably an OpenCL image size;
// confirm against the GPU allocator.
class MemoryBlock {
 public:
  MemoryBlock(int mem_id, uint32_t x, uint32_t y);
 public:
  int mem_id() const;
  uint32_t x() const;
  uint32_t y() const;
 private:
  int mem_id_;
  uint32_t x_;
  uint32_t y_;
};
// Collection of pre-allocated MemoryBlocks used by the memory
// optimization pass to share buffers between operators.
class MemoryArena {
 public:
  const std::vector<MemoryBlock> &mem_block() const;
  // Mutable access to the block list (callers push_back directly).
  std::vector<MemoryBlock> &mutable_mem_block();
  int mem_block_size() const;
 private:
  std::vector<MemoryBlock> mem_block_;
};
// for hexagon mace-nnlib
// Read-only description of a net input node as required by the hexagon
// (DSP) runtime: name, node id, maximum buffer size, element type, dims.
class InputInfo {
 public:
  const std::string &name() const;
  int32_t node_id() const;
  int32_t max_byte_size() const;
  DataType data_type() const;
  const std::vector<int32_t> &dims() const;
 private:
  std::string name_;
  int32_t node_id_;
  int32_t max_byte_size_;  // only support 32-bit len
  DataType data_type_;
  std::vector<int32_t> dims_;
};
// Description of a net output node for the hexagon (DSP) runtime.
// Unlike InputInfo, data_type and dims are settable — code-generated
// model sources fill them in when building the NetDef.
class OutputInfo {
 public:
  const std::string &name() const;
  int32_t node_id() const;
  int32_t max_byte_size() const;
  DataType data_type() const;
  void set_data_type(DataType data_type);
  const std::vector<int32_t> &dims() const;
  void set_dims(const std::vector<int32_t> &dims);
 private:
  std::string name_;
  int32_t node_id_;
  int32_t max_byte_size_;  // only support 32-bit len
  DataType data_type_;
  std::vector<int32_t> dims_;
};
// Complete definition of a neural network: operators, arguments, constant
// tensors, the memory arena for buffer sharing, and hexagon input/output
// node info. Hand-written stand-in for the protobuf NetDef message, with
// proto2-style has_*() presence bits for name/version/mem_arena.
class NetDef {
 public:
  NetDef();
  int op_size() const;
  // Unchecked indexed access into the operator list.
  const OperatorDef &op(const int idx) const;
 public:
  const std::string &name() const;
  bool has_name() const;
  void set_name(const std::string &value);
  const std::string &version() const;
  bool has_version() const;
  void set_version(const std::string &value);
  // Operators, in execution order. add_op() appends a default-constructed
  // element and returns a pointer to it, protobuf-style.
  const std::vector<OperatorDef> &op() const;
  OperatorDef *add_op();
  std::vector<OperatorDef> &mutable_op();
  const std::vector<Argument> &arg() const;
  Argument *add_arg();
  std::vector<Argument> &mutable_arg();
  const std::vector<ConstTensor> &tensors() const;
  std::vector<ConstTensor> &mutable_tensors();
  // for mem optimization
  const MemoryArena &mem_arena() const;
  bool has_mem_arena() const;
  MemoryArena &mutable_mem_arena();
  // for hexagon mace-nnlib
  const std::vector<InputInfo> &input_info() const;
  const std::vector<OutputInfo> &output_info() const;
  std::vector<OutputInfo> &mutable_output_info();
 private:
  // Mark the corresponding field as present in has_bits_.
  void set_has_name();
  void set_has_version();
  void set_has_mem_arena();
 private:
  std::string name_;
  std::string version_;
  std::vector<OperatorDef> op_;
  std::vector<Argument> arg_;
  std::vector<ConstTensor> tensors_;
  // for mem optimization
  MemoryArena mem_arena_;
  // for hexagon mace-nnlib
  std::vector<InputInfo> input_info_;
  std::vector<OutputInfo> output_info_;
  uint32_t has_bits_;  // presence bitmask for name/version/mem_arena
};
// Wall-clock interval of one operator invocation, in microseconds.
struct CallStats {
  int64_t start_micros;
  int64_t end_micros;
};

// Convolution/pooling parameters recorded alongside profiling data.
struct ConvPoolArgs {
  std::vector<int> strides;
  int padding_type;
  std::vector<int> paddings;
  std::vector<int> dilations;
  std::vector<int64_t> kernels;
};

// Per-operator profiling record: identity, output shapes, conv/pool
// arguments (where applicable), and call timing.
struct OperatorStats {
  std::string operator_name;
  std::string type;
  std::vector<OutputShape> output_shape;
  ConvPoolArgs args;
  CallStats stats;
};
// Profiling output of one net run: one OperatorStats entry per executed
// operator. Passed to MaceEngine::Run by benchmark/statistics tools.
class RunMetadata {
 public:
  std::vector<OperatorStats> op_stats;
};
} // namespace mace
#endif // MACE_PUBLIC_MACE_TYPES_H_
......@@ -173,15 +173,14 @@ def main(unused_args):
print "Memory optimization done."
if FLAGS.output_type == 'source':
source_converter_lib.convert_to_source(
output_graph_def, model_checksum, weight_checksum, FLAGS.template,
FLAGS.obfuscate, FLAGS.model_tag, FLAGS.output, FLAGS.runtime,
FLAGS.embed_model_data, FLAGS.winograd)
else:
with open(FLAGS.output, "wb") as f:
source_converter_lib.convert_to_source(
output_graph_def, model_checksum, weight_checksum, FLAGS.template,
FLAGS.obfuscate, FLAGS.model_tag, FLAGS.codegen_output, FLAGS.runtime,
FLAGS.embed_model_data, FLAGS.winograd, FLAGS.model_load_type)
if FLAGS.model_load_type == 'pb':
with open(FLAGS.pb_output, "wb") as f:
f.write(output_graph_def.SerializeToString())
with open(FLAGS.output + '_txt', "wb") as f:
with open(FLAGS.pb_output + '_txt', "wb") as f:
# output_graph_def.ClearField('tensors')
f.write(str(output_graph_def))
print("Model conversion is completed.")
......@@ -219,10 +218,15 @@ def parse_args():
default="",
help="Weight file sha256 checksum")
parser.add_argument(
"--output",
"--codegen_output",
type=str,
default="",
help="File to save the output graph to.")
parser.add_argument(
"--pb_output",
type=str,
default="",
help="File to save the mace model to.")
parser.add_argument(
"--runtime", type=str, default="", help="Runtime: cpu/gpu/dsp")
parser.add_argument(
......@@ -266,6 +270,12 @@ def parse_args():
type=str2bool,
default=True,
help="embed model data.")
parser.add_argument(
"--model_load_type",
type=str,
default="source",
help="[source|pb] Load models in generated `source` code" +
"or `pb` file.")
return parser.parse_known_args()
......
......@@ -23,6 +23,7 @@
#include "mace/public/mace_runtime.h"
namespace mace {
{% for tag in model_tags %}
namespace {{tag}} {
......@@ -30,7 +31,7 @@ extern const unsigned char *LoadModelData(const char *model_data_file);
extern void UnloadModelData(const unsigned char *model_data);
extern NetDef CreateNet(const unsigned char *model_data);
extern NetDef CreateNet(const std::vector<unsigned char> &model_pb = {});
extern const std::string ModelName();
extern const std::string ModelChecksum();
......@@ -54,7 +55,8 @@ MaceStatus CreateMaceEngine(
const std::vector<std::string> &input_nodes,
const std::vector<std::string> &output_nodes,
const DeviceType device_type,
std::shared_ptr<MaceEngine> *engine) {
std::shared_ptr<MaceEngine> *engine,
const std::vector<unsigned char> model_pb = {}) {
// load model
if (engine == nullptr) {
return MaceStatus::MACE_INVALID_ARGS;
......@@ -66,9 +68,11 @@ MaceStatus CreateMaceEngine(
case {{ i }}:
model_data =
mace::{{model_tags[i]}}::LoadModelData(model_data_file);
net_def = mace::{{model_tags[i]}}::CreateNet(model_data);
net_def = mace::{{model_tags[i]}}::CreateNet(model_pb);
engine->reset(
new mace::MaceEngine(&net_def, device_type, input_nodes, output_nodes));
new mace::MaceEngine(&net_def, device_type, input_nodes, output_nodes,
model_data));
if (device_type == DeviceType::GPU || device_type == DeviceType::HEXAGON) {
mace::{{model_tags[i]}}::UnloadModelData(model_data);
}
......
......@@ -45,7 +45,7 @@ def parse_args():
parser.add_argument(
"--template_dir", type=str, default="", help="template path")
parser.add_argument(
"--output_dir", type=str, default="", help="template path")
"--output_dir", type=str, default="", help="output path")
return parser.parse_known_args()
......
......@@ -17,16 +17,17 @@
#include <vector>
#include <string>
#include "mace/core/macros.h"
#include "mace/public/mace.h"
#include "mace/utils/env_time.h"
#include "mace/utils/logging.h"
namespace mace {
{% if model_type == 'source' %}
namespace {{tag}} {
{% for tensor in tensors %}
extern void CreateTensor{{ tensor.id }}(std::vector<mace::ConstTensor> *tensors,
const unsigned char *model_data);
extern void CreateTensor{{ tensor.id }}(mace::ConstTensor *tensor);
{% endfor %}
......@@ -40,7 +41,6 @@ namespace {
{% if net.arg|length != 0 %}
void CreateNetArg(mace::NetDef &net_def) {
net_def.mutable_arg().reserve({{ net.arg|length }});
mace::Argument *arg = nullptr;
{% for arg in net.arg %}
......@@ -59,15 +59,15 @@ void CreateNetArg(mace::NetDef &net_def) {
arg->set_s({{ arg.s|tojson }});
{% endif %}
{% if arg.floats|length != 0 %}
arg->set_floats({ {{ arg.floats|join(', ') }} });
{% endif %}
{% if arg.ints|length != 0 %}
arg->set_ints({ {{ arg.ints|join(', ') }} });
{% endif %}
{% if arg.strings|length != 0 %}
arg->set_strings({ {{ arg.strings|stringfy() }} });
{% endif %}
{% for float_value in arg.floats %}
arg->add_floats({ {{ float_value }} });
{% endfor %}
{% for int_value in arg.ints %}
arg->add_ints({ {{ int_value }} });
{% endfor %}
{% for str_value in arg.strings %}
arg->add_strings({ {{ str_value }} });
{% endfor %}
{% endfor %}
}
......@@ -75,82 +75,86 @@ void CreateNetArg(mace::NetDef &net_def) {
{% if net.output_info | length > 0 %}
void CreateOutputInfo(mace::NetDef &net_def) {
std::vector<std::vector<int>> dims { {{net.output_info | map(attribute='dims') | join(', ') | replace('[', '{') | replace(']', '}') }} };
std::vector<std::vector<int>> dims { {{net.output_info | map(attribute='dims') | join(', ') | replace('[', '{') | replace(']', '}') }} };
std::vector<int> data_types_int { {{ net.output_info | map(attribute='data_type') | join(', ') }} };
std::vector<mace::DataType> data_types({{ net.output_info | length }});
for (int k = 0; k < {{ net.output_info | length }}; ++k) {
data_types[k] = static_cast<mace::DataType>(data_types_int[k]);
}
net_def.mutable_output_info().resize({{ net.output_info | length }});
for (int i = 0; i < {{ net.output_info | length }}; ++i) {
net_def.mutable_output_info()[i].set_data_type(data_types[i]);
net_def.mutable_output_info()[i].set_dims(dims[i]);
net_def.add_output_info();
net_def.mutable_output_info(i)->set_data_type(data_types[i]);
for (int j = 0; j < (int)dims[i].size(); ++j) {
net_def.mutable_output_info(i)->add_dims(dims[i][j]);
}
}
}
{% endif %}
void CreateOperators(std::vector<mace::OperatorDef> *ops) {
void CreateOperators(mace::NetDef *net_def) {
MACE_LATENCY_LOGGER(1, "Create operators");
ops->resize({{ net.op|length }});
{% for i in range(net.op|length) %}
mace::{{tag}}::CreateOperator{{i}}(&ops->at({{i}}));
mace::{{tag}}::CreateOperator{{i}}(net_def->add_op());
{% endfor %}
}
void CreateTensors(std::vector<mace::ConstTensor> *tensors,
const unsigned char *model_data) {
void CreateTensors(mace::NetDef *net_def) {
MACE_LATENCY_LOGGER(1, "Create tensors");
tensors->reserve({{ net.tensors|length }});
{% for tensor in tensors %}
mace::{{tag}}::CreateTensor{{tensor.id}}(tensors, model_data);
mace::{{tag}}::CreateTensor{{tensor.id}}(net_def->add_tensors());
{% endfor %}
}
{% if net.mem_arena.mem_block|length != 0 %}
void CreateMemoryArena(mace::MemoryArena *mem_arena) {
std::vector<mace::MemoryBlock> &mem_block = mem_arena->mutable_mem_block();
mem_block.reserve({{ net.mem_arena.mem_block|length }});
{% for i in range(net.mem_arena.mem_block|length) %}
{% for mem_blk in net.mem_arena.mem_block %}
mem_block.emplace_back(mace::MemoryBlock({{ mem_blk.mem_id }},
{{mem_blk.x}},
{{mem_blk.y}}));
{% endfor %}
mace::MemoryBlock* mem_block{{i}} = mem_arena->add_mem_block();
mem_block{{i}}->set_mem_id({{net.mem_arena.mem_block[i].mem_id}});
mem_block{{i}}->set_x({{net.mem_arena.mem_block[i].x}});
mem_block{{i}}->set_y({{net.mem_arena.mem_block[i].y}});
{% endfor %}
}
{% endif %}
} // namespace
{% endif %}
namespace {{tag}} {
NetDef CreateNet(const unsigned char *model_data) {
NetDef CreateNet(const std::vector<unsigned char> &model_pb = {}) {
MACE_LATENCY_LOGGER(1, "Create net {{ net.name }}");
NetDef net_def;
{% if model_type == 'source' %}
MACE_UNUSED(model_pb);
net_def.set_name("{{ net.name}}");
net_def.set_version("{{ net.version }}");
CreateOperators(&net_def);
CreateTensors(&net_def);
{% if net.arg|length != 0 %}
CreateNetArg(net_def);
{% endif %}
CreateOperators(&net_def.mutable_op());
CreateTensors(&net_def.mutable_tensors(), model_data);
{% if net.mem_arena.mem_block|length != 0 %}
CreateMemoryArena(&net_def.mutable_mem_arena());
CreateMemoryArena(net_def.mutable_mem_arena());
{% endif %}
{% if net.output_info | length > 0 %}
CreateOutputInfo(net_def);
{% endif %}
{% else %}
net_def.ParseFromArray(&model_pb[0], model_pb.size());
{% endif %}
return net_def;
}
......
......@@ -34,11 +34,20 @@ void UpdateOp(mace::OperatorDef *op,
const std::vector<int> &mem_ids) {
op->set_name(name);
op->set_type(type);
op->set_input(inputs);
op->set_output(outputs);
op->set_output_type(output_types);
op->set_node_id(node_id);
op->set_mem_id(mem_ids);
for (auto input : inputs) {
op->add_input(input);
}
for (auto output : outputs) {
op->add_output(output);
}
for (auto output_type : output_types) {
op->add_output_type(output_type);
}
for (auto mem_id : mem_ids) {
op->add_mem_id(mem_id);
}
}
} // namespace
......@@ -68,20 +77,27 @@ void CreateOperator{{i}}(mace::OperatorDef *op) {
arg->set_s({{ arg.s|tojson }});
{%- endif %}
{% if arg.floats|length != 0 %}
arg->set_floats({ {{ arg.floats|join(', ') }} });
{% endif %}
{% if arg.ints|length != 0 %}
arg->set_ints({ {{ arg.ints|join(', ') }} });
{% endif %}
{% if arg.strings|length != 0 %}
arg->set_strings({ {{ arg.strings|stringfy() }} });
{% endif %}
{% for float_value in arg.floats %}
arg->add_floats({{ float_value }});
{% endfor %}
{% for int_value in arg.ints %}
arg->add_ints({{ int_value }});
{% endfor %}
{% for str_value in arg.strings %}
arg->add_strings({{ str_value }});
{% endfor %}
{% endfor %}
{% for shape in net.op[i].output_shape %}
{% if shape.dims | length > 0 %}
op->add_output_shape(mace::OutputShape({ {{ shape.dims|join(', ') }} }));
mace::OutputShape * output_shape = nullptr;
output_shape = op->add_output_shape();
{% for dim in shape.dims %}
output_shape->add_dims({{ dim }});
{% endfor %}
{% endif %}
{% endfor %}
......@@ -103,11 +119,14 @@ void CreateOperator{{i}}(mace::OperatorDef *op) {
std::vector<int> input_node_ids({ {{ net.op[i].node_input | map(attribute='node_id') | join(', ') }} });
std::vector<int> input_output_ports({ {{ net.op[i].node_input | map(attribute='output_port') | join(', ')}} });
mace::NodeInput *node_input = nullptr;
for (size_t i = 0; i < {{ net.op[i].node_input | length }}; ++i) {
mace::NodeInput input(input_node_ids[i], input_output_ports[i]);
op->add_node_input(input);
node_input = op->add_node_input();
node_input->set_node_id(input_node_ids[i]);
node_input->set_output_port(input_output_ports[i]);
}
{% endif %}
{% if net.op[i].out_max_byte_size | length > 0 %}
std::vector<int> out_max_byte_sizes {{ net.op[i].out_max_byte_size | replace('[', '{') | replace(']', '}') }};
for (size_t i = 0; i < {{ net.op[i].out_max_byte_size | length }}; ++i) {
......
......@@ -137,7 +137,7 @@ def stringfy(value):
def convert_to_source(net_def, model_checksum, weight_checksum, template_dir,
obfuscate, model_tag, output, runtime, embed_model_data,
winograd_conv):
winograd_conv, model_load_type):
if obfuscate:
obfuscate_name(net_def)
else:
......@@ -163,17 +163,26 @@ def convert_to_source(net_def, model_checksum, weight_checksum, template_dir,
padding = 4 - offset % 4
model_data.extend(bytearray([0] * padding))
offset += padding
source = j2_env.get_template(template_name).render(
tensor_info=tensor_info,
tensor=t,
tag=model_tag,
offset=offset,
)
if t.data_type == mace_pb2.DT_FLOAT:
t.data_size = len(t.float_data)
elif t.data_type == mace_pb2.DT_INT32:
t.data_size = len(t.int32_data)
elif t.data_type == mace_pb2.DT_UINT8:
t.data_size = len(t.int32_data)
t.offset = offset
if model_load_type == 'source':
source = j2_env.get_template(template_name).render(
tensor_info=tensor_info,
tensor=t,
tag=model_tag,
)
with open(output_dir + 'tensor' + str(counter) + '.cc', "wb") as f:
f.write(source)
counter += 1
model_data.extend(tensor_info.data)
offset += len(tensor_info.data)
with open(output_dir + 'tensor' + str(counter) + '.cc', "wb") as f:
f.write(source)
counter += 1
# generate tensor data
template_name = 'tensor_data.jinja2'
......@@ -188,21 +197,22 @@ def convert_to_source(net_def, model_checksum, weight_checksum, template_dir,
with open(output_dir + model_tag + '.data', "wb") as f:
f.write(bytearray(model_data))
# generate op source files
template_name = 'operator.jinja2'
counter = 0
op_size = len(net_def.op)
for start in range(0, op_size, 10):
source = j2_env.get_template(template_name).render(
start=start,
end=min(start + 10, op_size),
net=net_def,
tag=model_tag,
runtime=runtime,
)
with open(output_dir + 'op' + str(counter) + '.cc', "wb") as f:
f.write(source)
counter += 1
if model_load_type == 'source':
# generate op source files
template_name = 'operator.jinja2'
counter = 0
op_size = len(net_def.op)
for start in range(0, op_size, 10):
source = j2_env.get_template(template_name).render(
start=start,
end=min(start + 10, op_size),
net=net_def,
tag=model_tag,
runtime=runtime,
)
with open(output_dir + 'op' + str(counter) + '.cc', "wb") as f:
f.write(source)
counter += 1
# generate model source files
build_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
......@@ -223,7 +233,8 @@ def convert_to_source(net_def, model_checksum, weight_checksum, template_dir,
embed_model_data=embed_model_data,
winograd_conv=winograd_conv,
checksum=checksum,
build_time=build_time)
build_time=build_time,
model_type=model_load_type)
with open(output, "wb") as f:
f.write(source)
......@@ -232,3 +243,15 @@ def convert_to_source(net_def, model_checksum, weight_checksum, template_dir,
source = j2_env.get_template(template_name).render(tag=model_tag, )
with open(output_dir + model_tag + '.h', "wb") as f:
f.write(source)
for t in net_def.tensors:
if t.data_type == mace_pb2.DT_FLOAT:
del t.float_data[:]
if runtime == 'gpu':
t.data_type = mace_pb2.DT_HALF
else:
t.data_type = mace_pb2.DT_FLOAT
elif t.data_type == mace_pb2.DT_INT32:
del t.int32_data[:]
elif t.data_type == mace_pb2.DT_UINT8:
del t.int32_data[:]
......@@ -24,12 +24,16 @@
namespace mace {
namespace {{tag}} {
void CreateTensor{{tensor_info.id}}(std::vector<mace::ConstTensor> *tensors,
const unsigned char *model_data) {
void CreateTensor{{tensor_info.id}}(mace::ConstTensor *const_tensor) {
MACE_LATENCY_LOGGER(2, "Create tensor {{ tensor.name }}");
tensors->emplace_back(mace::ConstTensor(
{{ tensor.name|tojson }}, model_data + {{ offset }},
{ {{ tensor.dims|join(', ') }} }, {{ tensor_info.data_type }}, {{ tensor.node_id }}));
const_tensor->set_name({{ tensor.name|tojson }});
const_tensor->set_offset({{ tensor.offset }});
const_tensor->set_data_size({{ tensor.data_size }});
{% for dim in tensor.dims %}
const_tensor->add_dims({{ dim }});
{% endfor %}
const_tensor->set_data_type(static_cast<DataType>({{ tensor_info.data_type }}));
const_tensor->set_node_id({{ tensor.node_id }});
}
} // namespace {{tag}}
......
......@@ -13,7 +13,8 @@ cc_test(
name = "mace_api_test",
testonly = 1,
srcs = ["mace_api_test.cc"],
copts = if_openmp_enabled(["-fopenmp"]) +
copts = ["-Werror"] +
if_openmp_enabled(["-fopenmp"]) +
if_neon_enabled(["-DMACE_ENABLE_NEON"]) +
if_android_armv7(["-mfpu=neon"]) +
if_android_armv7(["-mfloat-abi=softfp"]) +
......@@ -33,7 +34,8 @@ cc_test(
name = "mace_api_mt_test",
testonly = 1,
srcs = ["mace_api_mt_test.cc"],
copts = if_openmp_enabled(["-fopenmp"]) +
copts = ["-Werror"] +
if_openmp_enabled(["-fopenmp"]) +
if_neon_enabled(["-DMACE_ENABLE_NEON"]) +
if_android_armv7(["-mfpu=neon"]) +
if_android_armv7(["-mfloat-abi=softfp"]) +
......
......@@ -69,8 +69,9 @@ void BufferToImage(const std::string &input_name,
.AddIntArg("mode", mode)
.Finalize(&operator_def);
operator_def.set_mem_id(mem_ids);
for (auto mem_id : mem_ids) {
operator_def.add_mem_id(mem_id);
}
net_def->add_op()->CopyFrom(operator_def);
}
......@@ -112,7 +113,9 @@ void Conv3x3(const std::string &input_name,
.AddIntArg("device", static_cast<int>(device_type))
.Finalize(&operator_def);
operator_def.set_mem_id(mem_ids);
for (auto mem_id : mem_ids) {
operator_def.add_mem_id(mem_id);
}
net_def->add_op()->CopyFrom(operator_def);
}
......@@ -136,20 +139,24 @@ void Relu(const std::string &input_name,
template <typename T>
void AddTensor(const std::string &name,
const std::vector<int64_t> &shape,
T *data,
const int offset,
const int data_size,
NetDef *net_def) {
ConstTensor tensor(name,
reinterpret_cast<unsigned char *>(data),
shape,
DataTypeToEnum<T>::value);
net_def->mutable_tensors().push_back(tensor);
ConstTensor *tensor_ptr = net_def->add_tensors();
tensor_ptr->set_name(name);
for (auto dim : shape) {
tensor_ptr->add_dims(dim);
}
tensor_ptr->set_offset(offset);
tensor_ptr->set_data_size(data_size);
tensor_ptr->set_data_type(DataTypeToEnum<T>::value);
}
template <DeviceType D, typename T>
void CheckOutputs(const NetDef &net_def,
const std::map<std::string, mace::MaceTensor> &inputs,
const std::map<std::string, mace::MaceTensor> &outputs) {
const std::map<std::string, mace::MaceTensor> &outputs,
const std::vector<T> &tensor_data) {
ops::test::OpsTestNet net;
for (auto input : inputs) {
auto input_shape = input.second.shape();
......@@ -166,13 +173,14 @@ void CheckOutputs(const NetDef &net_def,
}
auto tensors = net_def.tensors();
for (auto tensor : tensors) {
auto shape = tensor.dims();
std::vector<index_t> shape = {tensor.dims().begin(), tensor.dims().end()};
const int64_t data_size = std::accumulate(shape.begin(),
shape.end(), 1,
std::multiplies<int64_t>());
std::vector<T> data(data_size);
memcpy(data.data(), reinterpret_cast<const T *>(tensor.data()),
data_size * sizeof(T));
memcpy(data.data(),
reinterpret_cast<const T *>(tensor_data.data()) + tensor.offset(),
tensor.data_size() * sizeof(T));
net.AddInputFromArray<D, T>(tensor.name(), shape, data);
}
net.RunNet(net_def, D);
......@@ -218,8 +226,11 @@ std::map<std::string, int> AddMemoryOptimization(
}
size_t input_size = input_names.size();
for (size_t i = 0; i < input_size; ++i) {
net_def->mutable_mem_arena().mutable_mem_block().push_back(
MemoryBlock(mem_id, in_mem_block_x, in_mem_block_y));
MemoryArena *mem_arena_ptr = net_def->mutable_mem_arena();
MemoryBlock *mem_blk_ptr = mem_arena_ptr->add_mem_block();
mem_blk_ptr->set_mem_id(mem_id);
mem_blk_ptr->set_x(in_mem_block_x);
mem_blk_ptr->set_y(in_mem_block_y);
res[input_names[i]] = mem_id;
mem_id++;
}
......@@ -236,8 +247,11 @@ std::map<std::string, int> AddMemoryOptimization(
}
size_t output_size = output_names.size();
for (size_t i = 0; i < output_size; ++i) {
net_def->mutable_mem_arena().mutable_mem_block().push_back(
MemoryBlock(mem_id, out_mem_block_x, out_mem_block_y));
MemoryArena *mem_arena_ptr = net_def->mutable_mem_arena();
MemoryBlock *mem_blk_ptr = mem_arena_ptr->add_mem_block();
mem_blk_ptr->set_mem_id(mem_id);
mem_blk_ptr->set_x(out_mem_block_x);
mem_blk_ptr->set_y(out_mem_block_y);
res[output_names[i]] = mem_id;
mem_id++;
}
......@@ -270,7 +284,7 @@ void MaceRunFunc(const int in_out_size) {
std::vector<half> data;
ops::test::GenerateRandomRealTypeData<half>(filter_shape, &data);
AddTensor<half>(filter_tensor_name, filter_shape, data.data(), &net_def);
AddTensor<half>(filter_tensor_name, filter_shape, 0, data.size(), &net_def);
for (size_t i = 0; i < input_names.size(); ++i) {
std::string input_name = MakeString("mace_input_node_",
......@@ -304,7 +318,8 @@ void MaceRunFunc(const int in_out_size) {
new FileStorageFactory(file_path));
mace::SetKVStorageFactory(storage_factory);
MaceEngine engine(&net_def, device, input_names, output_names);
MaceEngine engine(&net_def, device, input_names, output_names,
reinterpret_cast<unsigned char *>(data.data()));
std::map<std::string, mace::MaceTensor> inputs;
std::map<std::string, mace::MaceTensor> outputs;
......@@ -320,7 +335,7 @@ void MaceRunFunc(const int in_out_size) {
}
}
CheckOutputs<DeviceType::GPU, half>(net_def, inputs, outputs);
CheckOutputs<DeviceType::GPU, half>(net_def, inputs, outputs, data);
}
} // namespace
......
......@@ -79,7 +79,9 @@ void BufferToImage(const std::string &input_name,
.AddIntArg("mode", mode)
.Finalize(&operator_def);
operator_def.set_mem_id(mem_ids);
for (auto mem_id : mem_ids) {
operator_def.add_mem_id(mem_id);
}
net_def->add_op()->CopyFrom(operator_def);
}
......@@ -122,7 +124,9 @@ void Conv3x3(const std::string &input_name,
.AddIntArg("device", static_cast<int>(device_type))
.Finalize(&operator_def);
operator_def.set_mem_id(mem_ids);
for (auto mem_id : mem_ids) {
operator_def.add_mem_id(mem_id);
}
net_def->add_op()->CopyFrom(operator_def);
}
......@@ -146,20 +150,24 @@ void Relu(const std::string &input_name,
template <typename T>
void AddTensor(const std::string &name,
const std::vector<int64_t> &shape,
T *data,
const int offset,
const int data_size,
NetDef *net_def) {
ConstTensor tensor(name,
reinterpret_cast<unsigned char *>(data),
shape,
DataTypeToEnum<T>::value);
net_def->mutable_tensors().push_back(tensor);
ConstTensor *tensor_ptr = net_def->add_tensors();
tensor_ptr->set_name(name);
for (auto dim : shape) {
tensor_ptr->add_dims(dim);
}
tensor_ptr->set_offset(offset);
tensor_ptr->set_data_size(data_size);
tensor_ptr->set_data_type(DataTypeToEnum<T>::value);
}
template <DeviceType D, typename T>
void CheckOutputs(const NetDef &net_def,
const std::map<std::string, mace::MaceTensor> &inputs,
const std::map<std::string, mace::MaceTensor> &outputs) {
const std::map<std::string, mace::MaceTensor> &outputs,
const std::vector<T> &tensor_data) {
ops::test::OpsTestNet net;
for (auto input : inputs) {
auto input_shape = input.second.shape();
......@@ -176,13 +184,14 @@ void CheckOutputs(const NetDef &net_def,
}
auto tensors = net_def.tensors();
for (auto tensor : tensors) {
auto shape = tensor.dims();
std::vector<index_t> shape = {tensor.dims().begin(), tensor.dims().end()};
const int64_t data_size = std::accumulate(shape.begin(),
shape.end(), 1,
std::multiplies<int64_t>());
std::vector<T> data(data_size);
memcpy(data.data(), reinterpret_cast<const T *>(tensor.data()),
data_size * sizeof(T));
memcpy(data.data(),
reinterpret_cast<const T *>(tensor_data.data()) + tensor.offset(),
tensor.data_size() * sizeof(T));
net.AddInputFromArray<D, T>(tensor.name(), shape, data);
}
net.RunNet(net_def, D);
......@@ -228,8 +237,11 @@ std::map<std::string, int> AddMemoryOptimization(
}
size_t input_size = input_names.size();
for (size_t i = 0; i < input_size; ++i) {
net_def->mutable_mem_arena().mutable_mem_block().push_back(
MemoryBlock(mem_id, in_mem_block_x, in_mem_block_y));
MemoryArena *mem_arena_ptr = net_def->mutable_mem_arena();
MemoryBlock *mem_blk_ptr = mem_arena_ptr->add_mem_block();
mem_blk_ptr->set_mem_id(mem_id);
mem_blk_ptr->set_x(in_mem_block_x);
mem_blk_ptr->set_y(in_mem_block_y);
res[input_names[i]] = mem_id;
mem_id++;
}
......@@ -246,8 +258,11 @@ std::map<std::string, int> AddMemoryOptimization(
}
size_t output_size = output_names.size();
for (size_t i = 0; i < output_size; ++i) {
net_def->mutable_mem_arena().mutable_mem_block().push_back(
MemoryBlock(mem_id, out_mem_block_x, out_mem_block_y));
MemoryArena *mem_arena_ptr = net_def->mutable_mem_arena();
MemoryBlock *mem_blk_ptr = mem_arena_ptr->add_mem_block();
mem_blk_ptr->set_mem_id(mem_id);
mem_blk_ptr->set_x(out_mem_block_x);
mem_blk_ptr->set_y(out_mem_block_y);
res[output_names[i]] = mem_id;
mem_id++;
}
......@@ -280,7 +295,7 @@ void MaceRun(const int in_out_size,
std::vector<T> data;
ops::test::GenerateRandomRealTypeData<T>(filter_shape, &data);
AddTensor<T>(filter_tensor_name, filter_shape, data.data(), &net_def);
AddTensor<T>(filter_tensor_name, filter_shape, 0, data.size(), &net_def);
for (size_t i = 0; i < input_names.size(); ++i) {
std::string input_name = MakeString("mace_input_node_",
......@@ -308,7 +323,8 @@ void MaceRun(const int in_out_size,
&net_def);
}
MaceEngine engine(&net_def, device, input_names, output_names);
MaceEngine engine(&net_def, device, input_names, output_names,
reinterpret_cast<unsigned char *>(data.data()));
std::map<std::string, mace::MaceTensor> inputs;
std::map<std::string, mace::MaceTensor> outputs;
......@@ -324,7 +340,7 @@ void MaceRun(const int in_out_size,
}
}
CheckOutputs<DeviceType::GPU, T>(net_def, inputs, outputs);
CheckOutputs<DeviceType::GPU, T>(net_def, inputs, outputs, data);
}
} // namespace
......
......@@ -28,7 +28,6 @@
#include <stdint.h>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <numeric>
......@@ -37,6 +36,7 @@
#include "mace/public/mace_runtime.h"
#include "mace/utils/env_time.h"
#include "mace/utils/logging.h"
#include "mace/utils/utils.h"
#ifdef MACE_ENABLE_OPENCL
#include "mace/core/runtime/opencl/opencl_runtime.h"
......@@ -186,7 +186,10 @@ DEFINE_string(output_file,
"output file name | output file prefix for multiple outputs");
DEFINE_string(model_data_file,
"",
"model data file name, used when EMBED_MODEL_DATA set to 0");
"model data file name, used when EMBED_MODEL_DATA set to 0 or 2");
DEFINE_string(model_file,
"",
"model file name, used when load mace model in pb");
DEFINE_string(device, "GPU", "CPU/GPU/HEXAGON");
DEFINE_int32(round, 1, "round");
DEFINE_int32(restart_round, 1, "restart round");
......@@ -228,18 +231,25 @@ bool RunModel(const std::string &model_name,
MaceStatus create_engine_status;
// Create Engine
int64_t t0 = NowMicros();
if (FLAGS_model_data_file.empty()) {
const char *model_data_file_ptr =
FLAGS_model_data_file.empty() ? nullptr : FLAGS_model_data_file.c_str();
if (FLAGS_model_file != "") {
std::vector<unsigned char> model_pb_data;
if (!mace::ReadBinaryFile(&model_pb_data, FLAGS_model_file)) {
LOG(FATAL) << "Failed to read file: " << FLAGS_model_file;
}
create_engine_status =
CreateMaceEngine(model_name.c_str(),
nullptr,
model_data_file_ptr,
input_names,
output_names,
device_type,
&engine);
&engine,
model_pb_data);
} else {
create_engine_status =
CreateMaceEngine(model_name.c_str(),
FLAGS_model_data_file.c_str(),
model_data_file_ptr,
input_names,
output_names,
device_type,
......@@ -358,6 +368,7 @@ int Main(int argc, char **argv) {
LOG(INFO) << "input_file: " << FLAGS_input_file;
LOG(INFO) << "output_file: " << FLAGS_output_file;
LOG(INFO) << "model_data_file: " << FLAGS_model_data_file;
LOG(INFO) << "model_file: " << FLAGS_model_file;
LOG(INFO) << "device: " << FLAGS_device;
LOG(INFO) << "round: " << FLAGS_round;
LOG(INFO) << "restart_round: " << FLAGS_restart_round;
......
......@@ -28,8 +28,10 @@ cc_library(
linkopts = if_android([
"-llog",
]),
copts = ["-Werror"],
deps = [
"//mace/public",
"//mace/proto:mace_cc",
],
)
......@@ -38,6 +40,7 @@ cc_library(
srcs = [
"tuner_development.cc",
],
copts = ["-Werror"],
deps = [
":utils",
],
......@@ -48,6 +51,7 @@ cc_library(
srcs = [
"tuner_production.cc",
],
copts = ["-Werror"],
deps = [
":utils",
"//mace/codegen:generated_tuning_params",
......@@ -60,6 +64,7 @@ cc_test(
srcs = [
"tuner_test.cc",
],
copts = ["-Werror"],
linkopts = if_android([
"-pie",
"-lm", # Required by unordered_map
......
......@@ -21,10 +21,11 @@
#include <vector>
#include <utility>
#include "mace/proto/mace.pb.h"
#include "mace/public/mace.h"
#include "mace/public/mace_types.h"
#include "mace/utils/env_time.h"
#include "mace/utils/string_util.h"
#include "mace/utils/utils.h"
#undef ERROR
......
......@@ -15,12 +15,22 @@
#ifndef MACE_UTILS_UTILS_H_
#define MACE_UTILS_UTILS_H_
#include <fstream>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
namespace mace {
// Disable the copy and assignment operator for a class.
//
// Usage: place DISABLE_COPY_AND_ASSIGN(MyClass); inside the class body.
// It switches the access level to `private` and deletes the copy
// constructor and copy-assignment operator, so any attempted copy of the
// class fails at compile time. Guarded by #ifndef so a project-wide
// definition of the same macro takes precedence.
// NOTE(review): the macro leaves the class in a `private:` section —
// members declared after it are private unless the access level is reset.
#ifndef DISABLE_COPY_AND_ASSIGN
#define DISABLE_COPY_AND_ASSIGN(classname) \
private: \
classname(const classname &) = delete; \
classname &operator=(const classname &) = delete
#endif
template <typename Integer>
Integer RoundUp(Integer i, Integer factor) {
return (i + factor - 1) / factor * factor;
......@@ -121,5 +131,26 @@ inline std::vector<std::string> Split(const std::string &str, char delims) {
return result;
}
// Reads the entire contents of a binary file into *data.
//
// The file's bytes are inserted at the beginning of *data (existing
// elements, if any, are kept after the inserted bytes, matching the
// original insert-at-begin behavior).
//
// @param data      non-null output vector receiving the file contents.
// @param filename  path of the file to read.
// @return true if the whole file was read successfully; false if the file
//         cannot be opened, its size cannot be determined, or a read
//         error occurs.
inline bool ReadBinaryFile(std::vector<unsigned char> *data,
                           const std::string &filename) {
  // Open positioned at the end (ate) so tellg() yields the file size
  // directly, avoiding a separate seek-to-end/seek-to-beginning dance.
  std::ifstream ifs(filename,
                    std::ios::in | std::ios::binary | std::ios::ate);
  if (!ifs.is_open()) {
    return false;
  }
  const std::streamoff length = ifs.tellg();
  if (length < 0) {
    // tellg() failed; previously the -1 would wrap to a huge size_t and
    // make reserve() throw std::length_error / bad_alloc.
    return false;
  }
  ifs.seekg(0, std::ios::beg);
  // Reserve up front so the insert below performs a single allocation.
  data->reserve(data->size() + static_cast<size_t>(length));
  data->insert(data->begin(), std::istreambuf_iterator<char>(ifs),
               std::istreambuf_iterator<char>());
  if (ifs.fail()) {
    return false;
  }
  ifs.close();
  return true;
}
} // namespace mace
#endif // MACE_UTILS_UTILS_H_
......@@ -159,6 +159,7 @@ def tuning_run(target_abi,
output_nodes,
input_shapes,
output_shapes,
mace_model_dir,
model_name,
device_type,
running_round,
......@@ -181,6 +182,7 @@ def tuning_run(target_abi,
output_nodes,
input_shapes,
output_shapes,
mace_model_dir,
model_name,
device_type,
running_round,
......@@ -206,9 +208,9 @@ def tuning_run(target_abi,
def build_mace_run_prod(hexagon_mode, runtime, target_abi,
serialno, vlog_level, embed_model_data,
model_output_dir, input_nodes, output_nodes,
input_shapes, output_shapes, model_name, device_type,
running_round, restart_round, tuning,
limit_opencl_kernel_time, phone_data_dir,
input_shapes, output_shapes, mace_model_dir,
model_name, device_type, running_round, restart_round,
tuning, limit_opencl_kernel_time, phone_data_dir,
enable_openmp):
mace_run_target = "//mace/tools/validation:mace_run"
strip = "always"
......@@ -232,8 +234,8 @@ def build_mace_run_prod(hexagon_mode, runtime, target_abi,
device_type = parse_device_type("gpu")
tuning_run(target_abi, serialno, vlog_level, embed_model_data,
model_output_dir, input_nodes, output_nodes, input_shapes,
output_shapes, model_name, device_type, running_round=0,
restart_round=1, out_of_range_check=False,
output_shapes, mace_model_dir, model_name, device_type,
running_round=0, restart_round=1, out_of_range_check=False,
phone_data_dir=phone_data_dir, tuning=tuning,
limit_opencl_kernel_time=limit_opencl_kernel_time)
......@@ -271,6 +273,7 @@ def merge_libs_and_tuning_results(target_soc,
project_name,
output_dir,
model_output_dirs,
mace_model_dirs_kv,
hexagon_mode,
embed_model_data):
gen_opencl_and_tuning_code(
......@@ -282,6 +285,7 @@ def merge_libs_and_tuning_results(target_soc,
project_name,
output_dir,
model_output_dirs,
mace_model_dirs_kv,
hexagon_mode,
embed_model_data)
......@@ -523,13 +527,21 @@ def parse_args():
type=str,
default="cpu",
help="validation runtime.")
parser.add_argument(
"--model_load_type",
type=str,
default="source",
help="[source|pb] Load models in generated `source` code" +
"or `pb` file.")
return parser.parse_known_args()
def process_models(project_name, configs, embed_model_data, vlog_level,
target_abi, phone_data_dir, target_soc="", serialno=""):
target_abi, phone_data_dir, model_load_type,
target_soc="", serialno=""):
hexagon_mode = get_hexagon_mode(configs)
model_output_dirs = []
mace_model_dirs_kv = {}
for model_name in configs["models"]:
print '===================', model_name, '==================='
......@@ -545,6 +557,11 @@ def process_models(project_name, configs, embed_model_data, vlog_level,
model_output_base_dir = "%s/%s/%s/%s/%s" % (
FLAGS.output_dir, project_name, "build",
model_name, model_path_digest)
if model_load_type == "pb":
mace_model_dir = model_output_base_dir
mace_model_dirs_kv[model_name] = mace_model_dir
else:
mace_model_dir = ""
if target_abi == "host":
model_output_dir = "%s/%s" % (model_output_base_dir, target_abi)
......@@ -587,6 +604,7 @@ def process_models(project_name, configs, embed_model_data, vlog_level,
model_config["output_nodes"],
model_config["input_shapes"],
model_config["output_shapes"],
mace_model_dir,
model_name,
model_device_type,
FLAGS.round,
......@@ -612,6 +630,7 @@ def process_models(project_name, configs, embed_model_data, vlog_level,
model_config["output_nodes"],
model_config["input_shapes"],
model_config["output_shapes"],
mace_model_dir,
model_name,
run_device_type,
FLAGS.round,
......@@ -631,6 +650,7 @@ def process_models(project_name, configs, embed_model_data, vlog_level,
vlog_level,
embed_model_data,
model_output_dir,
mace_model_dir,
model_config["input_nodes"],
model_config["output_nodes"],
model_config["input_shapes"],
......@@ -667,6 +687,7 @@ def process_models(project_name, configs, embed_model_data, vlog_level,
project_name,
FLAGS.output_dir,
model_output_dirs,
mace_model_dirs_kv,
hexagon_mode,
embed_model_data)
......@@ -778,7 +799,9 @@ def main(unused_args):
model_config["dsp_mode"],
embed_model_data,
model_config["fast_conv"],
model_config["obfuscate"])
model_config["obfuscate"],
model_output_base_dir,
FLAGS.model_load_type)
for target_abi in configs["target_abis"]:
for target_soc in target_socs:
......@@ -796,12 +819,14 @@ def main(unused_args):
props["ro.product.model"]))
process_models(project_name, configs, embed_model_data,
vlog_level, target_abi, phone_data_dir,
target_soc, serialno)
FLAGS.model_load_type, target_soc,
serialno)
else:
print("====================================================")
print("Run on host")
process_models(project_name, configs, embed_model_data,
vlog_level, target_abi, phone_data_dir)
vlog_level, target_abi, phone_data_dir,
FLAGS.model_load_type)
if FLAGS.mode == "build" or FLAGS.mode == "all":
sh_commands.packaging_lib(FLAGS.output_dir, project_name)
......
......@@ -287,7 +287,6 @@ def bazel_build(target,
target,
"--copt=-std=c++11",
"--copt=-D_GLIBCXX_USE_C99_MATH_TR1",
"--copt=-Werror",
"--copt=-Wextra",
"--copt=-Wno-missing-field-initializers",
"--copt=-O3",
......@@ -316,7 +315,6 @@ def bazel_build(target,
"--cpu=%s" % abi,
"--copt=-std=c++11",
"--copt=-D_GLIBCXX_USE_C99_MATH_TR1",
"--copt=-Werror",
"--copt=-Wextra",
"--copt=-Wno-missing-field-initializers",
"--copt=-DMACE_OBFUSCATE_LITERALS",
......@@ -375,7 +373,8 @@ def gen_encrypted_opencl_source(codegen_path="mace/codegen"):
"mace/codegen/opencl/opencl_encrypt_program.cc")
def gen_mace_engine_factory_source(model_tags, codegen_path="mace/codegen"):
def gen_mace_engine_factory_source(model_tags,
codegen_path="mace/codegen"):
print("* Genearte mace engine creator source")
codegen_tools_dir = "%s/engine" % codegen_path
sh.rm("-rf", codegen_tools_dir)
......@@ -471,12 +470,16 @@ def gen_model_code(model_codegen_dir,
dsp_mode,
embed_model_data,
fast_conv,
obfuscate):
obfuscate,
model_output_dir,
model_load_type):
print("* Genearte model code")
bazel_build_common("//mace/python/tools:converter")
if os.path.exists(model_codegen_dir):
sh.rm("-rf", model_codegen_dir)
sh.mkdir("-p", model_codegen_dir)
stdout_buff = []
process_output = make_output_processor(stdout_buff)
p = sh.python("bazel-bin/mace/python/tools/converter",
......@@ -485,11 +488,9 @@ def gen_model_code(model_codegen_dir,
"--model_file=%s" % model_file_path,
"--weight_file=%s" % weight_file_path,
"--model_checksum=%s" % model_sha256_checksum,
"--output=%s" % model_codegen_dir + "/model.cc",
"--input_node=%s" % input_nodes,
"--output_node=%s" % output_nodes,
"--runtime=%s" % runtime,
"--output_type=source",
"--template=%s" % "mace/python/tools",
"--model_tag=%s" % model_tag,
"--input_shape=%s" % input_shapes,
......@@ -497,6 +498,9 @@ def gen_model_code(model_codegen_dir,
"--embed_model_data=%s" % embed_model_data,
"--winograd=%s" % fast_conv,
"--obfuscate=%s" % obfuscate,
"--codegen_output=%s/model.cc" % model_codegen_dir,
"--pb_output=%s/%s.pb" % (model_output_dir, model_tag),
"--model_load_type=%s" % model_load_type,
_out=process_output,
_bg=True,
_err_to_out=True)
......@@ -577,6 +581,7 @@ def tuning_run(abi,
output_nodes,
input_shapes,
output_shapes,
mace_model_dir,
model_tag,
device_type,
running_round,
......@@ -601,6 +606,10 @@ def tuning_run(abi,
str(out_of_range_check), omp_num_threads, cpu_affinity_policy,
gpu_perf_hint, gpu_priority_hint))
if abi == "host":
if mace_model_dir:
mace_model_path = "%s/%s.pb" % (mace_model_dir, model_tag)
else:
mace_model_path = ""
p = subprocess.Popen(
[
"env",
......@@ -621,6 +630,7 @@ def tuning_run(abi,
"--cpu_affinity_policy=%s" % cpu_affinity_policy,
"--gpu_perf_hint=%s" % gpu_perf_hint,
"--gpu_priority_hint=%s" % gpu_priority_hint,
"--model_file=%s" % mace_model_path,
],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
......@@ -647,6 +657,14 @@ def tuning_run(abi,
adb_push("mace/third_party/nnlib/libhexagon_controller.so",
phone_data_dir, serialno)
if mace_model_dir:
mace_model_path = "%s/%s.pb" % (phone_data_dir, model_tag)
adb_push("%s/%s.pb" % (mace_model_dir, model_tag),
mace_model_path,
serialno)
else:
mace_model_path = ""
stdout_buff = []
process_output = make_output_processor(stdout_buff)
adb_cmd = [
......@@ -681,6 +699,7 @@ def tuning_run(abi,
"--cpu_affinity_policy=%s" % cpu_affinity_policy,
"--gpu_perf_hint=%s" % gpu_perf_hint,
"--gpu_priority_hint=%s" % gpu_priority_hint,
"--model_file=%s" % mace_model_path,
])
adb_cmd = ' '.join(adb_cmd)
p = sh.adb(
......@@ -832,6 +851,7 @@ def merge_libs(target_soc,
project_name,
libmace_output_dir,
model_output_dirs,
mace_model_dirs_kv,
hexagon_mode,
embed_model_data):
print("* Merge mace lib")
......@@ -903,6 +923,10 @@ def merge_libs(target_soc,
model_data_dir)
sh.cp("-f", glob.glob("%s/*.h" % model_output_dir), model_header_dir)
for model_name in mace_model_dirs_kv:
sh.cp("-f", "%s/%s.pb" % (mace_model_dirs_kv[model_name], model_name),
model_data_dir)
mri_stream += "save\n"
mri_stream += "end\n"
......@@ -969,6 +993,7 @@ def benchmark_model(abi,
vlog_level,
embed_model_data,
model_output_dir,
mace_model_dir,
input_nodes,
output_nodes,
input_shapes,
......@@ -986,6 +1011,10 @@ def benchmark_model(abi,
stdout_buff = []
process_output = make_output_processor(stdout_buff)
if abi == "host":
if mace_model_dir:
mace_model_path = "%s/%s.pb" % (mace_model_dir, model_tag)
else:
mace_model_path = ""
p = subprocess.Popen(
[
"env",
......@@ -1003,6 +1032,7 @@ def benchmark_model(abi,
"--cpu_affinity_policy=%s" % cpu_affinity_policy,
"--gpu_perf_hint=%s" % gpu_perf_hint,
"--gpu_priority_hint=%s" % gpu_priority_hint,
"--model_file=%s" % mace_model_path,
])
p.wait()
else:
......@@ -1020,6 +1050,14 @@ def benchmark_model(abi,
if not embed_model_data:
adb_push("%s/%s.data" % (model_output_dir, model_tag),
phone_data_dir, serialno)
if mace_model_dir:
mace_model_path = "%s/%s.pb" % (phone_data_dir, model_tag)
adb_push("%s/%s.pb" % (mace_model_dir, model_tag),
mace_model_path,
serialno)
else:
mace_model_path = ""
p = sh.adb(
"-s",
serialno,
......@@ -1043,6 +1081,7 @@ def benchmark_model(abi,
"--cpu_affinity_policy=%s" % cpu_affinity_policy,
"--gpu_perf_hint=%s" % gpu_perf_hint,
"--gpu_priority_hint=%s" % gpu_priority_hint,
"--model_file=%s" % mace_model_path,
_out=process_output,
_bg=True,
_err_to_out=True)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册