提交 0c7001fa 编写于 作者: 李寅

Refactor quantization tool; Add image to tensor and backward tools

上级 ab908a59
......@@ -131,7 +131,6 @@ model_tests:
python tools/converter.py convert --config=${CONF_FILE} --model_graph_format=file --model_data_format=file || exit 1;
python tools/converter.py run --config=${CONF_FILE} --round=1 --validate --model_graph_format=file --model_data_format=file || exit 1;
python tools/converter.py run --config=${CONF_FILE} --example --round=1 --validate --model_graph_format=file --model_data_format=file || exit 1;
- bazel build --copt=-fopenmp mace/tools/quantization:quantize_stat
- rm -rf mace-models
build_android_demo:
......
......@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include <dirent.h>
#include <malloc.h>
#include <stdint.h>
#include <cstdlib>
......@@ -104,6 +105,12 @@ DEFINE_string(input_file,
DEFINE_string(output_file,
"",
"output file name | output file prefix for multiple outputs");
DEFINE_string(input_dir,
"",
"input directory name");
DEFINE_string(output_dir,
"",
"output directory name");
DEFINE_string(opencl_binary_file,
"",
"compiled opencl binary file path");
......@@ -232,24 +239,14 @@ bool RunModel(const std::vector<std::string> &input_names,
std::map<std::string, mace::MaceTensor> inputs;
std::map<std::string, mace::MaceTensor> outputs;
std::map<std::string, int64_t> inputs_size;
for (size_t i = 0; i < input_count; ++i) {
// Allocate input and output
int64_t input_size =
std::accumulate(input_shapes[i].begin(), input_shapes[i].end(), 1,
std::multiplies<int64_t>());
inputs_size[input_names[i]] = input_size;
auto buffer_in = std::shared_ptr<float>(new float[input_size],
std::default_delete<float[]>());
// load input
std::ifstream in_file(FLAGS_input_file + "_" + FormatName(input_names[i]),
std::ios::in | std::ios::binary);
if (in_file.is_open()) {
in_file.read(reinterpret_cast<char *>(buffer_in.get()),
input_size * sizeof(float));
in_file.close();
} else {
std::cout << "Open input file failed" << std::endl;
return -1;
}
inputs[input_names[i]] = mace::MaceTensor(input_shapes[i], buffer_in);
}
......@@ -262,30 +259,99 @@ bool RunModel(const std::vector<std::string> &input_names,
outputs[output_names[i]] = mace::MaceTensor(output_shapes[i], buffer_out);
}
std::cout << "Warm up run" << std::endl;
engine->Run(inputs, &outputs);
if (FLAGS_round > 0) {
std::cout << "Run model" << std::endl;
for (int i = 0; i < FLAGS_round; ++i) {
engine->Run(inputs, &outputs);
if (!FLAGS_input_dir.empty()) {
DIR *dir_parent;
struct dirent *entry;
dir_parent = opendir(FLAGS_input_dir.c_str());
if (dir_parent) {
while ((entry = readdir(dir_parent))) {
std::string file_name = std::string(entry->d_name);
std::string prefix = FormatName(input_names[0]);
if (file_name.find(prefix) == 0) {
std::string suffix = file_name.substr(prefix.size());
for (size_t i = 0; i < input_count; ++i) {
file_name = FLAGS_input_dir + "/" + FormatName(input_names[i])
+ suffix;
std::ifstream in_file(file_name, std::ios::in | std::ios::binary);
std::cout << "Read " << file_name << std::endl;
if (in_file.is_open()) {
in_file.read(reinterpret_cast<char *>(
inputs[input_names[i]].data().get()),
inputs_size[input_names[i]] * sizeof(float));
in_file.close();
} else {
std::cerr << "Open input file failed" << std::endl;
return -1;
}
}
engine->Run(inputs, &outputs);
if (!FLAGS_output_dir.empty()) {
for (size_t i = 0; i < output_count; ++i) {
std::string output_name =
FLAGS_output_dir + "/" + FormatName(output_names[i]) + suffix;
std::ofstream out_file(output_name, std::ios::binary);
if (out_file.is_open()) {
int64_t output_size =
std::accumulate(output_shapes[i].begin(),
output_shapes[i].end(),
1,
std::multiplies<int64_t>());
out_file.write(
reinterpret_cast<char *>(
outputs[output_names[i]].data().get()),
output_size * sizeof(float));
out_file.flush();
out_file.close();
} else {
std::cerr << "Open output file failed";
return -1;
}
}
}
}
}
closedir(dir_parent);
} else {
std::cerr << "Directory " << FLAGS_input_dir << " does not exist.";
}
} else {
for (size_t i = 0; i < input_count; ++i) {
std::ifstream in_file(FLAGS_input_file + "_" + FormatName(input_names[i]),
std::ios::in | std::ios::binary);
if (in_file.is_open()) {
in_file.read(reinterpret_cast<char *>(
inputs[input_names[i]].data().get()),
inputs_size[input_names[i]] * sizeof(float));
in_file.close();
} else {
std::cerr << "Open input file failed" << std::endl;
return -1;
}
}
engine->Run(inputs, &outputs);
for (size_t i = 0; i < output_count; ++i) {
std::string output_name =
FLAGS_output_file + "_" + FormatName(output_names[i]);
std::ofstream out_file(output_name, std::ios::binary);
int64_t output_size =
std::accumulate(output_shapes[i].begin(), output_shapes[i].end(), 1,
std::multiplies<int64_t>());
if (out_file.is_open()) {
out_file.write(
reinterpret_cast<char *>(outputs[output_names[i]].data().get()),
output_size * sizeof(float));
out_file.flush();
out_file.close();
} else {
std::cerr << "Open output file failed";
return -1;
}
}
}
std::cout << "Write output" << std::endl;
for (size_t i = 0; i < output_count; ++i) {
std::string output_name =
FLAGS_output_file + "_" + FormatName(output_names[i]);
std::ofstream out_file(output_name, std::ios::binary);
int64_t output_size =
std::accumulate(output_shapes[i].begin(), output_shapes[i].end(), 1,
std::multiplies<int64_t>());
out_file.write(
reinterpret_cast<char *>(outputs[output_names[i]].data().get()),
output_size * sizeof(float));
out_file.flush();
out_file.close();
}
std::cout << "Finished" << std::endl;
return true;
......@@ -304,6 +370,8 @@ int Main(int argc, char **argv) {
std::cout << "output shape: " << FLAGS_output_shape << std::endl;
std::cout << "input_file: " << FLAGS_input_file << std::endl;
std::cout << "output_file: " << FLAGS_output_file << std::endl;
std::cout << "input_dir: " << FLAGS_input_dir << std::endl;
std::cout << "output dir: " << FLAGS_output_dir << std::endl;
std::cout << "model_data_file: " << FLAGS_model_data_file << std::endl;
std::cout << "model_file: " << FLAGS_model_file << std::endl;
std::cout << "device: " << FLAGS_device << std::endl;
......
......@@ -1831,7 +1831,7 @@ class Transformer(base_converter.ConverterInterface):
if range_file:
with open(range_file) as f:
for line in f:
tensor_name, minmax = line.split("@@")
tensor_name, minmax = line.split("@@")[:2]
min_val, max_val = [float(i) for i in
minmax.strip().split(",")]
scale, zero = quantize_util.adjust_range(min_val, max_val,
......
# Quantize stat build
# Host-only tool that feeds a directory of input tensors through a model
# and (with MACE_LOG_TENSOR_RANGE=1) logs per-tensor value ranges used to
# derive quantization parameters.
cc_binary(
    name = "quantize_stat",
    srcs = ["quantize_stat.cc"],
    copts = [
        "-Werror",
        "-Wextra",
    ],
    # The CPU runtime parallelizes inference with OpenMP.
    linkopts = ["-fopenmp"],
    linkstatic = 1,
    deps = [
        "//external:gflags_nothreads",
        "//mace/codegen:generated_mace_engine_factory",
        "//mace/codegen:libmodels",
        "//mace/libmace",
    ],
)
// Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* Usage:
* quantize_stat --model=mobi_mace.pb \
* --input=input_node \
* --output=output_node \
* --input_shape=1,224,224,3 \
* --output_shape=1,224,224,2 \
* --input_dir=input_data_dir \
* --output_file=mace.out \
* --model_data_file=model_data.data
*/
#include <malloc.h>
#include <dirent.h>
#include <stdint.h>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <numeric>
#include "gflags/gflags.h"
#include "mace/public/mace.h"
#include "mace/utils/env_time.h"
#include "mace/utils/logging.h"
#include "mace/utils/utils.h"
#ifdef MODEL_GRAPH_FORMAT_CODE
#include "mace/codegen/engine/mace_engine_factory.h"
#endif
namespace mace {
namespace tools {
namespace quantization {
namespace str_util {
// Splits `str` on `delims`. An empty input yields a single empty piece.
// A trailing delimiter produces no empty last piece (the scan stops when
// the remainder is empty), while a leading delimiter does produce an
// empty first piece — this matches how node-list flags are parsed.
std::vector<std::string> Split(const std::string &str, char delims) {
  std::vector<std::string> pieces;
  if (str.empty()) {
    pieces.push_back("");
    return pieces;
  }
  size_t start = 0;
  while (start < str.size()) {
    const size_t pos = str.find(delims, start);
    if (pos == std::string::npos) {
      pieces.push_back(str.substr(start));
      break;
    }
    pieces.push_back(str.substr(start, pos - start));
    start = pos + 1;
  }
  return pieces;
}
} // namespace str_util
// Parses a comma-separated dimension list ("1,224,224,3") and appends the
// dimensions to `shape`. Non-numeric segments parse as 0 (atoi semantics);
// nothing is appended for an empty string.
void ParseShape(const std::string &str, std::vector<int64_t> *shape) {
  size_t pos = 0;
  while (pos < str.size()) {
    // atoi stops at the next non-digit, so parsing from the current
    // offset reads exactly one dimension.
    shape->push_back(atoi(str.c_str() + pos));
    const size_t comma = str.find(',', pos);
    if (comma == std::string::npos) break;
    pos = comma + 1;
  }
}
// Sanitizes a tensor/node name for use in file names by replacing every
// non-alphanumeric character with '_'.
// Fixes: the parameter was taken by value (needless copy), and isalnum()
// was called on a raw char, which is undefined behavior for negative
// (non-ASCII) values — cast through unsigned char first.
std::string FormatName(const std::string &input) {
  std::string res = input;
  for (char &c : res) {
    if (!isalnum(static_cast<unsigned char>(c))) c = '_';
  }
  return res;
}
DEFINE_string(model_name,
"",
"model name in yaml");
DEFINE_string(input_node,
"input_node0,input_node1",
"input nodes, separated by comma");
DEFINE_string(input_shape,
"1,224,224,3:1,1,1,10",
"input shapes, separated by colon and comma");
DEFINE_string(output_node,
"output_node0,output_node1",
"output nodes, separated by comma");
DEFINE_string(output_shape,
"1,224,224,2:1,1,1,10",
"output shapes, separated by colon and comma");
DEFINE_string(input_dir,
"",
"input directory name");
DEFINE_string(model_data_file,
"",
"model data file name, used when EMBED_MODEL_DATA set to 0 or 2");
DEFINE_string(model_file,
"",
"model file name, used when load mace model in pb");
DEFINE_int32(omp_num_threads, -1, "num of openmp threads");
bool RunModel(const std::string &model_name,
const std::vector<std::string> &input_names,
const std::vector<std::vector<int64_t>> &input_shapes,
const std::vector<std::string> &output_names,
const std::vector<std::vector<int64_t>> &output_shapes) {
// config runtime
MaceStatus status;
MaceEngineConfig config(DeviceType::CPU);
status = config.SetCPUThreadPolicy(
FLAGS_omp_num_threads,
CPUAffinityPolicy::AFFINITY_NONE);
if (status != MACE_SUCCESS) {
LOG(WARNING) << "Set openmp or cpu affinity failed.";
}
std::vector<unsigned char> model_pb_data;
if (FLAGS_model_file != "") {
if (!mace::ReadBinaryFile(&model_pb_data, FLAGS_model_file)) {
LOG(FATAL) << "Failed to read file: " << FLAGS_model_file;
}
}
std::shared_ptr<mace::MaceEngine> engine;
// Create Engine
#ifdef MODEL_GRAPH_FORMAT_CODE
MACE_RETURN_IF_ERROR(
CreateMaceEngineFromCode(model_name,
FLAGS_model_data_file,
input_names,
output_names,
config,
&engine));
#else
(void) (model_name);
MACE_RETURN_IF_ERROR(
CreateMaceEngineFromProto(model_pb_data,
FLAGS_model_data_file,
input_names,
output_names,
config,
&engine));
#endif
const size_t input_count = input_names.size();
const size_t output_count = output_names.size();
std::map<std::string, mace::MaceTensor> inputs;
std::map<std::string, mace::MaceTensor> outputs;
std::map<std::string, int64_t> inputs_size;
for (size_t i = 0; i < input_count; ++i) {
int64_t input_size =
std::accumulate(input_shapes[i].begin(), input_shapes[i].end(), 1,
std::multiplies<int64_t>());
inputs_size[input_names[i]] = input_size;
auto buffer_in = std::shared_ptr<float>(new float[input_size],
std::default_delete<float[]>());
inputs[input_names[i]] = mace::MaceTensor(input_shapes[i], buffer_in);
}
for (size_t i = 0; i < output_count; ++i) {
int64_t output_size =
std::accumulate(output_shapes[i].begin(), output_shapes[i].end(), 1,
std::multiplies<int64_t>());
auto buffer_out = std::shared_ptr<float>(new float[output_size],
std::default_delete<float[]>());
outputs[output_names[i]] = mace::MaceTensor(output_shapes[i], buffer_out);
}
DIR *dir_parent;
struct dirent *entry;
dir_parent = opendir(FLAGS_input_dir.c_str());
if (dir_parent) {
while ((entry = readdir(dir_parent))) {
std::string file_name = std::string(entry->d_name);
std::string prefix = FormatName(input_names[0]);
if (file_name.find(prefix) == 0) {
std::string suffix = file_name.substr(prefix.size());
for (size_t i = 0; i < input_count; ++i) {
file_name = FLAGS_input_dir + "/" + FormatName(input_names[i])
+ suffix;
std::ifstream in_file(file_name, std::ios::in | std::ios::binary);
VLOG(2) << "Read " << file_name;
if (in_file.is_open()) {
in_file.read(reinterpret_cast<char *>(
inputs[input_names[i]].data().get()),
inputs_size[input_names[i]] * sizeof(float));
in_file.close();
} else {
LOG(INFO) << "Open input file failed";
return -1;
}
}
MACE_RETURN_IF_ERROR(engine->Run(inputs, &outputs));
}
}
closedir(dir_parent);
} else {
LOG(ERROR) << "Directory " << FLAGS_input_dir << " does not exist.";
}
return true;
}
// Parses command-line flags, echoes the effective configuration, splits the
// comma/colon-separated node and shape flags into per-tensor vectors, and
// runs the model via RunModel.
// NOTE(review): input_count/output_count come from the shape flags, not the
// node-name flags — a mismatch between the two lists is not diagnosed here.
int Main(int argc, char **argv) {
  std::string usage = "quantize stat model\nusage: " + std::string(argv[0])
      + " [flags]";
  gflags::SetUsageMessage(usage);
  gflags::ParseCommandLineFlags(&argc, &argv, true);

  // Echo the run configuration for debugging/reproducibility.
  LOG(INFO) << "model name: " << FLAGS_model_name;
  LOG(INFO) << "mace version: " << MaceVersion();
  LOG(INFO) << "input node: " << FLAGS_input_node;
  LOG(INFO) << "input shape: " << FLAGS_input_shape;
  LOG(INFO) << "output node: " << FLAGS_output_node;
  LOG(INFO) << "output shape: " << FLAGS_output_shape;
  LOG(INFO) << "input_dir: " << FLAGS_input_dir;
  LOG(INFO) << "model_data_file: " << FLAGS_model_data_file;
  LOG(INFO) << "model_file: " << FLAGS_model_file;
  LOG(INFO) << "omp_num_threads: " << FLAGS_omp_num_threads;

  // Node names are comma-separated; shapes are colon-separated lists of
  // comma-separated dimensions.
  std::vector<std::string> input_names = str_util::Split(FLAGS_input_node, ',');
  std::vector<std::string> output_names =
      str_util::Split(FLAGS_output_node, ',');
  std::vector<std::string> input_shapes =
      str_util::Split(FLAGS_input_shape, ':');
  std::vector<std::string> output_shapes =
      str_util::Split(FLAGS_output_shape, ':');

  const size_t input_count = input_shapes.size();
  const size_t output_count = output_shapes.size();
  std::vector<std::vector<int64_t>> input_shape_vec(input_count);
  std::vector<std::vector<int64_t>> output_shape_vec(output_count);
  for (size_t i = 0; i < input_count; ++i) {
    ParseShape(input_shapes[i], &input_shape_vec[i]);
  }
  for (size_t i = 0; i < output_count; ++i) {
    ParseShape(output_shapes[i], &output_shape_vec[i]);
  }

  // RunModel's bool result is returned as the int exit value.
  return RunModel(FLAGS_model_name, input_names, input_shape_vec,
                  output_names, output_shape_vec);
}
} // namespace quantization
} // namespace tools
} // namespace mace
// Program entry point.
// NOTE(review): Main's return value is discarded, so the process always
// exits 0 — but Main returns RunModel's bool (1 on success), so naively
// propagating it would invert shell exit-code conventions. Confirm the
// intended exit-code contract before changing either function.
int main(int argc, char **argv) {
  mace::tools::quantization::Main(argc, argv);
}
......@@ -173,6 +173,13 @@ DEFINE_string(input_file,
DEFINE_string(output_file,
"",
"output file name | output file prefix for multiple outputs");
// TODO(liyin): support batch validation
DEFINE_string(input_dir,
"",
"input directory name");
DEFINE_string(output_dir,
"output",
"output directory name");
DEFINE_string(opencl_binary_file,
"",
"compiled opencl binary file path");
......
......@@ -71,7 +71,6 @@ MACE_RUN_STATIC_NAME = "mace_run_static"
MACE_RUN_DYNAMIC_NAME = "mace_run_dynamic"
MACE_RUN_STATIC_TARGET = "//mace/tools/validation:" + MACE_RUN_STATIC_NAME
MACE_RUN_DYNAMIC_TARGET = "//mace/tools/validation:" + MACE_RUN_DYNAMIC_NAME
QUANTIZE_STAT_TARGET = "//mace/tools/quantization:quantize_stat"
EXAMPLE_STATIC_NAME = "example_static"
EXAMPLE_DYNAMIC_NAME = "example_dynamic"
EXAMPLE_STATIC_TARGET = "//mace/examples/cli:" + EXAMPLE_STATIC_NAME
......@@ -969,38 +968,6 @@ def build_mace_run(configs, target_abi, enable_openmp, address_sanitizer,
mace_lib_type == MACELibType.dynamic)
def build_quantize_stat(configs):
library_name = configs[YAMLKeyword.library_name]
build_tmp_binary_dir = get_build_binary_dir(library_name, ABIType.host)
if os.path.exists(build_tmp_binary_dir):
sh.rm("-rf", build_tmp_binary_dir)
os.makedirs(build_tmp_binary_dir)
quantize_stat_target = QUANTIZE_STAT_TARGET
build_arg = ""
six.print_(configs[YAMLKeyword.model_graph_format])
if configs[YAMLKeyword.model_graph_format] == ModelFormat.code:
mace_check(os.path.exists(ENGINE_CODEGEN_DIR),
ModuleName.RUN,
"You should convert model first.")
build_arg = "--per_file_copt=mace/tools/quantization/quantize_stat.cc@-DMODEL_GRAPH_FORMAT_CODE" # noqa
sh_commands.bazel_build(
quantize_stat_target,
abi=ABIType.host,
enable_openmp=True,
symbol_hidden=True,
extra_args=build_arg
)
quantize_stat_filepath = build_tmp_binary_dir + "/quantize_stat"
if os.path.exists(quantize_stat_filepath):
sh.rm("-rf", quantize_stat_filepath)
sh.cp("-f", "bazel-bin/mace/tools/quantization/quantize_stat",
build_tmp_binary_dir)
def build_example(configs, target_abi, enable_openmp, address_sanitizer,
mace_lib_type):
library_name = configs[YAMLKeyword.library_name]
......@@ -1275,12 +1242,15 @@ def run_specific_target(flags, configs, target_abi,
cpu_affinity_policy=flags.cpu_affinity_policy,
gpu_perf_hint=flags.gpu_perf_hint,
gpu_priority_hint=flags.gpu_priority_hint,
input_dir=flags.input_dir,
output_dir=flags.output_dir,
runtime_failure_ratio=flags.runtime_failure_ratio,
address_sanitizer=flags.address_sanitizer,
opencl_binary_file=model_opencl_output_bin_path,
opencl_parameter_file=model_opencl_parameter_path,
libmace_dynamic_library_path=LIBMACE_DYNAMIC_PATH,
link_dynamic=link_dynamic,
quantize_stat=flags.quantize_stat,
)
if flags.validate:
model_file_path, weight_file_path = get_model_files(
......@@ -1340,59 +1310,6 @@ def run_specific_target(flags, configs, target_abi,
opencl_parameter_bin_path)
def run_quantize_stat(flags, configs):
library_name = configs[YAMLKeyword.library_name]
build_tmp_binary_dir = get_build_binary_dir(library_name, ABIType.host)
for model_name in configs[YAMLKeyword.models]:
check_model_converted(library_name, model_name,
configs[YAMLKeyword.model_graph_format],
configs[YAMLKeyword.model_data_format],
ABIType.host)
MaceLogger.header(
StringFormatter.block(
"Run model %s on %s" % (model_name, ABIType.host)))
model_config = configs[YAMLKeyword.models][model_name]
subgraphs = model_config[YAMLKeyword.subgraphs]
_, _, mace_model_dir = \
get_build_model_dirs(library_name, model_name, ABIType.host,
None, None,
model_config[YAMLKeyword.model_file_path])
mace_model_path = ""
if configs[YAMLKeyword.model_graph_format] == ModelFormat.file:
mace_model_path = "%s/%s.pb" % (mace_model_dir, model_name)
p = subprocess.Popen(
[
"env",
"MACE_CPP_MIN_VLOG_LEVEL=%s" % flags.vlog_level,
"MACE_LOG_TENSOR_RANGE=1",
"%s/%s" % (build_tmp_binary_dir, "quantize_stat"),
"--model_name=%s" % model_name,
"--input_node=%s" % ",".join(
subgraphs[0][YAMLKeyword.input_tensors]),
"--output_node=%s" % ",".join(
subgraphs[0][YAMLKeyword.output_tensors]),
"--input_shape=%s" % ":".join(
subgraphs[0][YAMLKeyword.input_shapes]),
"--output_shape=%s" % ":".join(
subgraphs[0][YAMLKeyword.output_shapes]),
"--input_dir=%s" % flags.input_dir,
"--model_data_file=%s/%s.data" % (mace_model_dir, model_name),
"--omp_num_threads=%s" % flags.omp_num_threads,
"--model_file=%s" % mace_model_path,
],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
out, err = p.communicate()
stdout = err + out
six.print_(stdout)
six.print_("Running finished!\n")
def print_package_summary(package_path):
title = "Library"
header = ["key", "value"]
......@@ -1408,11 +1325,6 @@ def run_mace(flags):
clear_build_dirs(configs[YAMLKeyword.library_name])
if flags.quantize_stat:
build_quantize_stat(configs)
run_quantize_stat(flags, configs)
return
target_socs = configs[YAMLKeyword.target_socs]
if not target_socs or ALL_SOC_TAG in target_socs:
target_socs = sh_commands.adb_get_all_socs()
......@@ -1793,6 +1705,11 @@ def parse_args():
type=str,
default="",
help="quantize stat input dir.")
run.add_argument(
"--output_dir",
type=str,
default="",
help="quantize stat output dir.")
benchmark = subparsers.add_parser(
'benchmark',
parents=[all_type_parent_parser, run_bm_parent_parser],
......
import argparse
import os
import sys
import tensorflow as tf
FLAGS = None
def parse_args():
    """Parses command line arguments."""
    arg_parser = argparse.ArgumentParser()
    # (flag, options) pairs, registered in declaration order.
    option_specs = [
        ("--input", {"type": str, "help": "image file/dir path."}),
        ("--output_dir", {"type": str, "default": ".",
                          "help": "tensor output dir."}),
        ("--image_shape", {"type": str,
                           "help": "target image shape, e.g, 224,224,3"}),
        ("--mean", {"type": str, "default": "",
                    "help": "rgb mean value that should subtract from image"
                            " value, e.g, 128,128,128."}),
    ]
    for flag, options in option_specs:
        arg_parser.add_argument(flag, **options)
    return arg_parser.parse_known_args()
def images_to_tensors(input_files, image_shape, mean_values=None):
    """Decodes each image file, normalizes and resizes it, and dumps it as a
    raw float32 tensor file named <basename>.dat in FLAGS.output_dir.

    Args:
        input_files: list of image file paths.
        image_shape: target [height, width, channels].
        mean_values: optional per-channel means; when given the image is
            scaled as (pixel - mean) / 255, otherwise pixels are mapped
            to [-1, 1].

    Fix: ``xrange`` is Python-2-only; ``range`` behaves the same here and
    also works on Python 3.
    """
    for i in range(len(input_files)):
        # NOTE(review): a fresh Session per file keeps the original
        # behavior; ops still accumulate on the default graph across
        # iterations — confirm before restructuring.
        with tf.Session() as sess:
            with tf.gfile.FastGFile(input_files[i], 'rb') as f:
                image_data = f.read()
                image_data = tf.image.decode_image(image_data,
                                                   channels=image_shape[2])
                if mean_values:
                    # (pixel - mean) / 255 normalization.
                    image_data = tf.cast(image_data, dtype=tf.float32)
                    mean_tensor = tf.constant(mean_values, dtype=tf.float32,
                                              shape=[1, 1, image_shape[2]])
                    image_data = (image_data - mean_tensor) / 255.0
                else:
                    # Map [0, 255] -> [0, 1] -> [-1, 1].
                    image_data = tf.image.convert_image_dtype(image_data,
                                                              dtype=tf.float32)
                    image_data = tf.subtract(image_data, 0.5)
                    image_data = tf.multiply(image_data, 2.0)
                # Add a batch dimension, then resize to the target H x W.
                image_data = tf.expand_dims(image_data, 0)
                image_data = tf.image.resize_bilinear(image_data,
                                                      image_shape[:2],
                                                      align_corners=False)
                image = sess.run(image_data)
                output_file = os.path.join(FLAGS.output_dir, os.path.splitext(
                    os.path.basename(input_files[i]))[0] + '.dat')
                image.tofile(output_file)
def main(unused_args):
    """Collects the input image file list and converts each image to a
    raw tensor file via images_to_tensors."""
    if not os.path.exists(FLAGS.input):
        print ("input does not exist: %s" % FLAGS.input)
        sys.exit(-1)

    # A directory expands to all entries inside it; a file is used as-is.
    if os.path.isdir(FLAGS.input):
        input_files = [os.path.join(FLAGS.input, image)
                       for image in os.listdir(FLAGS.input)]
    else:
        input_files = [FLAGS.input]

    image_shape = [int(dim) for dim in FLAGS.image_shape.split(',')]

    mean_values = None
    if FLAGS.mean:
        mean_values = [float(mean) for mean in FLAGS.mean.split(',')]

    images_to_tensors(input_files, image_shape, mean_values=mean_values)
if __name__ == '__main__':
FLAGS, unparsed = parse_args()
main(unused_args=[sys.argv[0]] + unparsed)
import argparse
import os
import sys
import numpy as np
import tensorflow as tf
FLAGS = None
def parse_args():
    """Parses command line arguments."""
    arg_parser = argparse.ArgumentParser()
    # (flag, options) pairs, registered in declaration order.
    option_specs = [
        ("--input", {"type": str, "help": "tensor file/dir path."}),
        ("--output_dir", {"type": str, "default": ".",
                          "help": "image output dir."}),
        ("--image_shape", {"type": str,
                           "help": "target image shape, e.g, 224,224,3"}),
    ]
    for flag, options in option_specs:
        arg_parser.add_argument(flag, **options)
    return arg_parser.parse_known_args()
def tensors_to_images(input_files, image_shape):
    """Reads raw float32 tensor files and writes each back as a JPEG image
    named <basename>.jpg in FLAGS.output_dir.

    Args:
        input_files: list of raw tensor file paths.
        image_shape: [height, width, channels] used to reshape the data.

    Fix: ``xrange`` is Python-2-only; ``range`` behaves the same here and
    also works on Python 3.
    """
    for i in range(len(input_files)):
        with tf.Session() as sess:
            tensor_data = np.fromfile(input_files[i], dtype=np.float32) \
                .reshape(image_shape)
            # use the second channel if it is gray image
            if image_shape[2] == 2:
                _, tensor_data = tf.split(tensor_data, 2, axis=2)
            # Saturating cast to uint8, then JPEG-encode at max quality.
            tensor_data = tf.image.convert_image_dtype(tensor_data,
                                                       tf.uint8,
                                                       saturate=True)
            image_data = tf.image.encode_jpeg(tensor_data, quality=100)
            image = sess.run(image_data)
            output_file = os.path.join(FLAGS.output_dir, os.path.splitext(
                os.path.basename(input_files[i]))[0] + '.jpg')
            writer = tf.write_file(output_file, image)
            sess.run(writer)
def main(unused_args):
    """Collects the input tensor file list and converts each one back to
    a JPEG image via tensors_to_images."""
    if not os.path.exists(FLAGS.input):
        print ("input does not exist: %s" % FLAGS.input)
        sys.exit(-1)

    # A directory expands to all entries inside it; a file is used as-is.
    if os.path.isdir(FLAGS.input):
        input_files = [os.path.join(FLAGS.input, tensor)
                       for tensor in os.listdir(FLAGS.input)]
    else:
        input_files = [FLAGS.input]

    image_shape = [int(dim) for dim in FLAGS.image_shape.split(',')]
    tensors_to_images(input_files, image_shape)
if __name__ == '__main__':
FLAGS, unparsed = parse_args()
main(unused_args=[sys.argv[0]] + unparsed)
......@@ -702,9 +702,12 @@ def tuning_run(abi,
gpu_priority_hint=3,
input_file_name="model_input",
output_file_name="model_out",
input_dir="",
output_dir="",
runtime_failure_ratio=0.0,
address_sanitizer=False,
link_dynamic=False):
link_dynamic=False,
quantize_stat=False):
six.print_("* Run '%s' with round=%s, restart_round=%s, tuning=%s, "
"out_of_range_check=%s, omp_num_threads=%s, "
"cpu_affinity_policy=%s, gpu_perf_hint=%s, "
......@@ -718,30 +721,37 @@ def tuning_run(abi,
if abi == "host":
libmace_dynamic_lib_path = \
os.path.dirname(libmace_dynamic_library_path)
cmd = [
"env",
"LD_LIBRARY_PATH=%s" % libmace_dynamic_lib_path,
"MACE_CPP_MIN_VLOG_LEVEL=%s" % vlog_level,
"MACE_RUNTIME_FAILURE_RATIO=%f" % runtime_failure_ratio,
]
if quantize_stat:
cmd.append("MACE_LOG_TENSOR_RANGE=1")
cmd.extend([
"%s/%s" % (target_dir, target_name),
"--model_name=%s" % model_tag,
"--input_node=%s" % ",".join(input_nodes),
"--output_node=%s" % ",".join(output_nodes),
"--input_shape=%s" % ":".join(input_shapes),
"--output_shape=%s" % ":".join(output_shapes),
"--input_file=%s/%s" % (model_output_dir, input_file_name),
"--output_file=%s/%s" % (model_output_dir, output_file_name),
"--input_dir=%s" % input_dir,
"--output_dir=%s" % output_dir,
"--model_data_file=%s/%s.data" % (mace_model_dir, model_tag),
"--device=%s" % device_type,
"--round=%s" % running_round,
"--restart_round=%s" % restart_round,
"--omp_num_threads=%s" % omp_num_threads,
"--cpu_affinity_policy=%s" % cpu_affinity_policy,
"--gpu_perf_hint=%s" % gpu_perf_hint,
"--gpu_priority_hint=%s" % gpu_priority_hint,
"--model_file=%s" % mace_model_path,
])
p = subprocess.Popen(
[
"env",
"LD_LIBRARY_PATH=%s" % libmace_dynamic_lib_path,
"MACE_CPP_MIN_VLOG_LEVEL=%s" % vlog_level,
"MACE_RUNTIME_FAILURE_RATIO=%f" % runtime_failure_ratio,
"%s/%s" % (target_dir, target_name),
"--model_name=%s" % model_tag,
"--input_node=%s" % ",".join(input_nodes),
"--output_node=%s" % ",".join(output_nodes),
"--input_shape=%s" % ":".join(input_shapes),
"--output_shape=%s" % ":".join(output_shapes),
"--input_file=%s/%s" % (model_output_dir, input_file_name),
"--output_file=%s/%s" % (model_output_dir, output_file_name),
"--model_data_file=%s/%s.data" % (mace_model_dir, model_tag),
"--device=%s" % device_type,
"--round=%s" % running_round,
"--restart_round=%s" % restart_round,
"--omp_num_threads=%s" % omp_num_threads,
"--cpu_affinity_policy=%s" % cpu_affinity_policy,
"--gpu_perf_hint=%s" % gpu_perf_hint,
"--gpu_priority_hint=%s" % gpu_priority_hint,
"--model_file=%s" % mace_model_path,
],
cmd,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
out, err = p.communicate()
......@@ -800,6 +810,8 @@ def tuning_run(abi,
"MACE_LIMIT_OPENCL_KERNEL_TIME=%s" % limit_opencl_kernel_time,
"MACE_RUNTIME_FAILURE_RATIO=%f" % runtime_failure_ratio,
]
if quantize_stat:
adb_cmd.append("MACE_LOG_TENSOR_RANGE=1")
if address_sanitizer:
adb_cmd.extend([
"LD_PRELOAD=%s/%s" % (phone_data_dir,
......@@ -814,6 +826,8 @@ def tuning_run(abi,
"--output_shape=%s" % ":".join(output_shapes),
"--input_file=%s/%s" % (phone_data_dir, input_file_name),
"--output_file=%s/%s" % (phone_data_dir, output_file_name),
"--input_dir=%s" % input_dir,
"--output_dir=%s" % output_dir,
"--model_data_file=%s/%s.data" % (phone_data_dir, model_tag),
"--device=%s" % device_type,
"--round=%s" % running_round,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册