Commit 9985cbf4 authored by liuqi

Support user-specified input files.

Parent cc6e52a4
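For context, the new `input_files` key is read from the model config next to `input_nodes` and forwarded to `generate_random_input` (see the Python tooling changes further down). A hypothetical model entry as it would look after the YAML config is parsed; only `input_nodes` and `input_files` are exercised by this change, the other keys are illustrative:

```python
# Hypothetical parsed model config; keys other than "input_nodes" and
# "input_files" are illustrative, not taken from this commit.
model_config = {
    "runtime": "cpu",
    "input_nodes": ["data", "im_info"],
    "input_files": [
        "http://example.com/data.bin",  # fetched with urllib.urlretrieve
        "/path/to/local/im_info.bin",   # copied with shutil.copy
    ],
}

# The tools then forward the two values, mirroring the call added in main():
#   generate_random_input(target_soc, model_output_dir,
#                         model_config['input_nodes'],
#                         model_config.get('input_files', []))
```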
......@@ -16,12 +16,12 @@ class ProposalOp : public Operator<D, T> {
public:
ProposalOp(const OperatorDef &operator_def, Workspace *ws)
: Operator<D, T>(operator_def, ws),
functor_(OperatorBase::GetSingleArgument<int>("min_size", 0),
OperatorBase::GetSingleArgument<float>("nms_thresh", 0),
OperatorBase::GetSingleArgument<int>("pre_nms_top_n", 0),
OperatorBase::GetSingleArgument<int>("post_nms_top_n", 0),
functor_(OperatorBase::GetSingleArgument<int>("min_size", 16),
OperatorBase::GetSingleArgument<float>("nms_thresh", 0.7),
OperatorBase::GetSingleArgument<int>("pre_nms_top_n", 6000),
OperatorBase::GetSingleArgument<int>("post_nms_top_n", 300),
OperatorBase::GetSingleArgument<int>("feat_stride", 0),
OperatorBase::GetSingleArgument<int>("base_size", 16),
OperatorBase::GetSingleArgument<int>("base_size", 12),
OperatorBase::GetRepeatedArgument<int>("scales"),
OperatorBase::GetRepeatedArgument<float>("ratios")) {}
......
......@@ -404,7 +404,9 @@ message LayerParameter {
optional ParameterParameter parameter_param = 145;
optional PoolingParameter pooling_param = 121;
optional PowerParameter power_param = 122;
optional ProposalParameter proposal_param = 8266713;
optional PReLUParameter prelu_param = 131;
optional PSROIAlignParameter psroi_align_param = 1490;
optional PythonParameter python_param = 130;
optional RecurrentParameter recurrent_param = 146;
optional ReductionParameter reduction_param = 136;
......@@ -944,6 +946,19 @@ message PowerParameter {
optional float shift = 3 [default = 0.0];
}
// Message that stores parameters used by ProposalLayer
message ProposalParameter {
optional uint32 feat_stride = 1 [default = 16];
repeated uint32 scales = 2;
repeated float ratios = 3;
}
message PSROIAlignParameter {
required float spatial_scale = 1;
required int32 output_dim = 2; // output channel number
required int32 group_size = 3; // number of groups to encode position-sensitive score maps
}
message PythonParameter {
optional string module = 1;
optional string layer = 2;
......
......@@ -784,21 +784,89 @@ class CaffeConverter(object):
self.net_def.op.extend([op_def])
self.resolved_ops.add(op.name)
def convert_reshape(self, op):
op_def = self.CommonConvert(op, op.type)
input_shape = op.parents[0].output_shape_map[op.layer.bottom[0]]
output_shape = input_shape
shape_param = np.asarray(op.layer.reshape_param.shape.dim)[[0, 2, 3, 1]]
print shape_param
for i in range(len(shape_param)):
if shape_param[i] != 0:
output_shape[i] = shape_param[i]
shape_arg = op_def.arg.add()
shape_arg.name = 'shape'
shape_arg.ints.extend(output_shape)
op.output_shape_map[op.layer.top[0]] = output_shape
self.add_output_shape(op_def, output_shape)
op_def.output.extend([op.name + ':0'])
self.net_def.op.extend([op_def])
self.resolved_ops.add(op.name)
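A note on the index trick in `convert_reshape`: Caffe's `reshape_param.shape.dim` is given in NCHW order while the converter emits NHWC shapes, so the fancy index `[[0, 2, 3, 1]]` reorders the dims before the loop skips zero entries (Caffe's "copy this dimension from the input"). A minimal sketch of the permutation, with made-up dims:

```python
import numpy as np

# Made-up Caffe reshape dims in NCHW order: keep N (0), C=2, H=14, W=14.
nchw_dims = np.asarray([0, 2, 14, 14])

# The same fancy index used by convert_reshape: pick positions 0, 2, 3, 1,
# i.e. reorder N,C,H,W -> N,H,W,C.
nhwc_dims = nchw_dims[[0, 2, 3, 1]]
print(nhwc_dims)  # [ 0 14 14  2]
```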
def convert_proposal_op(self, op):
assert self.device == 'cpu'
op_def = self.CommonConvert(op, op.type)
if op.layer.HasField('proposal_param'):
proposal_param = op.layer.proposal_param
feat_stride_arg = op_def.arg.add()
feat_stride_arg.name = 'feat_stride'
feat_stride_arg.i = proposal_param.feat_stride
scales_arg = op_def.arg.add()
scales_arg.name = 'scales'
scales_arg.ints.extend(list(proposal_param.scales))
ratios_arg = op_def.arg.add()
ratios_arg.name = 'ratios'
ratios_arg.floats.extend(list(proposal_param.ratios))
output_shape = op.parents[0].output_shape_map[op.layer.bottom[0]]
op.output_shape_map[op.layer.top[0]] = output_shape
self.add_output_shape(op_def, output_shape)
op_def.output.extend([op.name + ':0'])
self.net_def.op.extend([op_def])
self.resolved_ops.add(op.name)
def convert_psroi_align(self, op):
assert self.device == 'cpu'
op_def = self.CommonConvert(op, op.type)
if op.layer.HasField('psroi_align_param'):
psroi_align_param = op.layer.psroi_align_param
spatial_scale_arg = op_def.arg.add()
spatial_scale_arg.name = 'spatial_scale'
spatial_scale_arg.f = psroi_align_param.spatial_scale
output_dim_arg = op_def.arg.add()
output_dim_arg.name = 'output_dim'
output_dim_arg.i = psroi_align_param.output_dim
group_size_arg = op_def.arg.add()
group_size_arg.name = 'group_size'
group_size_arg.i = psroi_align_param.group_size
output_shape = op.parents[0].output_shape_map[op.layer.bottom[0]]
op.output_shape_map[op.layer.top[0]] = output_shape
self.add_output_shape(op_def, output_shape)
op_def.output.extend([op.name + ':0'])
self.net_def.op.extend([op_def])
self.resolved_ops.add(op.name)
def replace_in_out_name(self, input_names, output_names, is_single):
in_names = set([input_name + ":0" for input_name in input_names])
out_names = set([output_name + ":0" for output_name in output_names])
if is_single:
for op in self.net_def.op:
if len(op.input) > 0 and op.input[0] in in_names:
op.input[0] = MACE_INPUT_NODE_NAME + ':0'
if len(op.output) > 0 and op.output[0] in out_names:
op.output[0] = MACE_OUTPUT_NODE_NAME + ':0'
for i in range(len(op.input)):
if op.input[i] in in_names:
op.input[i] = MACE_INPUT_NODE_NAME + ':0'
for i in range(len(op.output)):
if op.output[i] in out_names:
op.output[i] = MACE_OUTPUT_NODE_NAME + ':0'
else:
for op in self.net_def.op:
if len(op.input) > 0 and op.input[0] in in_names:
op.input[0] = MACE_INPUT_NODE_NAME + '_' + op.input[0]
if len(op.output) > 0 and op.output[0] in out_names:
op.output[0] = MACE_OUTPUT_NODE_NAME + '_' + op.output[0]
for i in range(len(op.input)):
if op.input[i] in in_names:
op.input[i] = MACE_INPUT_NODE_NAME + '_' + op.input[i]
if op.input[i] in out_names:
op.input[i] = MACE_OUTPUT_NODE_NAME + '_' + op.input[i]
for i in range(len(op.output)):
if op.output[i] in in_names:
op.output[i] = MACE_INPUT_NODE_NAME + '_' + op.output[i]
if op.output[i] in out_names:
op.output[i] = MACE_OUTPUT_NODE_NAME + '_' + op.output[i]
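The renaming rule in `replace_in_out_name` differs by `is_single`: with a single input/output the matching tensors collapse onto the fixed MACE node names, while with multiple inputs/outputs the original name is kept as a suffix so tensors stay distinguishable. A minimal standalone sketch of that per-tensor rule, assuming the constant values `mace_input_node` / `mace_output_node` (the real constants are defined elsewhere in the converter):

```python
# Assumed values; the converter defines MACE_INPUT_NODE_NAME and
# MACE_OUTPUT_NODE_NAME elsewhere.
MACE_INPUT_NODE_NAME = "mace_input_node"
MACE_OUTPUT_NODE_NAME = "mace_output_node"

def rename(tensor, in_names, out_names, is_single):
    if is_single:
        # Single input/output: every matching tensor collapses to one fixed name.
        if tensor in in_names:
            return MACE_INPUT_NODE_NAME + ':0'
        if tensor in out_names:
            return MACE_OUTPUT_NODE_NAME + ':0'
    else:
        # Multiple inputs/outputs: keep the original name as a suffix so the
        # renamed tensors stay distinct.
        if tensor in in_names:
            return MACE_INPUT_NODE_NAME + '_' + tensor
        if tensor in out_names:
            return MACE_OUTPUT_NODE_NAME + '_' + tensor
    return tensor

print(rename('data:0', {'data:0'}, {'prob:0'}, True))   # mace_input_node:0
print(rename('prob:0', {'data:0'}, {'prob:0'}, False))  # mace_output_node_prob:0
```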
def add_input_op_shape(self, input_nodes, input_shapes):
assert len(input_nodes) == len(input_shapes)
......@@ -843,10 +911,16 @@ class CaffeConverter(object):
self.convert_concat(op)
elif op.type == 'Eltwise':
self.convert_eltwise(op)
elif op.type in ['Softmax']:
self.convert_normal_op(op)
elif op.type == 'Slice':
self.convert_slice(op)
elif op.type == 'Reshape':
self.convert_reshape(op)
elif op.type == 'Proposal':
self.convert_proposal_op(op)
elif op.type == 'PSROIAlign':
self.convert_psroi_align(op)
elif op.type in ['Softmax']:
self.convert_normal_op(op)
else:
raise Exception('Unknown Op: %s, type: %s' % (op.name, op.type))
......
......@@ -2,8 +2,6 @@
LIBMACE_TAG=`git describe --abbrev=0 --tags`
MACE_SOURCE_DIR=`/bin/pwd`
INPUT_FILE_NAME="model_input"
OUTPUT_FILE_NAME="model_out"
PHONE_DATA_DIR="/data/local/tmp/mace_run"
KERNEL_DIR="${PHONE_DATA_DIR}/cl/"
CODEGEN_DIR=${MACE_SOURCE_DIR}/mace/codegen
......
......@@ -14,6 +14,7 @@ import subprocess
import sys
import urllib
import yaml
import re
import adb_tools
......@@ -64,13 +65,37 @@ def clear_env(target_soc):
command = "bash tools/clear_env.sh {}".format(target_soc)
run_command(command)
def input_file_name(input_name):
return os.environ['INPUT_FILE_NAME'] + '_' + \
re.sub('[^0-9a-zA-Z]+', '_', input_name)
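The helper above builds a per-input file name by sanitizing the node name, so user-supplied files for different inputs do not collide on the device. For example, assuming `INPUT_FILE_NAME` has been exported as `model_input` (as `set_environment` does below):

```python
import os
import re

os.environ['INPUT_FILE_NAME'] = "model_input"  # exported by set_environment()

def input_file_name(input_name):
    # Collapse every run of non-alphanumeric characters into one underscore.
    return os.environ['INPUT_FILE_NAME'] + '_' + \
        re.sub('[^0-9a-zA-Z]+', '_', input_name)

print(input_file_name('data'))          # model_input_data
print(input_file_name('input/blob:0'))  # model_input_input_blob_0
```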
def generate_random_input(target_soc, model_output_dir):
def generate_random_input(target_soc, model_output_dir,
input_names, input_files):
generate_data_or_not = True
command = "bash tools/validate_tools.sh {} {} {}".format(
target_soc, model_output_dir, int(generate_data_or_not))
run_command(command)
input_name_list = []
input_file_list = []
if isinstance(input_names, list):
input_name_list.extend(input_names)
else:
input_name_list.append(input_names)
if isinstance(input_files, list):
input_file_list.extend(input_files)
else:
input_file_list.append(input_files)
assert len(input_file_list) == len(input_name_list)
for i in range(len(input_file_list)):
if input_file_list[i] is not None:
dst_input_file = model_output_dir + '/' + input_file_name(input_name_list[i])
if input_file_list[i].startswith("http://") or \
input_file_list[i].startswith("https://"):
urllib.urlretrieve(input_file_list[i], dst_input_file)
else:
print 'Copy input data:', dst_input_file
shutil.copy(input_file_list[i], dst_input_file)
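Both `input_names` and `input_files` may be passed as a single string or a list; they are normalized to lists and matched positionally, and the empty-list call used by the throughput test below simply skips the copy loop, leaving the randomly generated data in place. A hypothetical invocation, with made-up SoC, paths, and URL:

```python
# Hypothetical call: the first input tensor is filled from a local file, the
# second is downloaded; positions in the two lists must line up, which is what
# the assert above enforces.
generate_random_input(
    "sdm845", "build/my_model/output",
    ["data", "im_info"],
    ["/tmp/data_nhwc.bin", "http://example.com/im_info.bin"])
```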
def generate_model_code():
command = "bash tools/generate_model_code.sh"
......@@ -210,6 +235,13 @@ def parse_args():
help="SoCs to build, comma seperated list (getprop ro.board.platform)")
return parser.parse_known_args()
def set_environment(configs):
os.environ["EMBED_MODEL_DATA"] = str(configs["embed_model_data"])
os.environ["VLOG_LEVEL"] = str(configs["vlog_level"])
os.environ["PROJECT_NAME"] = os.path.splitext(os.path.basename(
FLAGS.config))[0]
os.environ['INPUT_FILE_NAME'] = "model_input"
os.environ['OUTPUT_FILE_NAME'] = "model_out"
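`INPUT_FILE_NAME` and `OUTPUT_FILE_NAME` move here from the shell script above; exporting them through `os.environ` keeps them visible both to `input_file_name()` and to the `tools/*.sh` child processes. A minimal sketch of that inheritance (the echo command is illustrative, not one of the repo's scripts):

```python
import os
import subprocess

os.environ['INPUT_FILE_NAME'] = "model_input"
# Child shells inherit os.environ, so existing scripts can keep reading
# ${INPUT_FILE_NAME} without defining it themselves.
subprocess.call('echo "input file prefix: ${INPUT_FILE_NAME}"', shell=True)
```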
def main(unused_args):
configs = parse_model_configs()
......@@ -218,10 +250,7 @@ def main(unused_args):
FLAGS.round = 1
FLAGS.restart_round = 1
os.environ["EMBED_MODEL_DATA"] = str(configs["embed_model_data"])
os.environ["VLOG_LEVEL"] = str(configs["vlog_level"])
os.environ["PROJECT_NAME"] = os.path.splitext(os.path.basename(
FLAGS.config))[0]
set_environment(configs)
if FLAGS.mode == "build" or FLAGS.mode == "all":
# Remove previous output dirs
......@@ -260,6 +289,7 @@ def main(unused_args):
print '=======================', model_name, '======================='
skip_validation = configs["models"][model_name].get("skip_validation", 0)
model_config = configs["models"][model_name]
input_file_list = model_config.get("input_files", [])
for key in model_config:
if key in ['input_nodes', 'output_nodes'] and isinstance(
model_config[key], list):
......@@ -302,7 +332,8 @@ def main(unused_args):
if FLAGS.mode == "build" or FLAGS.mode == "run" or FLAGS.mode == "validate"\
or FLAGS.mode == "benchmark" or FLAGS.mode == "all":
generate_random_input(target_soc, model_output_dir)
generate_random_input(target_soc, model_output_dir,
model_config['input_nodes'], input_file_list)
if FLAGS.mode == "build" or FLAGS.mode == "all":
generate_model_code()
......@@ -327,7 +358,7 @@ def main(unused_args):
if FLAGS.mode == "throughput_test":
merged_lib_file = FLAGS.output_dir + "/%s/%s/libmace_%s.%s.a" % \
(os.environ["PROJECT_NAME"], target_abi, os.environ["PROJECT_NAME"], target_soc)
generate_random_input(target_soc, FLAGS.output_dir)
generate_random_input(target_soc, FLAGS.output_dir, [], [])
for model_name in configs["models"]:
runtime = configs["models"][model_name]["runtime"]
os.environ["%s_MODEL_TAG" % runtime.upper()] = model_name
......