Commit 8b18778a authored by liuqi

Support multiple input or output API.

Parent 5b7635f6
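This change switches the converter API from a single input/output node to comma-separated node lists: input and output node names are split on ',', and input shapes are passed as one ':'-separated group of comma-separated dimensions per input node (see the parsing added to convert_to_mace_pb below). A minimal sketch of calling the new entry point; the file paths, node names, shape string, and data type value are illustrative only, not taken from this commit:

    # Hypothetical call against the new signature; argument values are made up.
    from lib.python.tools import caffe_converter_lib

    output_graph_def = caffe_converter_lib.convert_to_mace_pb(
        'model.prototxt',             # model_file
        'model.caffemodel',           # weight_file
        'data1,data2',                # input_node_str: comma-separated input nodes
        '1,3,224,224:1,3,160,160',    # input_shape_str: one shape per input, ':'-separated
        'prob',                       # output_node_str: comma-separated output nodes
        'DT_FLOAT',                   # data_type (assumed to be a key of data_type_map)
        'gpu',                        # device
        False)                        # winograd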
@@ -120,16 +120,22 @@ class CaffeConverter(object):
     self.device = device
     self.winograd = winograd
     self.resolved_ops = set()
+    self.ops = []
+
+    # Add Input operations
+    inputs = caffe_net.input
+    for input in inputs:
+      self.ops.extend([Operator(input, 'Input', None)])
 
     layers = caffe_net.layer
     # remove train layers and dropout
     layers = self.remove_unused_layers(layers)
 
     # Construct graph
     # Only support single-output layer
     # layer with single output often use the same top name.
-    self.ops = [Operator(layer.name, layer.type, layer) for layer in layers]
+    self.ops.extend([Operator(layer.name, layer.type, layer) for layer in layers])
 
     self.ops_map = {op.name : op for op in self.ops}
     output_op = {}
     for layer in layers:
@@ -232,36 +238,43 @@ class CaffeConverter(object):
       arg.i = self.dt
     return output_name
 
-  def add_input_transform(self, name):
-    new_input_name = MACE_INPUT_NODE_NAME + ":0"
-    op_def = self.net_def.op.add()
-    op_def.name = name
-    op_def.type = 'BufferToImage'
-    op_def.input.extend([new_input_name])
-    if name not in self.ops_map:
-      raise Exception("Input name not in the model")
-    top_name = self.ops_map[name].layer.top[0]
-    op_def.output.extend([top_name+':0'])
-
-    epsilon_arg = op_def.arg.add()
-    epsilon_arg.name = 'buffer_type'
-    epsilon_arg.i = buffer_type_map['IN_OUT_CHANNEL']
-
-    arg = op_def.arg.add()
-    arg.name = 'T'
-    arg.i = self.dt
-
-  def add_output_transform(self, name):
-    output_name = MACE_OUTPUT_NODE_NAME + ":0"
-    op_def = self.net_def.op.add()
-    op_def.name = output_name[:-2]
-    op_def.type = 'ImageToBuffer'
-    op_def.input.extend([name+':0'])
-    op_def.output.extend([output_name])
-
-    epsilon_arg = op_def.arg.add()
-    epsilon_arg.name = 'buffer_type'
-    epsilon_arg.i = buffer_type_map['IN_OUT_CHANNEL']
+  def add_input_transform(self, names, is_single):
+    for name in names:
+      if is_single:
+        new_input_name = MACE_INPUT_NODE_NAME + ":0"
+      else:
+        new_input_name = MACE_INPUT_NODE_NAME + '_' + name + ":0"
+      op_def = self.net_def.op.add()
+      op_def.name = name
+      op_def.type = 'BufferToImage'
+      op_def.input.extend([new_input_name])
+      if name not in self.ops_map:
+        raise Exception("Input name not in the model")
+      op_def.output.extend([name+':0'])
+
+      epsilon_arg = op_def.arg.add()
+      epsilon_arg.name = 'buffer_type'
+      epsilon_arg.i = buffer_type_map['IN_OUT_CHANNEL']
+
+      arg = op_def.arg.add()
+      arg.name = 'T'
+      arg.i = self.dt
+
+  def add_output_transform(self, names, is_single):
+    for name in names:
+      if is_single:
+        output_name = MACE_OUTPUT_NODE_NAME + ":0"
+      else:
+        output_name = MACE_OUTPUT_NODE_NAME + '_' + name + ":0"
+      op_def = self.net_def.op.add()
+      op_def.name = output_name[:-2]
+      op_def.type = 'ImageToBuffer'
+      op_def.input.extend([name+':0'])
+      op_def.output.extend([output_name])
+
+      epsilon_arg = op_def.arg.add()
+      epsilon_arg.name = 'buffer_type'
+      epsilon_arg.i = buffer_type_map['IN_OUT_CHANNEL']
 
   def add_tensor(self, name, value):
     tensor = self.net_def.tensors.add()
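The transform ops above also define the external tensor names a caller must feed and fetch. A small sketch of the naming rule, assuming MACE_INPUT_NODE_NAME / MACE_OUTPUT_NODE_NAME are the usual 'mace_input_node' / 'mace_output_node' constants (their values are not shown in this diff) and using made-up node names; output names follow the same pattern with the output prefix:

    # Mirrors the naming logic of add_input_transform / add_output_transform.
    def external_input_name(name, is_single, prefix='mace_input_node'):
      # Single input keeps the fixed name; multiple inputs append the node name.
      return prefix + ':0' if is_single else prefix + '_' + name + ':0'

    print external_input_name('data1', True)    # mace_input_node:0
    print external_input_name('data1', False)   # mace_input_node_data1:0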
@@ -587,33 +600,35 @@ class CaffeConverter(object):
     self.net_def.op.extend([op_def])
     self.resolved_ops.add(op.name)
 
-  def replace_in_out_name(self, input_name, output_name):
-    input_name = input_name + ":0"
-    output_name = output_name + ":0"
-    for op in self.net_def.op:
-      if len(op.input) > 0 and op.input[0] == input_name:
-        op.input[0] = MACE_INPUT_NODE_NAME + ":0"
-      if len(op.output) > 0 and op.output[0] == output_name:
-        op.output[0] = MACE_OUTPUT_NODE_NAME + ":0"
-
-  def add_input_op_shape(self, input_node, input_shape):
-    if not input_shape:
-      input_shape = []
-      if self.caffe_net.input_dim:
-        input_shape = self.caffe_net.input_dim
-      elif self.caffe_net.input_shape:
-        input_shape = self.caffe_net.input_shape[0].dim
-      elif self.caffe_net.layer[0].input_param.shape:
-        input_shape = self.caffe_net.layer[0].input_param.shape[0].dim
-    input_op = self.ops_map[input_node]
-    input_op.output_shape = input_shape
-
-  def convert(self, input_node, input_shape, output_node):
+  def replace_in_out_name(self, input_names, output_names, is_single):
+    in_names = set([input_name + ":0" for input_name in input_names])
+    out_names = set([output_name + ":0" for output_name in output_names])
+    if is_single:
+      for op in self.net_def.op:
+        if len(op.input) > 0 and op.input[0] in in_names:
+          op.input[0] = MACE_INPUT_NODE_NAME + ':0'
+        if len(op.output) > 0 and op.output[0] in out_names:
+          op.output[0] = MACE_OUTPUT_NODE_NAME + ':0'
+    else:
+      for op in self.net_def.op:
+        if len(op.input) > 0 and op.input[0] in in_names:
+          op.input[0] = MACE_INPUT_NODE_NAME + '_' + op.input[0]
+        if len(op.output) > 0 and op.output[0] in out_names:
+          op.output[0] = MACE_OUTPUT_NODE_NAME + '_' + op.output[0]
+
+  def add_input_op_shape(self, input_nodes, input_shapes):
+    assert len(input_nodes) == len(input_shapes)
+    for i in range(len(input_nodes)):
+      input_op = self.ops_map[input_nodes[i]]
+      input_op.output_shape = input_shapes[i]
+
+  def convert(self, input_nodes, input_shapes, output_nodes):
+    is_single = len(input_nodes) == 1 and len(output_nodes) == 1
     if self.device == 'gpu':
-      self.add_input_transform(input_node)
+      self.add_input_transform(input_nodes, is_single)
 
     assert self.ops[0].type == 'Input'
-    self.add_input_op_shape(input_node, input_shape)
+    self.add_input_op_shape(input_nodes, input_shapes)
 
     for op in self.ops:
       if op.name in self.resolved_ops:
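On the CPU path, replace_in_out_name now performs the matching renaming: in the single-node case the boundary tensors are renamed to the fixed MACE names, while in the multi-node case the prefix is concatenated with the existing tensor name, which already carries its ':0' suffix, so the result has the same 'prefix_node:0' form as the GPU transform ops above. For example (prefix value assumed as before, node name made up):

    # Multi-input case: op.input[0] == 'data1:0' and 'data1:0' is in in_names.
    op_input = 'data1:0'
    print 'mace_input_node' + '_' + op_input   # mace_input_node_data1:0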
@@ -644,17 +659,17 @@ class CaffeConverter(object):
         raise Exception('Unknown Op: %s, type: %s' % (op.name, op.type))
 
     if self.device == 'gpu':
-      self.add_output_transform(output_node)
+      self.add_output_transform(output_nodes, is_single)
 
     if self.device == 'cpu':
-      self.replace_in_out_name(input_node, output_node)
+      self.replace_in_out_name(input_nodes, output_nodes, is_single)
 
     for op in self.ops:
       if op.name not in self.resolved_ops:
         print 'Unresolve Op: %s with type %s' % (op.name, op.type)
 
-def convert_to_mace_pb(model_file, weight_file, input_node, input_shape, output_node, data_type, device, winograd):
+def convert_to_mace_pb(model_file, weight_file, input_node_str, input_shape_str, output_node_str, data_type, device, winograd):
   net_def = mace_pb2.NetDef()
   dt = data_type_map[data_type]
@@ -666,8 +681,17 @@ def convert_to_mace_pb(model_file, weight_file, input_node, input_shape, output_node, data_type, device, winograd):
   with open(weight_file, "rb") as f:
     weights.MergeFromString(f.read())
 
+  input_nodes = [x for x in input_node_str.split(',')]
+  input_shapes = []
+  if input_shape_str != "":
+    input_shape_strs = [x for x in input_shape_str.split(':')]
+    for shape_str in input_shape_strs:
+      input_shapes.extend([[int(x) for x in shape_str.split(',')]])
+  output_nodes = [x for x in output_node_str.split(',')]
+  assert len(input_nodes) == len(input_shapes)
+
   converter = CaffeConverter(caffe_net, weights, net_def, dt, device, winograd)
-  converter.convert(input_node, input_shape, output_node)
+  converter.convert(input_nodes, input_shapes, output_nodes)
   print "PB Converted."
   if device == 'gpu':
     print "start optimize memory."
...
@@ -39,12 +39,9 @@ def main(unused_args):
       print("DSP not support caffe model yet.")
       return -1
 
-    input_shape = []
-    if FLAGS.input_shape != "":
-      input_shape.extend([int(x) for x in FLAGS.input_shape.split(',')])
     from lib.python.tools import caffe_converter_lib
     output_graph_def = caffe_converter_lib.convert_to_mace_pb(
-        FLAGS.model_file, FLAGS.weight_file, FLAGS.input_node, input_shape, FLAGS.output_node,
+        FLAGS.model_file, FLAGS.weight_file, FLAGS.input_node, FLAGS.input_shape, FLAGS.output_node,
         FLAGS.data_type, FLAGS.runtime, FLAGS.winograd)
   elif FLAGS.platform == 'tensorflow':
     if FLAGS.runtime == 'dsp':
...