diff --git a/mace/dsp/hexagon_control_wrapper.cc b/mace/dsp/hexagon_control_wrapper.cc
index db3ff22a826c020e2f4651676026ffe8c7930682..c6a8c8ec6825b56d94ab2a269eae71024993f375 100644
--- a/mace/dsp/hexagon_control_wrapper.cc
+++ b/mace/dsp/hexagon_control_wrapper.cc
@@ -80,7 +80,7 @@ bool HexagonControlWrapper::SetupGraph(NetDef net_def) {
   for (const OperatorDef& op: net_def.op()) {
     unsigned int op_id;
     MACE_CHECK(hexagon_nn_op_name_to_id(op.type().data(), &op_id) == 0,
-               "invalid op: ", op.name());
+               "invalid op: ", op.name(), ", type: ", op.type());
     vector<hexagon_nn_input> inputs(op.node_input_size());
     for (size_t i = 0; i < op.node_input_size(); ++i) {
       inputs[i].src_id = node_id(op.node_input(i).node_id());
diff --git a/mace/dsp/ops.h b/mace/dsp/ops.h
index f8e5e7f60da5ee7d965de33ace1083801e6884b5..c031e4663c9f835f839555f5b1a5f2cd6c3488e4 100644
--- a/mace/dsp/ops.h
+++ b/mace/dsp/ops.h
@@ -157,8 +157,11 @@ DEF_OP_WREF(QuantizedMinimum_8)
 DEF_OP(Pad_f)
 DEF_OP(SpaceToBatchND_f)
 DEF_OP(BatchToSpaceND_f)
+DEF_OP(QuantizedSpaceToBatchND_8)
+DEF_OP(QuantizedBatchToSpaceND_8)
 DEF_OP(QuantizedPad_8)
 DEF_OP(ResizeBilinear_f)
+DEF_OP(QuantizedResizeBilinear_8)
 DEF_OP(ConcatV2_f)
 DEF_OP(ConcatV2_int32)
 DEF_OP(Prod_int32)
diff --git a/mace/python/tools/dsp_ops.py b/mace/python/tools/dsp_ops.py
index 8874b0b33a3a42fa01a07bed0fb2b601dfad2b69..bd79b53ada3e970162919ee08e834ee224f23b07 100644
--- a/mace/python/tools/dsp_ops.py
+++ b/mace/python/tools/dsp_ops.py
@@ -18,6 +18,9 @@ class DspOps(object):
       'QuantizedAvgPool': 'QuantizedAvgPool_8',
       'QuantizedConcat': 'QuantizedConcat_8',
       'QuantizedBiasAdd': 'QuantizedBiasAdd_8p8to32',
+      'QuantizedResizeBilinear' : 'QuantizedResizeBilinear_8',
+      'QuantizedSpaceToBatchND': 'QuantizedSpaceToBatchND_8',
+      'QuantizedBatchToSpaceND': 'QuantizedBatchToSpaceND_8',
       'Min': 'Min_f',
       'Max': 'Max_f',
       'QuantizeV2': 'Quantize',
diff --git a/mace/python/tools/tf_dsp_converter_lib.py b/mace/python/tools/tf_dsp_converter_lib.py
index 28cb19991e9a8ccc22ae0d39ad8777e58dd1aab3..3e7c4475a36f3aa02f9fd22554475cf091807afb 100644
--- a/mace/python/tools/tf_dsp_converter_lib.py
+++ b/mace/python/tools/tf_dsp_converter_lib.py
@@ -17,6 +17,7 @@ padding_mode = {
 
 node_count = 0
 node_ids = {}
+resolved_ops = set()
 
 def max_elem_size(tensor):
   if len(tensor.shape.as_list()) == 0:
@@ -72,7 +73,10 @@ def convert_ops(unresolved_ops, net_def, output_node, dsp_ops):
 
   print ('Op: ', first_op.name, first_op.type, first_op.outputs[0].shape)
 
-  if first_op.type == 'Const':
+  if first_op.name in resolved_ops:
+    pass
+
+  elif first_op.type == 'Const':
     print ('Add const node: ', first_op.name)
     tf_tensor = first_op.outputs[0].eval()
     tensor = net_def.tensors.add()
@@ -99,10 +103,34 @@ def convert_ops(unresolved_ops, net_def, output_node, dsp_ops):
     op_def.type = dsp_ops.map_nn_op(first_op.type)
     op_def.node_id = node_count
     node_count += 1
-    register_node_id(op_def.name, op_def.node_id)
-
     op_def.padding = padding_mode['NA']
-    if has_padding_and_strides(first_op):
+
+    if len(first_op.outputs) > 0 and first_op.type == 'Dequantize' \
+        and len(first_op.outputs[0].consumers()) > 0 \
+        and (first_op.outputs[0].consumers()[0].type == 'SpaceToBatchND' \
+        or first_op.outputs[0].consumers()[0].type == 'BatchToSpaceND'):
+      input_tensor = first_op.inputs[0]
+      min_tensor = first_op.inputs[1]
+      max_tensor = first_op.inputs[2]
+      s2b_op = first_op.outputs[0].consumers()[0]
+      reshape_op = s2b_op.outputs[0].consumers()[0]
+      min_op = reshape_op.outputs[0].consumers()[0]
+      max_op = reshape_op.outputs[0].consumers()[1]
+      quantize_op = min_op.outputs[0].consumers()[0]
+      resolved_ops.add(s2b_op.name)
+      resolved_ops.add(reshape_op.name)
+      resolved_ops.add(min_op.name)
+      resolved_ops.add(max_op.name)
+      resolved_ops.add(quantize_op.name)
+
+      op_def.name = quantize_op.name
+      op_def.type = dsp_ops.map_nn_op('Quantized' + s2b_op.type)
+      op_def.input.append(input_tensor.name)
+      op_def.input.extend([t.name for t in s2b_op.inputs[1:]])
+      op_def.input.extend([min_tensor.name, max_tensor.name])
+      op_def.out_max_byte_size.extend([max_elem_size(out) for out in quantize_op.outputs])
+
+    elif has_padding_and_strides(first_op):
       op_def.padding = padding_mode[first_op.get_attr('padding')]
       op_def.input.extend([t.name for t in first_op.inputs])
       if 'ksize' in first_op.node_def.attr:
@@ -138,6 +166,8 @@ def convert_ops(unresolved_ops, net_def, output_node, dsp_ops):
     else:
       raise Exception('Unsupported op: ', first_op)
 
+    register_node_id(op_def.name, op_def.node_id)
+
     print ('Add op node: ', first_op.name)
     for t in op_def.input:
       node, port = t.split(':')
@@ -146,6 +176,8 @@ def convert_ops(unresolved_ops, net_def, output_node, dsp_ops):
       node_input.node_id = node_id
       node_input.output_port = int(port)
 
+  resolved_ops.add(first_op.name)
+
   for i in range(resolved_count):
     del unresolved_ops[0]
 
@@ -190,8 +222,14 @@ def convert_to_mace_pb(input_graph_def, input_dim, output_node):
   with session.graph.as_default() as graph:
     tf.import_graph_def(input_graph_def, name="")
     ops = graph.get_operations()
-    unresolved_ops = ops
     dsp_ops = DspOps()
+    # convert const node
+    unresolved_ops = [op for op in ops if op.type == 'Const']
+    while len(unresolved_ops) > 0:
+      convert_ops(unresolved_ops, net_def, output_node, dsp_ops)
+
+    # convert op node
+    unresolved_ops = [op for op in ops if op.type != 'Const']
     while len(unresolved_ops) > 0:
       convert_ops(unresolved_ops, net_def, output_node, dsp_ops)