diff --git a/mace/python/tools/converter_tool/onnx_converter.py b/mace/python/tools/converter_tool/onnx_converter.py
index a6e8d4623e4f0d3178ae8a077c9b6446fe7cd50d..61f45e548a1c85cd26530ff41e7841978b557588 100644
--- a/mace/python/tools/converter_tool/onnx_converter.py
+++ b/mace/python/tools/converter_tool/onnx_converter.py
@@ -941,6 +941,33 @@ class OnnxConverter(base_converter.ConverterInterface):
                 coeff_arg = op.arg.add()
                 coeff_arg.name = MaceKeyword.mace_coeff_str
                 coeff_arg.floats.extend([min_value, max_value])
+        elif len(node.inputs) == 2:
+            if node.inputs[1] in self._consts and \
+                    node.inputs[0] not in self._consts:
+                const_name = node.inputs[1]
+                const_tensor = self._consts[const_name]
+                if len(const_tensor.dims) == 0:
+                    value_arg = op.arg.add()
+                    value_arg.name = MaceKeyword.mace_scalar_input_str
+                    value_arg.f = const_tensor.float_data[0]
+                    value_index_arg = op.arg.add()
+                    value_index_arg.name = \
+                        MaceKeyword.mace_scalar_input_index_str
+                    value_index_arg.i = 1
+                    del op.input[1]
+            elif node.inputs[0] in self._consts and \
+                    node.inputs[1] not in self._consts:
+                const_name = node.inputs[0]
+                const_tensor = self._consts[const_name]
+                if len(const_tensor.dims) == 0:
+                    value_arg = op.arg.add()
+                    value_arg.name = MaceKeyword.mace_scalar_input_str
+                    value_arg.f = const_tensor.float_data[0]
+                    value_index_arg = op.arg.add()
+                    value_index_arg.name = \
+                        MaceKeyword.mace_scalar_input_index_str
+                    value_index_arg.i = 0
+                    del op.input[0]
 
     @staticmethod
     def copy_node_attr(op, node, attr_name, dtype=AttributeType.INT,
diff --git a/tools/onnx_optimizer.py b/tools/onnx_optimizer.py
index b877cd89eef7713c59e7fe82286a62057de1b6a6..8a1a780621e1fd3f5d8f69e1c5727e4c848e5a2b 100644
--- a/tools/onnx_optimizer.py
+++ b/tools/onnx_optimizer.py
@@ -22,12 +22,12 @@ from onnx import optimizer
 
 def main():
     if len(sys.argv) != 3:
-        print "Usage: python onnx_optimizer.py model.onnx model_opt.onnx"
+        print("Usage: python onnx_optimizer.py model.onnx model_opt.onnx")
         sys.exit(0)
     in_path = sys.argv[1]
     out_path = sys.argv[2]
     original_model = onnx.load(in_path)
-    print "Start optimize ONNX model for inference:"
+    print("Start optimize ONNX model for inference:")
     passes = ['eliminate_identity',
               'fuse_consecutive_squeezes',
               'fuse_consecutive_transposes',
@@ -35,15 +35,14 @@ def main():
               'eliminate_nop_transpose',
               'eliminate_unused_initializer',
               'extract_constant_to_initializer',
-              'fuse_add_bias_into_conv',
               'fuse_bn_into_conv',
               'fuse_transpose_into_gemm']
     for i in range(len(passes)):
-        print i, ".", passes[i]
+        print("%s.%s" % (i, passes[i]))
    optimized_model = optimizer.optimize(original_model, passes)
     onnx.save_model(optimized_model, out_path)
-    print "Optimize Finished!"
-    print "Please check new model in:", out_path
+    print("Optimize Finished!")
+    print("Please check new model in:", out_path)
 
 
 if __name__ == '__main__':
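
For reviewers, below is a minimal standalone sketch (not part of the patch) of the rule the onnx_converter.py hunk implements: for a two-input eltwise node, when exactly one input is a 0-d constant, its value and original input index are recorded and the constant is dropped from the op's inputs. The function name fold_scalar_input and the plain consts argument are illustrative stand-ins for the converter's self._consts and the MACE op/arg protobufs; consts is assumed to map tensor names to objects with .dims and .float_data, like ONNX TensorProto initializers.

def fold_scalar_input(inputs, consts):
    """Return (remaining_inputs, scalar_value, scalar_index), or None if
    the node is not a tensor-with-scalar binary op."""
    if len(inputs) != 2:
        return None
    # Mirror the patch: check input 1 first, then input 0; exactly one of
    # the two inputs must be a known constant.
    for idx in (1, 0):
        other = 1 - idx
        if inputs[idx] in consts and inputs[other] not in consts:
            const_tensor = consts[inputs[idx]]
            if len(const_tensor.dims) == 0:  # 0-d tensor, i.e. a scalar
                return [inputs[other]], const_tensor.float_data[0], idx
            return None  # constant is a real tensor; leave the op unchanged
    return None

The tools/onnx_optimizer.py hunks only port the script's print statements to Python 3 syntax and drop the 'fuse_add_bias_into_conv' pass from the pass list; invocation is unchanged, e.g. python tools/onnx_optimizer.py model.onnx model_opt.onnx.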