From 148c198e965f0691a2f96032f8684b19dd61057f Mon Sep 17 00:00:00 2001
From: jiangjiajun
Date: Fri, 19 Jun 2020 06:51:12 +0000
Subject: [PATCH] add im2sequence support

---
 x2paddle/__init__.py                     |   2 +-
 .../paddle_custom_layer/im2sequence.py   |  80 +++++++++
 x2paddle/op_mapper/paddle_op_mapper.py   | 163 ++++++++++--------
 3 files changed, 168 insertions(+), 77 deletions(-)
 create mode 100644 x2paddle/op_mapper/paddle_custom_layer/im2sequence.py

diff --git a/x2paddle/__init__.py b/x2paddle/__init__.py
index bc8c296..ed9d4d8 100644
--- a/x2paddle/__init__.py
+++ b/x2paddle/__init__.py
@@ -1 +1 @@
-__version__ = "0.7.2"
+__version__ = "0.7.4"
diff --git a/x2paddle/op_mapper/paddle_custom_layer/im2sequence.py b/x2paddle/op_mapper/paddle_custom_layer/im2sequence.py
new file mode 100644
index 0000000..91fa2ca
--- /dev/null
+++ b/x2paddle/op_mapper/paddle_custom_layer/im2sequence.py
@@ -0,0 +1,80 @@
+import onnx
+import numpy as np
+from onnx import onnx_pb, helper
+
+im2seq_counter = 0
+
+
+def im2sequence(op, block):
+    global im2seq_counter
+    n, c, h, w = block.var(op.input('X')[0]).shape
+    assert h > 0 and w > 0, "Only fixed input shapes are supported for the im2sequence operator."
+    stride_h, stride_w = op.attr('strides')
+    paddings = op.attr('paddings')
+    assert op.attr(
+        'out_stride'
+    ) != 1, "Only out_stride==1 is supported for im2sequence operator."
+    h = h + paddings[0] + paddings[1]
+    w = w + paddings[1] + paddings[2]
+    kernel_h, kernel_w = op.attr('kernels')
+    out_h = 1 + (h - kernel_h + stride_h - 1) // stride_h
+    out_w = 1 + (w - kernel_w + stride_w - 1) // stride_w
+    h_steps = list()
+    for i in range(out_h):
+        h_steps.append([i * stride_h, i * stride_h + kernel_h])
+    w_steps = list()
+    for i in range(out_w):
+        w_steps.append([i * stride_w, i * stride_w + kernel_w])
+
+    nodes = list()
+    slice_blocks = list()
+    for i in range(out_h):
+        for j in range(out_w):
+            starts_name = "im2sequence.starts.{}.{}.{}".format(
+                im2seq_counter, i, j)
+            starts_tensor = helper.make_tensor(
+                name=starts_name,
+                data_type=onnx_pb.TensorProto.INT64,
+                dims=[4],
+                vals=[0, 0, h_steps[i][0], w_steps[j][0]])
+            ends_name = "im2sequence.ends.{}.{}.{}".format(im2seq_counter, i, j)
+            ends_tensor = helper.make_tensor(
+                name=ends_name,
+                data_type=onnx_pb.TensorProto.INT64,
+                dims=[4],
+                vals=[999999, 999999, h_steps[i][1], w_steps[j][1]])
+            starts_node = helper.make_node(
+                'Constant',
+                inputs=[],
+                outputs=[starts_name],
+                value=starts_tensor)
+            ends_node = helper.make_node(
+                'Constant', inputs=[], outputs=[ends_name], value=ends_tensor)
+            nodes.extend([starts_node, ends_node])
+
+            slice_block_name = "im2sequence.slice.{}.{}.{}".format(
+                im2seq_counter, i, j)
+            slice_block_node = helper.make_node(
+                'Slice',
+                inputs=[op.input('X')[0], starts_name, ends_name],
+                outputs=[slice_block_name])
+            flatten_block_name = "im2sequence.flatten.{}.{}.{}".format(
+                im2seq_counter, i, j)
+            flatten_block_node = helper.make_node(
+                "Flatten",
+                inputs=[slice_block_name],
+                outputs=[flatten_block_name],
+                axis=0)
+            nodes.extend([slice_block_node, flatten_block_node])
+            slice_blocks.append(flatten_block_name)
+    concat_block_name = "im2sequence.concat_block.{}".format(im2seq_counter)
+    # concat_block_node = helper.make_node("Concat", inputs=slice_blocks, outputs=[concat_block_name], axis=0)
+    concat_block_node = helper.make_node(
+        "Concat", inputs=slice_blocks, outputs=op.output('Out'), axis=0)
+    nodes.append(concat_block_node)
+    print("\n\n==========Important Notice===========")
+    print(
+        "Since im2sequence operator is used in your paddlepaddle model, the translated onnx model only supports input data with batch_size=1."
+    )
+    print("======================================\n")
+    return nodes
diff --git a/x2paddle/op_mapper/paddle_op_mapper.py b/x2paddle/op_mapper/paddle_op_mapper.py
index 373b89c..e38555e 100644
--- a/x2paddle/op_mapper/paddle_op_mapper.py
+++ b/x2paddle/op_mapper/paddle_op_mapper.py
@@ -21,8 +21,6 @@ import paddle.fluid.core as core
 import paddle.fluid as fluid
 import onnx
 from onnx import helper, onnx_pb
-from .paddle_custom_layer.yolo_box import yolo_box
-from .paddle_custom_layer.multiclass_nms import multiclass_nms
 
 
 class PaddleOpMapper(object):
@@ -39,6 +37,60 @@ class PaddleOpMapper(object):
         self.name_counter = dict()
 
+    def convert(self, program, save_dir):
+        weight_nodes = self.convert_weights(program)
+        op_nodes = list()
+        input_nodes = list()
+        output_nodes = list()
+
+        unsupported_ops = set()
+
+        print("Translating PaddlePaddle to ONNX...\n")
+        for block in program.blocks:
+            for i, op in enumerate(block.ops):
+                sys.stdout.write(
+                    "\rTotal:{}, Current:{} : {} ".format(
+                        len(block.ops), i + 1, op.type))
+                sys.stdout.flush()
+                if not hasattr(self, op.type):
+                    unsupported_ops.add(op.type)
+                    continue
+                if len(unsupported_ops) > 0:
+                    continue
+                node = getattr(self, op.type)(op, block)
+                if op.type == 'feed':
+                    input_nodes.append(node)
+                elif op.type == 'fetch':
+                    output_nodes.append(node)
+                else:
+                    if isinstance(node, list):
+                        op_nodes = op_nodes + node
+                    else:
+                        op_nodes.append(node)
+
+        if len(unsupported_ops) > 0:
+            print("\nThere's {} ops are not supported yet".format(
+                len(unsupported_ops)))
+            for op in unsupported_ops:
+                print("=========== {} ===========".format(op))
+            return
+
+        graph = helper.make_graph(
+            nodes=weight_nodes + op_nodes,
+            name='onnx_model_from_paddle',
+            initializer=[],
+            inputs=input_nodes,
+            outputs=output_nodes)
+        model = helper.make_model(graph, producer_name='X2Paddle')
+        onnx.checker.check_model(model)
+
+        if not os.path.isdir(save_dir):
+            os.makedirs(save_dir)
+        with open(os.path.join(save_dir, 'x2paddle_model.onnx'), 'wb') as f:
+            f.write(model.SerializeToString())
+        print("\nTranslated model saved in {}".format(
+            os.path.join(save_dir, 'x2paddle_model.onnx')))
+
     def get_name(self, op_name, var_name):
         name = 'p2o.{}.{}'.format(op_name, var_name)
         if name not in self.name_counter:
@@ -47,6 +99,26 @@ class PaddleOpMapper(object):
             return name + '.{}'.format(self.name_counter[name])
 
+    def convert_weights(self, program):
+        var_names = program.global_block().vars
+        nodes = list()
+        for name in var_names:
+            var = program.global_block().var(name)
+            if name.endswith('feed') or name.endswith('fetch'):
+                continue
+            if not var.persistable:
+                continue
+            weight = np.array(fluid.global_scope().find_var(name).get_tensor())
+            tensor = helper.make_tensor(
+                name=name,
+                dims=var.shape,
+                data_type=self.paddle_onnx_dtype_map[var.dtype],
+                vals=weight.flatten().tolist())
+            node = helper.make_node(
+                'Constant', inputs=[], outputs=[name], value=tensor)
+            nodes.append(node)
+        return nodes
+
     def make_constant_node(self, name, dtype, value=None):
         if isinstance(value, list):
             dims = (len(value), )
@@ -181,11 +253,18 @@ class PaddleOpMapper(object):
                 outputs=op.output('Out'), )
         else:
+            input_shape = block.var(op.input('X')[0]).shape
+            k_size = op.attr('ksize')
+            paddings = op.attr('paddings')
+            if input_shape[2] > 0 and input_shape[2] + paddings[0] < k_size[0]:
+                k_size[0] = input_shape[2] + paddings[0]
+            if input_shape[3] > 0 and input_shape[3] + paddings[1] < k_size[1]:
+                k_size[1] = input_shape[3] + paddings[1]
             node = helper.make_node(
                 pool_type[op.attr('pooling_type')][0],
                 inputs=op.input('X'),
                 outputs=op.output('Out'),
-                kernel_shape=op.attr('ksize'),
+                kernel_shape=k_size,
                 strides=op.attr('strides'),
                 pads=op.attr('paddings') + op.attr('paddings'))
         return node
@@ -736,9 +815,11 @@ class PaddleOpMapper(object):
         return node
 
     def yolo_box(self, op, block):
+        from .paddle_custom_layer.yolo_box import yolo_box
         return yolo_box(op, block)
 
     def multiclass_nms(self, op, block):
+        from .paddle_custom_layer.multiclass_nms import multiclass_nms
         return multiclass_nms(op, block)
 
     def reciprocal(self, op, block):
@@ -747,76 +828,6 @@ class PaddleOpMapper(object):
         inputs = [op.input('X')[0]]
         node = helper.make_node('Reciprocal', inputs=inputs, outputs=outputs)
         return node
 
-    def convert_weights(self, program):
-        var_names = program.global_block().vars
-        nodes = list()
-        for name in var_names:
-            var = program.global_block().var(name)
-            if name.endswith('feed') or name.endswith('fetch'):
-                continue
-            if not var.persistable:
-                continue
-            weight = np.array(fluid.global_scope().find_var(name).get_tensor())
-            tensor = helper.make_tensor(
-                name=name,
-                dims=var.shape,
-                data_type=self.paddle_onnx_dtype_map[var.dtype],
-                vals=weight.flatten().tolist())
-            node = helper.make_node(
-                'Constant', inputs=[], outputs=[name], value=tensor)
-            nodes.append(node)
-        return nodes
-
-    def convert(self, program, save_dir):
-        weight_nodes = self.convert_weights(program)
-        op_nodes = list()
-        input_nodes = list()
-        output_nodes = list()
-
-        unsupported_ops = set()
-
-        print("Translating PaddlePaddle to ONNX...\n")
-        for block in program.blocks:
-            for i, op in enumerate(block.ops):
-                sys.stdout.write(
-                    "\rTotal:{}, Current:{} : {} ".format(
-                        len(block.ops), i + 1, op.type))
-                sys.stdout.flush()
-                if not hasattr(self, op.type):
-                    unsupported_ops.add(op.type)
-                    continue
-                if len(unsupported_ops) > 0:
-                    continue
-                node = getattr(self, op.type)(op, block)
-                if op.type == 'feed':
-                    input_nodes.append(node)
-                elif op.type == 'fetch':
-                    output_nodes.append(node)
-                else:
-                    if isinstance(node, list):
-                        op_nodes = op_nodes + node
-                    else:
-                        op_nodes.append(node)
-
-        if len(unsupported_ops) > 0:
-            print("\nThere's {} ops are not supported yet".format(
-                len(unsupported_ops)))
-            for op in unsupported_ops:
-                print("=========== {} ===========".format(op))
-            return
-
-        graph = helper.make_graph(
-            nodes=weight_nodes + op_nodes,
-            name='onnx_model_from_paddle',
-            initializer=[],
-            inputs=input_nodes,
-            outputs=output_nodes)
-        model = helper.make_model(graph, producer_name='X2Paddle')
-        onnx.checker.check_model(model)
-
-        if not os.path.isdir(save_dir):
-            os.makedirs(save_dir)
-        with open(os.path.join(save_dir, 'x2paddle_model.onnx'), 'wb') as f:
-            f.write(model.SerializeToString())
-        print("\nTranslated model saved in {}".format(
-            os.path.join(save_dir, 'x2paddle_model.onnx')))
+    def im2sequence(self, op, block):
+        from .paddle_custom_layer.im2sequence import im2sequence
+        return im2sequence(op, block)
-- 
GitLab
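
A minimal usage sketch of the convert(program, save_dir) entry point this patch relocates, not part of the patch itself: the directory names are placeholders, and it assumes the inference model was saved with the fluid 1.x API so that load_inference_model puts the persistable weights into fluid.global_scope(), which convert_weights() reads. Any im2sequence op in the program is lowered through the Slice/Flatten/Concat decomposition added above, so the exported ONNX model only supports batch_size=1 input.

    import paddle.fluid as fluid
    from x2paddle.op_mapper.paddle_op_mapper import PaddleOpMapper

    exe = fluid.Executor(fluid.CPUPlace())
    # load_inference_model returns (program, feed_names, fetch_targets) and
    # loads the persistable weights into the global scope by default.
    program, feed_names, fetch_targets = fluid.io.load_inference_model(
        'inference_model_dir', exe)  # placeholder path

    mapper = PaddleOpMapper()
    # Writes x2paddle_model.onnx into save_dir; any op without a mapper
    # method is reported as unsupported and no model is saved.
    mapper.convert(program, save_dir='onnx_model_dir')  # placeholder path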