Commit 148c198e authored by: J jiangjiajun

add im2sequence support

Parent 0baf9e69
__version__ = "0.7.2" __version__ = "0.7.4"
import onnx
import numpy as np
from onnx import onnx_pb, helper
im2seq_counter = 0


def im2sequence(op, block):
    global im2seq_counter
    # Give each converted im2sequence op a unique prefix for its ONNX node names.
    im2seq_counter += 1
    n, c, h, w = block.var(op.input('X')[0]).shape
    assert h > 0 and w > 0, "Only fixed input shapes are supported for the im2sequence operator."
    stride_h, stride_w = op.attr('strides')
    paddings = op.attr('paddings')
    assert op.attr(
        'out_stride'
    ) == 1, "Only out_stride==1 is supported for the im2sequence operator."
    h = h + paddings[0] + paddings[1]
    w = w + paddings[1] + paddings[2]
    kernel_h, kernel_w = op.attr('kernels')
    # Number of sliding windows per dimension: ceil((size - kernel) / stride) + 1.
    out_h = 1 + (h - kernel_h + stride_h - 1) // stride_h
    out_w = 1 + (w - kernel_w + stride_w - 1) // stride_w
    h_steps = list()
    for i in range(out_h):
        h_steps.append([i * stride_h, i * stride_h + kernel_h])
    w_steps = list()
    for i in range(out_w):
        w_steps.append([i * stride_w, i * stride_w + kernel_w])

    nodes = list()
    slice_blocks = list()
    for i in range(out_h):
        for j in range(out_w):
            # Slice one kernel_h x kernel_w window out of the input feature map.
            starts_name = "im2sequence.starts.{}.{}.{}".format(
                im2seq_counter, i, j)
            starts_tensor = helper.make_tensor(
                name=starts_name,
                data_type=onnx_pb.TensorProto.INT64,
                dims=[4],
                vals=[0, 0, h_steps[i][0], w_steps[j][0]])
            ends_name = "im2sequence.ends.{}.{}.{}".format(im2seq_counter, i, j)
            ends_tensor = helper.make_tensor(
                name=ends_name,
                data_type=onnx_pb.TensorProto.INT64,
                dims=[4],
                vals=[999999, 999999, h_steps[i][1], w_steps[j][1]])
            starts_node = helper.make_node(
                'Constant',
                inputs=[],
                outputs=[starts_name],
                value=starts_tensor)
            ends_node = helper.make_node(
                'Constant', inputs=[], outputs=[ends_name], value=ends_tensor)
            nodes.extend([starts_node, ends_node])

            slice_block_name = "im2sequence.slice.{}.{}.{}".format(
                im2seq_counter, i, j)
            slice_block_node = helper.make_node(
                'Slice',
                inputs=[op.input('X')[0], starts_name, ends_name],
                outputs=[slice_block_name])
            # Flatten each window into a single row of the output sequence.
            flatten_block_name = "im2sequence.flatten.{}.{}.{}".format(
                im2seq_counter, i, j)
            flatten_block_node = helper.make_node(
                "Flatten",
                inputs=[slice_block_name],
                outputs=[flatten_block_name],
                axis=0)
            nodes.extend([slice_block_node, flatten_block_node])
            slice_blocks.append(flatten_block_name)

    # Concatenate all flattened windows along axis 0 to form the output sequence.
    concat_block_name = "im2sequence.concat_block.{}".format(im2seq_counter)
    # concat_block_node = helper.make_node("Concat", inputs=slice_blocks, outputs=[concat_block_name], axis=0)
    concat_block_node = helper.make_node(
        "Concat", inputs=slice_blocks, outputs=op.output('Out'), axis=0)
    nodes.append(concat_block_node)
    print("\n\n========== Important Notice ===========")
    print(
        "Since the im2sequence operator is used in your PaddlePaddle model, the translated ONNX model only supports input data with batch_size=1."
    )
    print("=======================================\n")
    return nodes
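
For reference, a minimal NumPy sketch of what the emitted Slice/Flatten/Concat subgraph computes for a batch_size=1 input with zero padding; the function name and the example shapes are illustrative only:

import numpy as np

def im2sequence_reference(x, kernels, strides):
    # Mirrors the ONNX subgraph built above: one Slice + Flatten per window,
    # then a single Concat along axis 0. Only valid for n == 1.
    n, c, h, w = x.shape
    kernel_h, kernel_w = kernels
    stride_h, stride_w = strides
    out_h = 1 + (h - kernel_h + stride_h - 1) // stride_h
    out_w = 1 + (w - kernel_w + stride_w - 1) // stride_w
    rows = []
    for i in range(out_h):
        for j in range(out_w):
            window = x[:, :, i * stride_h:i * stride_h + kernel_h,
                       j * stride_w:j * stride_w + kernel_w]
            rows.append(window.reshape(1, -1))  # Flatten with axis=0 keeps a leading 1
    return np.concatenate(rows, axis=0)         # Concat along axis 0

# A 1x3x4x4 input with 2x2 kernels and stride 2 yields 4 windows of 12 values each.
out = im2sequence_reference(np.random.rand(1, 3, 4, 4), kernels=(2, 2), strides=(2, 2))
assert out.shape == (4, 12)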
@@ -21,8 +21,6 @@
 import paddle.fluid.core as core
 import paddle.fluid as fluid
 import onnx
 from onnx import helper, onnx_pb
-from .paddle_custom_layer.yolo_box import yolo_box
-from .paddle_custom_layer.multiclass_nms import multiclass_nms
 
 class PaddleOpMapper(object):
@@ -39,6 +37,60 @@ class PaddleOpMapper(object):
         self.name_counter = dict()
 
+    def convert(self, program, save_dir):
+        weight_nodes = self.convert_weights(program)
+        op_nodes = list()
+        input_nodes = list()
+        output_nodes = list()
+        unsupported_ops = set()
+
+        print("Translating PaddlePaddle to ONNX...\n")
+        for block in program.blocks:
+            for i, op in enumerate(block.ops):
+                sys.stdout.write(
+                    "\rTotal:{}, Current:{} : {} ".format(
+                        len(block.ops), i + 1, op.type))
+                sys.stdout.flush()
+                if not hasattr(self, op.type):
+                    unsupported_ops.add(op.type)
+                    continue
+                if len(unsupported_ops) > 0:
+                    continue
+                node = getattr(self, op.type)(op, block)
+                if op.type == 'feed':
+                    input_nodes.append(node)
+                elif op.type == 'fetch':
+                    output_nodes.append(node)
+                else:
+                    if isinstance(node, list):
+                        op_nodes = op_nodes + node
+                    else:
+                        op_nodes.append(node)
+
+        if len(unsupported_ops) > 0:
+            print("\nThere are {} ops that are not supported yet".format(
+                len(unsupported_ops)))
+            for op in unsupported_ops:
+                print("=========== {} ===========".format(op))
+            return
+
+        graph = helper.make_graph(
+            nodes=weight_nodes + op_nodes,
+            name='onnx_model_from_paddle',
+            initializer=[],
+            inputs=input_nodes,
+            outputs=output_nodes)
+        model = helper.make_model(graph, producer_name='X2Paddle')
+        onnx.checker.check_model(model)
+        if not os.path.isdir(save_dir):
+            os.makedirs(save_dir)
+        with open(os.path.join(save_dir, 'x2paddle_model.onnx'), 'wb') as f:
+            f.write(model.SerializeToString())
+        print("\nTranslated model saved in {}".format(
+            os.path.join(save_dir, 'x2paddle_model.onnx')))
+
     def get_name(self, op_name, var_name):
         name = 'p2o.{}.{}'.format(op_name, var_name)
         if name not in self.name_counter:
@@ -47,6 +99,26 @@ class PaddleOpMapper(object):
             self.name_counter[name] += 1
         return name + '.{}'.format(self.name_counter[name])
 
+    def convert_weights(self, program):
+        var_names = program.global_block().vars
+        nodes = list()
+        for name in var_names:
+            var = program.global_block().var(name)
+            if name.endswith('feed') or name.endswith('fetch'):
+                continue
+            if not var.persistable:
+                continue
+            weight = np.array(fluid.global_scope().find_var(name).get_tensor())
+            tensor = helper.make_tensor(
+                name=name,
+                dims=var.shape,
+                data_type=self.paddle_onnx_dtype_map[var.dtype],
+                vals=weight.flatten().tolist())
+            node = helper.make_node(
+                'Constant', inputs=[], outputs=[name], value=tensor)
+            nodes.append(node)
+        return nodes
+
     def make_constant_node(self, name, dtype, value=None):
         if isinstance(value, list):
             dims = (len(value), )
@@ -181,11 +253,18 @@ class PaddleOpMapper(object):
                 outputs=op.output('Out'),
             )
         else:
+            input_shape = block.var(op.input('X')[0]).shape
+            k_size = op.attr('ksize')
+            paddings = op.attr('paddings')
+            if input_shape[2] > 0 and input_shape[2] + paddings[0] < k_size[0]:
+                k_size[0] = input_shape[2] + paddings[0]
+            if input_shape[3] > 0 and input_shape[3] + paddings[1] < k_size[1]:
+                k_size[1] = input_shape[3] + paddings[1]
             node = helper.make_node(
                 pool_type[op.attr('pooling_type')][0],
                 inputs=op.input('X'),
                 outputs=op.output('Out'),
-                kernel_shape=op.attr('ksize'),
+                kernel_shape=k_size,
                 strides=op.attr('strides'),
                 pads=op.attr('paddings') + op.attr('paddings'))
         return node
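
The kernel-size clamp added in this hunk keeps kernel_shape from exceeding the (padded) spatial extent of the input when its shape is static; a quick worked example with illustrative numbers:

# Suppose the pooled feature map is 1 x 256 x 5 x 5 with ksize = [7, 7] and
# paddings = [0, 0]: since 5 + 0 < 7 in both dimensions, k_size is clamped to
# [5, 5] and the emitted node uses kernel_shape=[5, 5].
input_shape = (1, 256, 5, 5)   # illustrative N, C, H, W
k_size = [7, 7]
paddings = [0, 0]
if input_shape[2] > 0 and input_shape[2] + paddings[0] < k_size[0]:
    k_size[0] = input_shape[2] + paddings[0]
if input_shape[3] > 0 and input_shape[3] + paddings[1] < k_size[1]:
    k_size[1] = input_shape[3] + paddings[1]
assert k_size == [5, 5]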
@@ -736,9 +815,11 @@ class PaddleOpMapper(object):
         return node
 
     def yolo_box(self, op, block):
+        from .paddle_custom_layer.yolo_box import yolo_box
         return yolo_box(op, block)
 
     def multiclass_nms(self, op, block):
+        from .paddle_custom_layer.multiclass_nms import multiclass_nms
         return multiclass_nms(op, block)
 
     def reciprocal(self, op, block):
@@ -747,76 +828,6 @@ class PaddleOpMapper(object):
         node = helper.make_node('Reciprocal', inputs=inputs, outputs=outputs)
         return node
 
-    def convert_weights(self, program):
-        var_names = program.global_block().vars
-        nodes = list()
-        for name in var_names:
-            var = program.global_block().var(name)
-            if name.endswith('feed') or name.endswith('fetch'):
-                continue
-            if not var.persistable:
-                continue
-            weight = np.array(fluid.global_scope().find_var(name).get_tensor())
-            tensor = helper.make_tensor(
-                name=name,
-                dims=var.shape,
-                data_type=self.paddle_onnx_dtype_map[var.dtype],
-                vals=weight.flatten().tolist())
-            node = helper.make_node(
-                'Constant', inputs=[], outputs=[name], value=tensor)
-            nodes.append(node)
-        return nodes
-
-    def convert(self, program, save_dir):
-        weight_nodes = self.convert_weights(program)
-        op_nodes = list()
-        input_nodes = list()
-        output_nodes = list()
-        unsupported_ops = set()
-
-        print("Translating PaddlePaddle to ONNX...\n")
-        for block in program.blocks:
-            for i, op in enumerate(block.ops):
-                sys.stdout.write(
-                    "\rTotal:{}, Current:{} : {} ".format(
-                        len(block.ops), i + 1, op.type))
-                sys.stdout.flush()
-                if not hasattr(self, op.type):
-                    unsupported_ops.add(op.type)
-                    continue
-                if len(unsupported_ops) > 0:
-                    continue
-                node = getattr(self, op.type)(op, block)
-                if op.type == 'feed':
-                    input_nodes.append(node)
-                elif op.type == 'fetch':
-                    output_nodes.append(node)
-                else:
-                    if isinstance(node, list):
-                        op_nodes = op_nodes + node
-                    else:
-                        op_nodes.append(node)
-
-        if len(unsupported_ops) > 0:
-            print("\nThere are {} ops that are not supported yet".format(
-                len(unsupported_ops)))
-            for op in unsupported_ops:
-                print("=========== {} ===========".format(op))
-            return
-
-        graph = helper.make_graph(
-            nodes=weight_nodes + op_nodes,
-            name='onnx_model_from_paddle',
-            initializer=[],
-            inputs=input_nodes,
-            outputs=output_nodes)
-        model = helper.make_model(graph, producer_name='X2Paddle')
-        onnx.checker.check_model(model)
-        if not os.path.isdir(save_dir):
-            os.makedirs(save_dir)
-        with open(os.path.join(save_dir, 'x2paddle_model.onnx'), 'wb') as f:
-            f.write(model.SerializeToString())
-        print("\nTranslated model saved in {}".format(
-            os.path.join(save_dir, 'x2paddle_model.onnx')))
+    def im2sequence(self, op, block):
+        from .paddle_custom_layer.im2sequence import im2sequence
+        return im2sequence(op, block)
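
To exercise the mapper end to end, a hedged usage sketch follows; the model directory is a placeholder, the import path of PaddleOpMapper is assumed, and fluid.io.load_inference_model is used as one common way to obtain the Program that convert() expects:

import paddle.fluid as fluid
from x2paddle.op_mapper.paddle_op_mapper import PaddleOpMapper  # module path may differ

exe = fluid.Executor(fluid.CPUPlace())
# Load a saved inference model so its parameters live in fluid.global_scope(),
# which convert_weights() reads from.
[program, feed_names, fetch_targets] = fluid.io.load_inference_model(
    dirname='path/to/inference_model', executor=exe)

mapper = PaddleOpMapper()
mapper.convert(program, save_dir='./onnx_model')  # writes x2paddle_model.onnx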