Unverified commit 4350bd05, authored by J Jason, committed by GitHub

Merge pull request #274 from PaddlePaddle/2onnx

2onnx
@@ -13,6 +13,7 @@
# limitations under the License.
import math
import sys
import x2paddle
import os
import numpy as np
@@ -77,6 +78,11 @@ class PaddleOpMapper(object):
'Relu', inputs=op.input('X'), outputs=op.output('Out'))
return node
def sigmoid(self, op, block):
node = helper.make_node(
'Sigmoid', inputs=op.input('X'), outputs=op.output('Out'))
return node
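Single-node activations such as relu and sigmoid map one-to-one onto ONNX operators. A minimal sketch of the helper.make_node pattern used throughout this mapper (the tensor names 'x' and 'y' are placeholders, not taken from the diff):

from onnx import helper

# One ONNX node built from input/output tensor names; any ONNX attributes
# would be passed as additional keyword arguments.
node = helper.make_node('Sigmoid', inputs=['x'], outputs=['y'])
print(node)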
def elementwise_add(self, op, block):
axis = op.attr('axis')
x_shape = block.var(op.input('X')[0]).shape
@@ -128,12 +134,40 @@ class PaddleOpMapper(object):
return node
def softmax(self, op, block):
axis = op.attr('axis')
shape = block.var(op.output('Out')[0]).shape
if axis < 0:
axis += len(shape)
if axis == len(shape) - 1:
node = helper.make_node(
'Softmax',
inputs=op.input('X'),
outputs=op.output('Out'),
axis=op.attr('axis'))
return node
else:
perm = [i for i in range(len(shape))]
perm[-1] = axis
perm[axis] = len(shape) - 1
transpose_name0 = self.get_name(op.type, 'transpose')
transpose_node0 = helper.make_node(
'Transpose',
inputs=op.input('X'),
outputs=[transpose_name0],
perm=perm)
softmax_name = self.get_name(op.type, 'softmax')
softmax_node = helper.make_node(
'Softmax',
inputs=[transpose_name0],
outputs=[softmax_name],
axis=-1)
transpose_name1 = self.get_name(op.type, 'transpose')
transpose_node1 = helper.make_node(
'Transpose',
inputs=[softmax_name],
outputs=op.output('Out'),
perm=perm)
return [transpose_node0, softmax_node, transpose_node1]
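The transpose/softmax/transpose branch exists because ONNX Softmax (opsets up to 12) coerces its input to a 2-D matrix and normalizes over everything from axis onward, so it only matches Paddle's per-axis softmax when the target axis is the last dimension. A NumPy sketch (illustrative only) showing that swapping the target axis with the last axis, applying softmax there, and swapping back with the same permutation recovers the per-axis result:

import numpy as np

def softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

x = np.random.rand(2, 3, 4, 5).astype('float32')
axis = 1
perm = list(range(x.ndim))
perm[-1], perm[axis] = perm[axis], perm[-1]   # swap target axis with the last axis
# transpose -> softmax over the last axis -> transpose back with the same perm
y = softmax(x.transpose(perm), axis=-1).transpose(perm)
assert np.allclose(y, softmax(x, axis=axis))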
def scale(self, op, block):
scale = op.attr('scale')
@@ -261,21 +295,22 @@ class PaddleOpMapper(object):
inputs=op.input('X'),
outputs=op.output('Out'),
axis=op.attr('axis'))
return node
def slice(self, op, block):
axes = op.attr('axes')
starts = op.attr('starts')
ends = op.attr('ends')
axes_name = get_name(op.type, 'axes')
starts_name = get_name(op.type, 'starts')
ends_name = get_name(op.type, 'ends')
axes_node = make_constant_node(axes_name, onnx_pb.TensorProto.INT64,
axes)
starts_node = make_constant_node(starts_name, onnx_pb.TensorProto.INT64,
starts)
ends_node = make_constant_node(ends_name, onnx_pb.TensorProto.INT64,
ends)
axes_name = self.get_name(op.type, 'axes')
starts_name = self.get_name(op.type, 'starts')
ends_name = self.get_name(op.type, 'ends')
axes_node = self.make_constant_node(axes_name,
onnx_pb.TensorProto.INT64, axes)
starts_node = self.make_constant_node(starts_name,
onnx_pb.TensorProto.INT64, starts)
ends_node = self.make_constant_node(ends_name,
onnx_pb.TensorProto.INT64, ends)
node = helper.make_node(
"Slice",
inputs=[op.input('Input')[0], starts_name, ends_name, axes_name],
@@ -288,12 +323,14 @@ class PaddleOpMapper(object):
dtype = op.attr('dtype')
shape = op.attr('shape')
value = np.ones(shape) * value
if dtype == 2:
value = value.astype('int32')
node = helper.make_node(
'Constant',
inputs=[],
outputs=op.attr('Out'),
outputs=op.output('Out'),
value=helper.make_tensor(
name=op.attr('Out'),
name=op.output('Out')[0],
data_type=self.paddle_onnx_dtype_map[dtype],
dims=shape,
vals=value.tolist()))
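The dtype check uses Paddle's VarDesc dtype enum, in which 2 denotes INT32; paddle_onnx_dtype_map (defined elsewhere in this class) is assumed to translate that enum to ONNX TensorProto types. A hypothetical sketch of such a mapping, for illustration only:

from onnx import TensorProto

# Illustrative only -- the real mapping lives in PaddleOpMapper and may differ.
paddle_onnx_dtype_map = {
    2: TensorProto.INT32,   # Paddle INT32
    3: TensorProto.INT64,   # Paddle INT64
    5: TensorProto.FLOAT,   # Paddle FP32
    6: TensorProto.DOUBLE,  # Paddle FP64
}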
@@ -304,28 +341,45 @@ class PaddleOpMapper(object):
'Transpose',
inputs=op.input('X'),
outputs=op.output('Out'),
perm=op.attr('perm'))
perm=op.attr('axis'))
return node
def reshape2(self, op, block):
input_names = op.input_names
if 'Shape' in input_names and len(op.input('Shape')) > 0:
if len(op.input('ShapeTensor')) > 1:
cast_shape_nodes = list()
cast_shape_names = list()
for i in range(len(op.input('ShapeTensor'))):
dim = op.input('ShapeTensor')[i]
temp_name = self.get_name(op.type, 'shape.cast')
node = helper.make_node(
'Cast',
inputs=[dim],
outputs=[temp_name],
to=onnx_pb.TensorProto.INT64)
cast_shape_nodes.append(node)
cast_shape_names.append(temp_name)
temp_name = self.get_name(op.type, 'shape.concat')
shape_node = helper.make_node(
'Concat', inputs=cast_shape_names, outputs=[temp_name], axis=-1)
node = helper.make_node(
'Reshape',
inputs=[op.input('X')[0],
op.input('Shape')[0]],
inputs=[op.input('X')[0], temp_name],
outputs=op.output('Out'))
return cast_shape_nodes + [shape_node, node]
else:
shape = op.attr('shape')
shape_name = get_name(op.type, 'shape')
shape_node = make_constant_node(shape_name,
onnxpb.TensorProto.INT64, shape)
temp_name = self.get_name(op.type, 'shape.cast')
cast_shape_node = helper.make_node(
'Cast',
inputs=op.input('ShapeTensor'),
outputs=[temp_name],
to=onnx_pb.TensorProto.INT64)
node = helper.make_node(
'Reshape',
inputs=[op.input('X')[0], shape_name],
inputs=[op.input('X')[0], temp_name],
outputs=op.output('Out'))
return [shape_node, node]
return node
return [cast_shape_node, node]
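ONNX Reshape expects its target shape as a 1-D int64 tensor input rather than an attribute, which is why each scalar ShapeTensor entry is Cast to INT64 (and Concat'ed when there are several) before being fed to Reshape. A minimal sketch of the equivalent static-shape construction; the tensor names are placeholders:

from onnx import helper, TensorProto

# Constant int64 shape tensor, then a Reshape that consumes it as its second input.
shape_const = helper.make_node(
    'Constant', inputs=[], outputs=['target_shape'],
    value=helper.make_tensor('target_shape', TensorProto.INT64, [2], [4, -1]))
reshape = helper.make_node('Reshape', inputs=['x', 'target_shape'], outputs=['y'])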
def dropout(self, op, block):
dropout_mode = op.attr('dropout_implementation')
@@ -351,24 +405,119 @@ class PaddleOpMapper(object):
'ReduceMean',
inputs=op.input('X'),
outputs=op.output('Out'),
axes=op.attr('axes'),
axes=op.attr('dim'),
keepdims=op.attr('keep_dim'))
return node
def bilinear_interp(self, op, block):
input_names = op.input_names
coordinate_transformation_mode = 'half_pixel'
shape_dtype = block.var(op.input('OutSize')[0]).dtype
if op.attr('align_corners'):
coordinate_transformation_mode = 'align_corners'
if 'OutSize' in input_names and len(op.input('OutSize')) > 0:
roi_node = self.make_constant_node(
self.get_name(op.type, 'roi'), onnx_pb.TensorProto.FLOAT,
[1, 1, 1, 1, 1, 1, 1, 1])
roi_name = self.get_name(op.type, 'roi')
roi_node = self.make_constant_node(
roi_name, onnx_pb.TensorProto.FLOAT, [1, 1, 1, 1, 1, 1, 1, 1])
empty_name = self.get_name(op.type, 'empty')
empty_tensor = helper.make_tensor(
empty_name,
onnx_pb.TensorProto.FLOAT, (0, ),
np.array([]).astype('float32'),
raw=False)
empty_node = helper.make_node(
'Constant', [], outputs=[empty_name], value=empty_tensor)
shape_name0 = self.get_name(op.type, 'shape')
shape_node0 = helper.make_node(
'Shape', inputs=op.input('X'), outputs=[shape_name0])
starts_name = self.get_name(op.type, 'slice.starts')
starts_node = self.make_constant_node(
starts_name, onnx_pb.TensorProto.INT64, [0])
ends_name = self.get_name(op.type, 'slice.ends')
ends_node = self.make_constant_node(ends_name,
onnx_pb.TensorProto.INT64, [2])
shape_name1 = self.get_name(op.type, 'shape')
shape_node1 = helper.make_node(
'Slice',
inputs=[shape_name0, starts_name, ends_name],
outputs=[shape_name1])
shape_name2 = self.get_name(op.type, "shape.cast")
shape_node2 = helper.make_node(
'Cast',
inputs=op.input('OutSize'),
outputs=[shape_name2],
to=onnx_pb.TensorProto.INT64)
shape_name3 = self.get_name(op.type, "shape.concat")
shape_node3 = helper.make_node(
'Concat',
inputs=[shape_name1, shape_name2],
outputs=[shape_name3],
axis=0)
result_node = helper.make_node(
'Resize',
inputs=[op.input('X')[0], roi_name, empty_name, shape_name3],
outputs=op.output('Out'),
mode='linear',
coordinate_transformation_mode=coordinate_transformation_mode)
return [
roi_node, empty_node, shape_node0, starts_node, ends_node,
shape_node1, shape_node2, shape_node3, result_node
]
elif 'Scale' in input_names and len(op.input('Scale')) > 0:
node = helper.make_node(
'Resize',
inputs=[op.input('X')[0],
op.input('Scale')[0]],
outputs=op.output('Out'),
mode='linear',
coordinate_transformation_mode=coordinate_transformation_mode)
else:
out_shape = [op.attr('out_h'), op.attr('out_w')]
scale = op.attr('scale')
if out_shape.count(-1) > 0:
scale_name = self.get_name(op.type, 'scale')
scale_node = self.make_constant_node(
scale_name, onnx_pb.TensorProto.FLOAT, [1, 1, scale, scale])
roi_name = self.get_name(op.type, 'roi')
roi_node = self.make_constant_node(roi_name,
onnx_pb.TensorProto.FLOAT,
[1, 1, 1, 1, 1, 1, 1, 1])
node = helper.make_node(
'Resize',
inputs=[op.input('X')[0], roi_name, scale_name],
outputs=op.output('Out'),
mode='nearest',
coordinate_transformation_mode=coordinate_transformation_mode
)
return [scale_node, roi_node, node]
else:
raise Exception("Unexpected situation happend")
return node
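With the opset-11 Resize signature the inputs are (X, roi, scales, sizes): when an explicit output size is supplied, scales is passed as the empty float tensor built above, and roi only matters for the tf_crop_and_resize mode. The coordinate_transformation_mode attribute decides how output pixel coordinates map back to input coordinates; a sketch of the two mappings this converter selects between, following the ONNX Resize specification:

# Illustrative only: how one output coordinate maps to an input coordinate
# for the two modes used by this converter.
def half_pixel(x_resized, scale):
    return (x_resized + 0.5) / scale - 0.5

def align_corners(x_resized, length_original, length_resized):
    return x_resized * (length_original - 1.0) / (length_resized - 1.0)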
def nearest_interp(self, op, block):
input_names = op.input_names
coordinate_transformation_mode = 'half_pixel'
if op.attr('align_corners'):
coordinate_transformation_mode = 'align_corners'
if 'OutSize' in input_names and len(op.input('OutSize')) > 0:
node = helper.make_node(
'Resize',
inputs=[op.input('X')[0], '',
op.input('OutSize')[0]],
outputs=op.output('Out'))
outputs=op.output('Out'),
mode='nearest',
coordinate_transformation_mode=coordinate_transformation_mode)
elif 'Scale' in input_names and len(op.input('Scale')) > 0:
node = helper.make_node(
'Resize',
inputs=[op.input('X')[0],
op.input('Scale')[0]],
outputs=op.output('Out'))
outputs=op.output('Out'),
mode='nearest',
coordinate_transformation_mode=coordinate_transformation_mode)
else:
out_shape = [op.attr('out_h'), op.attr('out_w')]
scale = op.attr('scale')
@@ -384,7 +533,9 @@ class PaddleOpMapper(object):
'Resize',
inputs=[op.input('X')[0], roi_name, scale_name],
outputs=op.output('Out'),
mode='nearest')
mode='nearest',
coordinate_transformation_mode=coordinate_transformation_mode
)
return [scale_node, roi_node, node]
else:
raise Exception("Unexpected situation happend")
@@ -449,6 +600,23 @@ class PaddleOpMapper(object):
elem_type=self.paddle_onnx_dtype_map[var.dtype])
return tensor_info
def unsqueeze2(self, op, block):
node = helper.make_node(
'Unsqueeze',
inputs=op.input('X'),
outputs=op.output('Out'),
axes=op.attr('axes'))
return node
def arg_max(self, op, block):
node = helper.make_node(
'ArgMax',
inputs=op.input('X'),
outputs=op.output('Out'),
axis=op.attr('axis'),
keepdims=0)
return node
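ONNX ArgMax keeps the reduced dimension by default (keepdims=1), so keepdims=0 is set explicitly to match Paddle's arg_max, which drops that axis, as in this small NumPy illustration:

import numpy as np

x = np.array([[1, 5, 2], [7, 3, 9]])
# keepdims=0 in ONNX ArgMax drops the reduced axis, just like np.argmax:
print(np.argmax(x, axis=1))   # shape (2,), values [1, 2]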
def convert_weights(self, program):
var_names = program.global_block().vars
nodes = list()
@@ -477,9 +645,12 @@ class PaddleOpMapper(object):
unsupported_ops = set()
print("Translating PaddlePaddle to ONNX...\n")
for block in program.blocks:
for op in block.ops:
print('Translating op: {}'.format(op.type))
for i, op in enumerate(block.ops):
sys.stdout.write(
"\rTotal:{}, Current:{} : {} ".format(
len(block.ops), i + 1, op.type))
if not hasattr(self, op.type):
unsupported_ops.add(op.type)
continue
@@ -497,7 +668,7 @@ class PaddleOpMapper(object):
op_nodes.append(node)
if len(unsupported_ops) > 0:
print("There's {} ops are not supported yet".format(
print("\nThere's {} ops are not supported yet".format(
len(unsupported_ops)))
for op in unsupported_ops:
print("=========== {} ===========".format(op))
@@ -516,5 +687,5 @@ class PaddleOpMapper(object):
os.makedirs(save_dir)
with open(os.path.join(save_dir, 'x2paddle_model.onnx'), 'wb') as f:
f.write(model.SerializeToString())
print("Translated model saved in {}".format(
print("\nTranslated model saved in {}".format(
os.path.join(save_dir, 'x2paddle_model.onnx')))
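Once written, the exported file can be sanity-checked outside X2Paddle, for instance with the onnx checker and onnxruntime; a possible snippet, assuming both packages are installed and using a hypothetical save directory:

import onnx
from onnx import checker
import onnxruntime as rt

# 'save_dir' below is a hypothetical output directory, not taken from the diff.
model = onnx.load('save_dir/x2paddle_model.onnx')
checker.check_model(model)                      # structural validation
sess = rt.InferenceSession('save_dir/x2paddle_model.onnx')
print([inp.name for inp in sess.get_inputs()])  # list the graph's input names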