Commit 63ac4c2c authored by Macrobull

fix onnx.AttributeProto.STRING support, add interpolate ops

Parent de1648a8
......@@ -80,7 +80,7 @@ def build_value_refs(nodes):
def get_attribute_value2(attr):
"""
- get_attribute_value with tensor conversion
+ get_attribute_value enhanced
"""
if attr.type == onnx.AttributeProto.TENSOR:
......@@ -88,6 +88,9 @@ def get_attribute_value2(attr):
data = attr.t.raw_data
value = np.frombuffer(
data, dtype=dtype, count=(len(data) // dtype.itemsize))
elif attr.type == onnx.AttributeProto.STRING:
value = attr.s
value = value.decode() if isinstance(value, bytes) else value
else:
value = get_attribute_value(attr)
return value
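The new STRING branch exists because, under Python 3, onnx stores string attributes as bytes and the stock helper hands them back undecoded. A minimal standalone sketch of the behavior being fixed (requires only the onnx package):

import onnx
from onnx import helper

# make_attribute infers the STRING type and stores the value UTF-8 encoded
attr = helper.make_attribute('mode', 'nearest')
assert attr.type == onnx.AttributeProto.STRING
assert attr.s == b'nearest'  # raw bytes, not str

# mirror of the new branch above: decode so downstream code gets a plain str
value = attr.s
value = value.decode() if isinstance(value, bytes) else value
assert value == 'nearest'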
......
......@@ -3,6 +3,8 @@
"""
ONNX to Paddle fluid symbolic translation
TODO: move non-ONNX ops out to symbolic_aten.py, symbolic_caffe2.py ...
Created on Mon Feb 25 09:33:43 2019
@author: Macrobull
......@@ -288,6 +290,17 @@ def _assign(prog, attrs):
)
def _zeros_like(prog, val_ref, val_out, value_infos):
prog.Op(
'',
'Sub',
[val_ref, val_ref],
[val_out], # val
dict(axis=0),
value_infos,
)
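`_zeros_like` leans on an identity rather than a dedicated fill op: elementwise Sub of a tensor from itself yields zeros of the same shape and dtype, so only ops the converter already maps are needed. The same identity in plain numpy, as a sketch of why this works:

import numpy as np

x = np.arange(6, dtype=np.float32).reshape(2, 3)
zeros = x - x  # same shape and dtype as x, all zeros
assert zeros.shape == x.shape and not zeros.any()
# caveat: NaN or inf entries would propagate, since NaN - NaN is NaN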
def _pad_if_asymmetric(prog, pads, val_name, value_infos): # pads: SSEE
assert len(pads) & 1 == 0
ndims = len(pads) // 2
......@@ -331,7 +344,7 @@ def _adaptive_pool(prog, pool_type, inputs, outputs, attrs, name=''):
# interpretation
pool_size = attrs['output_size'] # required
poolnd = len(pool_size)
- assert 2 <= poolnd <= 3, 'only pool2d and pool3d supported'
+ assert 2 <= poolnd <= 3, 'only pool2d and pool3d is supported'
fluid_op = 'adaptive_pool{}d'.format(poolnd)
name_attr = ', name={}'.format(repr(name)) if name else ''
......@@ -386,7 +399,7 @@ def _global_pool(prog, pool_type, inputs, outputs, attrs, value_infos, name=''):
poolnd = len(input_shape) - 2 # NC...
elif output_shape:
poolnd = len(output_shape) - 2 # NC...
- assert 2 <= poolnd <= 3, 'only pool2d and pool3d supported'
+ assert 2 <= poolnd <= 3, 'only pool2d and pool3d is supported'
fluid_op = 'pool{}d'.format(poolnd)
name_attr = ', name={}'.format(repr(name)) if name else ''
......@@ -430,10 +443,10 @@ def _pool(prog, pool_type, inputs, outputs, attrs, value_infos, name=''):
# interpretation
assert attrs.get(
'auto_pad',
- 'NOTSET') == 'NOTSET', 'only auto_pad = NOTSET supported' # optional
+ 'NOTSET') == 'NOTSET', 'only auto_pad = NOTSET is supported' # optional
pool_size = attrs['kernel_shape'] # required
poolnd = len(pool_size)
- assert 2 <= poolnd <= 3, 'only pool2d and pool3d supported'
+ assert 2 <= poolnd <= 3, 'only pool2d and pool3d is supported'
fluid_op = 'pool{}d'.format(poolnd)
strides = attrs.get('strides', [1] * poolnd) # optional
......@@ -538,14 +551,68 @@ def _roi_pool(prog, fluid_op, inputs, outputs, attrs, value_infos, name):
)
- def _zeros_like(prog, val_ref, val_out, value_infos):
- prog.Op(
- '',
- 'Sub',
- [val_ref, val_ref],
- [val_out], # val
- dict(axis=0),
- value_infos,
def _interpolate(prog, inputs, outputs, attrs, value_infos, name=''):
# I/O
val_x, val_scales = inputs
val_y, = outputs
var_x = _make_var_name(val_x)
var_y = _make_var_name(val_y)
# interpretation
# output shape
out_shape_ = _shape_or_none(value_infos, val_y)
if out_shape_ is not None:
assert len(out_shape_) == 4, 'only 4-D Tensor as X and Y supported'
out_shape_ = out_shape_[2:]
# try scales
scales = _const_weight_or_none(value_infos, val_scales)
if scales is not None:
assert len(scales) == 4, 'only 4-D Tensor as X and Y supported'
assert scales[0] == 1 and scales[
1] == 1, 'only scale on (NC)HW supported'
assert scales[2] == scales[
3], 'only aspect-ratio-invariant scale supported'
scale = scales[2] if scales else None
# try input shape
if scale is None:
assert out_shape_, 'neither scales nor output shape is available'
out_shape = out_shape_
else:
out_shape = None
if out_shape_ is None:
in_shape = _shape_or_none(value_infos, val_x)
assert in_shape is not None, 'out_shape required but not inferrable'
assert len(in_shape) == 4, 'only 4-D Tensor as X and Y supported'
out_shape_ = [in_shape[2] * scale, in_shape[3] * scale]
mode = attrs.get('mode', 'nearest')
fluid_op = 'resize_{}'.format(mode) # not sure bilinear will be linear?
name_attr = ', name={}'.format(repr(name)) if name else ''
# generation
prog.Code('{} = layers.{}({}'
', scale={}'
', out_shape={}'
'{})'.format(
var_y,
fluid_op,
var_x,
# attrs
scale,
out_shape,
name_attr,
))
fluid_op = '{}_interp'.format(mode)
prog.VarDesc(var_y)
prog.OpDesc(
fluid_op,
([var_x], 'X'),
([var_y], 'Out'),
dict(
interp_method=mode,
out_h=out_shape_[0],
out_w=out_shape_[1],
),
)
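`_interpolate` has to reconcile two possible sources for the target size: the constant `scales` input (NCHW order, with the N and C scales pinned to 1) and the inferred shape of Y; when only `scales` is known, out_h/out_w are derived from X's shape. A standalone sketch of just that resolution step (the function and argument names are hypothetical, not part of the converter):

def resolve_interp_target(scales, y_shape, x_shape):
    # scales : ONNX 'scales' input as 4 floats, or None if not constant
    # y_shape: inferred NCHW shape of Y, or None
    # x_shape: inferred NCHW shape of X, or None
    if scales is not None:
        assert scales[0] == 1 and scales[1] == 1, 'only (NC)HW scaling'
        assert scales[2] == scales[3], 'aspect ratio must be preserved'
        scale = scales[2]
    else:
        scale = None
    if y_shape is not None:
        return scale, list(y_shape[2:])  # trust the inferred output size
    assert scale is not None and x_shape is not None, 'nothing to infer from'
    return scale, [int(x_shape[2] * scale), int(x_shape[3] * scale)]

# e.g. scales=[1, 1, 2, 2] on a 1x3x16x16 input -> (2, [32, 32])
assert resolve_interp_target([1, 1, 2, 2], None, [1, 3, 16, 16]) == (2, [32, 32])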
......@@ -565,21 +632,6 @@ def AdaptiveMaxPool(prog, inputs, outputs, attrs, *args, name='', **kwargs):
return _adaptive_pool(prog, 'max', inputs, outputs, attrs, name=name)
- def AveragePool(prog,
- inputs,
- outputs,
- attrs,
- value_infos,
- name='',
- *args,
- **kwargs):
- """
- onnx::AveragePool-10:
- """
- return _pool(prog, 'avg', inputs, outputs, attrs, value_infos, name=name)
def AffineGrid(prog, inputs, outputs, attrs, *args, name='', **kwargs):
"""
aten::affine_grid
......@@ -616,6 +668,21 @@ def AffineGrid(prog, inputs, outputs, attrs, *args, name='', **kwargs):
)
def AveragePool(prog,
inputs,
outputs,
attrs,
value_infos,
name='',
*args,
**kwargs):
"""
onnx::AveragePool-10:
"""
return _pool(prog, 'avg', inputs, outputs, attrs, value_infos, name=name)
def BatchNormalization(prog,
inputs,
outputs,
......@@ -805,6 +872,7 @@ def Constant(prog, inputs, outputs, attrs, value_infos, *args, **kwargs):
# generation
value = value.tolist()
if len(value) == 1: # scalar
shape = [1] # WORKAROUND: bad scalar support
value = value[0]
fluid_op = 'fill_constant'
prog.Code('{} = layers.{}(shape={}, dtype={}, value={})'.format(
......@@ -874,7 +942,7 @@ def Conv(prog,
*args,
**kwargs):
"""
- onnx::ConstantOfShape-1:
+ onnx::Conv-1:
"""
# I/O
......@@ -888,13 +956,13 @@ def Conv(prog,
# interpretation
assert attrs.get(
- 'auto_pad',
- 'NOTSET') == 'NOTSET', 'only auto_pad == NOTSET supported' # optional
+ 'auto_pad', 'NOTSET'
+ ) == 'NOTSET', 'only auto_pad == NOTSET is supported' # optional
kernel_shape = _shape(value_infos, val_w)[2:] # OI...
assert kernel_shape == attrs[
'kernel_shape'], 'kernel_shape in attr unmatches value_info' # HW
convnd = len(kernel_shape)
- assert 2 <= convnd <= 3, 'only conv2d and conv3d supported'
+ assert 2 <= convnd <= 3, 'only conv2d and conv3d is supported'
num_out_channels = _shape(value_infos, val_w)[0] # OI...
fluid_op = 'conv{}d'.format(convnd)
......@@ -994,16 +1062,16 @@ def ConvTranspose(prog,
# interpretation
assert attrs.get(
- 'auto_pad',
- 'NOTSET') == 'NOTSET', 'only auto_pad == NOTSET supported' # optional
- assert sum(
- attrs.get('output_padding',
- [])) == 0, 'only zero output_padding supported' # optional ?
+ 'auto_pad', 'NOTSET'
+ ) == 'NOTSET', 'only auto_pad == NOTSET is supported' # optional
+ assert sum(attrs.get(
+ 'output_padding',
+ [])) == 0, 'only zero output_padding is supported' # optional ?
kernel_shape = _shape(value_infos, val_w)[2:] # IO...
assert kernel_shape == attrs[
'kernel_shape'], 'kernel_shape in attr unmatches value_info' # HW
convnd = len(kernel_shape)
- assert 2 <= convnd <= 3, 'only conv2d_transpose and conv3d_transpose supported'
+ assert 2 <= convnd <= 3, 'only conv2d_transpose and conv3d_transpose is supported'
num_out_channels = _shape(value_infos, val_w)[1] # IO...
fluid_op = 'conv{}d_transpose'.format(convnd)
......@@ -1285,14 +1353,6 @@ def MaxRoiPool(prog, inputs, outputs, attrs, value_infos, name, *args,
_roi_pool(prog, 'roi_pool', inputs, outputs, attrs, value_infos, name)
- def RoiAlign(prog, inputs, outputs, attrs, value_infos, name, *args, **kwargs):
- """
- caffe2::RoiAlign
- """
- _roi_pool(prog, 'roi_align', inputs, outputs, attrs, value_infos, name)
def Pad(prog, inputs, outputs, attrs, value_infos, name='', *args, **kwargs):
"""
onnx::Pad-2:
......@@ -1502,6 +1562,54 @@ def Reshape(prog, inputs, outputs, attrs, value_infos, name, *args, **kwargs):
)
def Resize(prog, inputs, outputs, attrs, value_infos, name='', *args, **kwargs):
"""
onnx::Resize-10:
"""
return _interpolate(prog, inputs, outputs, attrs, value_infos, name=name)
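Resize-10 is what this dispatches on: two inputs (X and a scales tensor) plus a mode attribute whose values are 'nearest' and 'linear', which `_interpolate` turns into resize_nearest/resize_bilinear (hence the "not sure bilinear will be linear?" note above). A sketch of such a node, assuming only the onnx helper API:

from onnx import helper

# ONNX Resize-10: Y = Resize(X, scales), mode in {'nearest', 'linear'}
node = helper.make_node(
    'Resize',
    inputs=['X', 'scales'],
    outputs=['Y'],
    mode='nearest',
)
assert node.op_type == 'Resize'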
def RoiAlign(prog, inputs, outputs, attrs, value_infos, name, *args, **kwargs):
"""
caffe2::RoiAlign
"""
_roi_pool(prog, 'roi_align', inputs, outputs, attrs, value_infos, name)
#def Shape(
# prog, inputs, outputs, attrs, value_infos,
# *args, **kwargs):
# """
# onnx::ConstantOfShape-1:
# """
#
# # I/O
# val_data, = inputs
# val_shape, = outputs
# var_data = _make_var_name(val_data)
# var_shape = _make_var_name(val_shape)
#
# # interpretation
# fluid_op = 'shape'
## value_infos[val_shape]['remove_batch'] = False
#
# # generation
# prog.Code('{} = layers.{}({})'
# .format(var_shape,
# fluid_op,
# var_data,
# # attrs
# ))
# prog.VarDesc(var_shape) # , _value_info_or_none(value_infos, val_shape))
# prog.OpDesc(fluid_op,
# ([var_data], 'X'),
# ([var_shape], 'Out'),
# dict(),
# )
def Slice(prog, inputs, outputs, attrs, value_infos, *args, **kwargs):
"""
onnx::Slice-1:9
......@@ -1523,15 +1631,15 @@ def Slice(prog, inputs, outputs, attrs, value_infos, *args, **kwargs):
# ndims = len(shape)
# for idx, value in enumerate(axes):
# if value > ONNX_INT_MAX // 2:
- # axes[idx] = ndims + value - ONNX_INT_MAX - 1
+ # axes[idx] = ndims + value - ONNX_INT_MAX
# FIXME: Paddle 1.3 doc: 'to slice to the end of a dimension of unknown size, passing INT_MAX is recommended' does not seem to work?
for idx, value in enumerate(starts):
if value > ONNX_INT_MAX // 2:
- value = value - ONNX_INT_MAX - 1
+ value = value - ONNX_INT_MAX
starts[idx] = shape[axes[idx]] + value
for idx, value in enumerate(ends):
if value > ONNX_INT_MAX // 2:
- value = value - ONNX_INT_MAX - 1
+ value = value - ONNX_INT_MAX
ends[idx] = shape[axes[idx]] + value
# generation
......@@ -1561,6 +1669,47 @@ def Slice(prog, inputs, outputs, attrs, value_infos, *args, **kwargs):
)
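The `- ONNX_INT_MAX` change in the Slice hunk above fixes an off-by-one in decoding ONNX's "slice to the end" convention: exporters emit INT_MAX for an open end, and subtracting the sentinel without the extra `- 1` makes it land exactly on the dimension size. A self-contained sketch, assuming ONNX_INT_MAX is the 64-bit sentinel 2**63 - 1 (the actual constant lives elsewhere in the converter):

ONNX_INT_MAX = 2 ** 63 - 1  # assumed value of the converter's constant

def normalize_index(value, dim_size):
    # decode the 'to the end' sentinel into a concrete index
    if value > ONNX_INT_MAX // 2:
        value = value - ONNX_INT_MAX  # fixed form: the sentinel maps to offset 0
        value = dim_size + value
    return value

assert normalize_index(ONNX_INT_MAX, 5) == 5  # full dimension kept
assert normalize_index(3, 5) == 3             # ordinary indices untouched
# the previous '- ONNX_INT_MAX - 1' form returned 4 here, dropping one element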
def Split(prog, inputs, outputs, attrs, *args, name='', **kwargs):
"""
onnx::Split-2:
"""
# I/O
val_input, = inputs
var_outs = [_make_var_name(val) for val in outputs]
var_input = _make_var_name(val_input)
# interpretation
fluid_op = 'split'
split = attrs['split'] # required
axis = attrs.get('axis', 0) # optional
name_attr = ', name={}'.format(repr(name)) if name else ''
# generation
prog.Code('{} = layers.{}({}, {}'
', dim={}'
'{})'.format(
', '.join(var_outs),
fluid_op,
var_input,
split,
# attrs
axis,
name_attr,
))
for var_out in var_outs:
prog.VarDesc(var_out)
prog.OpDesc(
fluid_op,
(var_input, 'X'),
([var_outs], *(['Out'] * len(var_outs))),
dict(
axis=axis,
sections=split,
),
)
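For a concrete node, the Split mapping above emits a single fluid call; e.g. an onnx::Split over outputs 'a' and 'b' with a hypothetical input 'x' and generated name 'split_0':

# attrs = {'split': [2, 3], 'axis': 1}; outputs = ['a', 'b']; input = 'x'
# the Code template above then emits this line into the generated program:
#   a, b = layers.split(x, [2, 3], dim=1, name='split_0')
# and the matching OpDesc records axis=1, sections=[2, 3] for the 'split' op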
def Sum(prog, inputs, outputs, *args, **kwargs):
"""
onnx::Sum-8:
......@@ -1593,7 +1742,7 @@ def Sum(prog, inputs, outputs, *args, **kwargs):
def Tile(prog, inputs, outputs, attrs, value_infos, name='', *args, **kwargs):
"""
- onnx::ConstantOfShape-6:
+ onnx::Tile-1:
"""
# I/O
......@@ -1630,77 +1779,19 @@ def Tile(prog, inputs, outputs, attrs, value_infos, name='', *args, **kwargs):
)
- #def Shape(
- # prog, inputs, outputs, attrs, value_infos,
- # *args, **kwargs):
- # """
- # onnx::ConstantOfShape-1:
- # """
- #
- # # I/O
- # val_data, = inputs
- # val_shape, = outputs
- # var_data = _make_var_name(val_data)
- # var_shape = _make_var_name(val_shape)
- #
- # # interpretation
- # fluid_op = 'shape'
- ## value_infos[val_shape]['remove_batch'] = False
- #
- # # generation
- # prog.Code('{} = layers.{}({})'
- # .format(var_shape,
- # fluid_op,
- # var_data,
- # # attrs
- # ))
- # prog.VarDesc(var_shape) # , _value_info_or_none(value_infos, val_shape))
- # prog.OpDesc(fluid_op,
- # ([var_data], 'X'),
- # ([var_shape], 'Out'),
- # dict(),
- # )
- def Split(prog, inputs, outputs, attrs, *args, name='', **kwargs):
+ def Upsample(prog,
+ inputs,
+ outputs,
+ attrs,
+ value_infos,
+ name='',
+ *args,
+ **kwargs):
"""
- onnx::Split-2:
+ onnx::Upsample-9:9
"""
- # I/O
- val_input, = inputs
- var_outs = [_make_var_name(val) for val in outputs]
- var_input = _make_var_name(val_input)
- # interpretation
- fluid_op = 'split'
- split = attrs['split'] # required
- axis = attrs.get('axis', 0) # optional
- name_attr = ', name={}'.format(repr(name)) if name else ''
- # generation
- prog.Code('{} = layers.{}({}, {}'
- ', dim={}'
- '{})'.format(
- ', '.join(var_outs),
- fluid_op,
- var_input,
- split,
- # attrs
- axis,
- name_attr,
- ))
- for var_out in var_outs:
- prog.VarDesc(var_out)
- prog.OpDesc(
- fluid_op,
- (var_input, 'X'),
- ([var_outs], *(['Out'] * len(var_outs))),
- dict(
- axis=axis,
- sections=split,
- ),
- )
+ return _interpolate(prog, inputs, outputs, attrs, value_infos, name=name)
if __name__ == '__main__':
......
......@@ -158,9 +158,12 @@ class Program(object):
raise ValueError('unsupported attribute {} = {}'.format(
key, value))
else: # WORKAROUND: shape of scalars is []
- od_attr.type = framework_pb2.INTS
- logger.warning('using attribute %s = %s as INTS', key,
- value)
+ raise ValueError('unsupported attribute {} = {}'.format(
+ key, value))
+ # od_attr.type = framework_pb2.INTS
+ # logger.warning('using attribute %s = %s as INTS', key, value)
else:
raise ValueError('unsupported attribute {} = {}'.format(
key, value))
......