Commit 1db5fcf7 authored by Channingss

add support for Expand, fix bug for elementwise_map

Parent e2867f5a
@@ -263,21 +263,24 @@ class ONNXOpMapper(OpMapper):
     def elementwise_map(self, node):
         assert node.layer_type in self.elementwise_ops
         op_type = self.elementwise_ops[node.layer_type]
         val_x = self.graph.get_input_node(node, idx=0, copy=True)
         val_y = self.graph.get_input_node(node, idx=1, copy=True)
-        if len(val_x.out_shapes[0]) < len(val_y.out_shapes[0]):
-            val_x, val_y = val_y, val_x
         val_y_shape = val_y.out_shapes[0]
         val_x_shape = val_x.out_shapes[0]
+        if len(val_x_shape) < len(val_y_shape):
+            val_x, val_y = val_y, val_x
+        str_y_shape = ','.join(str(e) for e in val_y_shape)
+        str_x_shape = ','.join(str(e) for e in val_x_shape)
         slice_idx = 0
-        for dim in val_y_shape:
-            if dim == 1:
-                slice_idx += 1
-            else:
-                break
+        if str_y_shape not in str_x_shape:
+            for dim in val_y_shape:
+                if dim == 1:
+                    slice_idx += 1
+                else:
+                    break
         attr = {"name": string(node.layer_name)}
         if slice_idx < len(val_y_shape) and slice_idx > 0:
             val_y_reshaped = val_y_shape[slice_idx:]
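Note on the elementwise_map fix: Paddle's elementwise ops broadcast y against x starting from a given axis, so the mapper strips y's leading 1-dims to find that axis. The commit now reads both shapes before the operand swap and skips the stripping entirely when y's shape already appears as a contiguous run inside x's shape. A minimal sketch of the new guard, in plain Python with made-up shapes (the comma-joined substring test is the commit's own heuristic and can match accidentally, e.g. '1,2' inside '11,2'):

```python
# Sketch of the slice_idx computation after this commit (hypothetical helper).
def leading_ones_to_strip(x_shape, y_shape):
    str_x = ','.join(str(e) for e in x_shape)
    str_y = ','.join(str(e) for e in y_shape)
    slice_idx = 0
    if str_y not in str_x:  # guard added by this commit
        for dim in y_shape:
            if dim == 1:
                slice_idx += 1
            else:
                break
    return slice_idx

print(leading_ones_to_strip([8, 3, 32, 32], [1, 3, 1, 1]))  # 1: strip one leading 1-dim
print(leading_ones_to_strip([8, 1, 32, 32], [1, 32, 32]))   # 0: y is already a sub-shape of x
```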
@@ -353,47 +356,52 @@ class ONNXOpMapper(OpMapper):
         val_scales = self.graph.get_input_node(node, idx=1, copy=True)
         val_y = self.graph.get_node(node.layer.output[0], copy=True)
-        out_shape_ = val_y.out_shapes[0]
-        if out_shape_ is not None:
-            assert len(out_shape_) == 4, 'only 4-D Tensor as X and Y supported'
-            out_shape_ = out_shape_[2:]
+        out_shape = val_y.out_shapes[0]
+        if out_shape is not None:
+            assert len(out_shape) == 4, 'only 4-D Tensor as X and Y supported'
+            out_shape = out_shape[2:]
         scales = _const_weight_or_none(val_scales)
+        if isinstance(val_scales, ONNXGraphNode):
+            scales, _, _ = self.get_dynamic_shape(val_scales.layer_name)
+        attr = {'name': string(node.layer_name)}
+        use_scales = True
         if scales is not None:
-            assert len(scales) == 4, 'only 4-D Tensor as X and Y supported'
-            assert scales[0] == 1 and scales[
-                1] == 1, 'only scale on (NC)HW supported'
-            assert scales[2] == scales[
-                3], 'only aspect-ratio-invariant scale supported'
+            try:
+                assert len(scales) == 4, 'only 4-D Tensor as X and Y supported'
+                assert scales[0] == 1 and scales[
+                    1] == 1, 'only scale on (NC)HW supported'
+                assert scales[2] == scales[
+                    3], 'only aspect-ratio-invariant scale supported'
+            except AssertionError:
+                use_scales = False
         scale = scales[2] if scales else None
         if scale is None:
-            assert out_shape_, 'neither scales nor output shape is available'
-            out_shape = out_shape_
+            assert out_shape, 'neither scales nor output shape is available'
         else:
-            out_shape = None
-            if out_shape_ is None:
+            if out_shape is None:
                 in_shape = val_x.out_shapes[0]
                 assert in_shape is not None, 'out_shape required but not inferrable'
                 assert len(
                     in_shape) == 4, 'only 4-D Tensor as X and Y supported'
-                out_shape_ = [in_shape[2] * scale, in_shape[3] * scale]
+                out_shape = [in_shape[2] * scale, in_shape[3] * scale]
         mode = node.get_attr('mode', 'nearest')
         fluid_op = 'resize_{}'.format(mode)
         if 'linear' in mode:
             print(
-                'Warnning: paddle not support resize wiht mode: linear, we use bilinear replace linear'
+                'Warning: Paddle does not support resize with mode: linear, using bilinear instead'
             )
             fluid_op = 'resize_bilinear'
-        if isinstance(val_scales, ONNXGraphNode):
-            scale, _, _ = self.get_dynamic_shape(val_scales.layer_name)
-        attr = {
-            'scale': scale,
-            'out_shape': out_shape,
-            'name': string(node.layer_name)
-        }
+        if use_scales and scale is not None:
+            attr['scale'] = scale
+        else:
+            attr['out_shape'] = out_shape
         node.fluid_code.add_layer(fluid_op,
                                   inputs=val_x,
                                   output=node,
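Note on the _interpolate rework: scales may now be evaluated from a dynamic graph node, the validity asserts are downgraded to a soft check, and the emitted attr carries either 'scale' or 'out_shape', never both. A minimal sketch of the selection rule (standalone helper with a hypothetical name, not part of the mapper):

```python
def resize_attr(scales, out_shape, layer_name):
    """Pick 'scale' when the scales tensor passes validation, else fall back to 'out_shape'."""
    use_scales = True
    if scales is not None:
        try:
            assert len(scales) == 4, 'only 4-D Tensor as X and Y supported'
            assert scales[0] == 1 and scales[1] == 1, 'only scale on (NC)HW supported'
            assert scales[2] == scales[3], 'only aspect-ratio-invariant scale supported'
        except AssertionError:
            use_scales = False
    scale = scales[2] if scales else None
    attr = {'name': layer_name}
    if use_scales and scale is not None:
        attr['scale'] = scale
    else:
        attr['out_shape'] = out_shape
    return attr

print(resize_attr([1, 1, 2, 2], None, 'resize_0'))      # uses scale=2
print(resize_attr([1, 1, 2, 3], [64, 96], 'resize_1'))  # invalid scales -> out_shape
```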
@@ -493,7 +501,7 @@ class ONNXOpMapper(OpMapper):
                 'using value as 1-D tensor may lead to fails',
                 val_output.layer_name, val_output.layer_name)
-        if len(value) == 1:  # scalar
+        if len(value) == 1:
             value = value.tolist()
             shape = [1]
             value = value[0]
@@ -520,49 +528,30 @@ class ONNXOpMapper(OpMapper):
                                   param_attr=attr)

     def Resize(self, node):
+        self._interpolate(node)
+
+    def Upsample(self, node):
+        self._interpolate(node)
+
+    def Expand(self, node):
         val_x = self.graph.get_input_node(node, idx=0, copy=True)
-        val_scales = self.graph.get_input_node(node, idx=1, copy=True)
+        val_shape = self.graph.get_input_node(node, idx=1, copy=True)
         val_y = self.graph.get_node(node.layer.output[0], copy=True)
-        out_shape_ = val_y.out_shapes[0]
-        if out_shape_ is not None:
-            assert len(out_shape_) == 4, 'only 4-D Tensor as X and Y supported'
-            out_shape_ = out_shape_[2:]
-        scales = _const_weight_or_none(val_scales)
-        if scales is not None:
-            assert len(scales) == 4, 'only 4-D Tensor as X and Y supported'
-            assert scales[0] == 1 and scales[
-                1] == 1, 'only scale on (NC)HW supported'
-            assert scales[2] == scales[
-                3], 'only aspect-ratio-invariant scale supported'
-        scale = scales[2] if scales else None
-        if scale is None:
-            assert out_shape_, 'neither scales nor output shape is available'
-            out_shape = out_shape_
-        else:
-            out_shape = None
-            if out_shape_ is None:
-                in_shape = val_x.out_shapes[0]
-                assert in_shape is not None, 'out_shape required but not inferrable'
-                assert len(
-                    in_shape) == 4, 'only 4-D Tensor as X and Y supported'
-                out_shape_ = [in_shape[2] * scale, in_shape[3] * scale]
-        mode = node.get_attr('mode', 'nearest')
-        fluid_op = 'resize_{}'.format(mode)
-        attr = {
-            'scale': scale,
-            'out_shape': out_shape,
-            'name': string(node.layer_name)
-        }
-        node.fluid_code.add_layer(fluid_op,
-                                  inputs=val_x,
-                                  output=node,
+        out_shape = node.out_shapes[0]
+        name_ones = node.layer_name + '_ones'
+        attr_ones = {'shape': out_shape, 'dtype': string('int64')}
+        node.fluid_code.add_layer('ones',
+                                  inputs=None,
+                                  output=name_ones,
+                                  param_attr=attr_ones)
+        inputs = {'x': name_ones, 'y': val_x}
+        attr = {'name': string(node.layer_name)}
+        node.fluid_code.add_layer('elementwise_mul',
+                                  inputs=inputs,
+                                  output=node.layer_name,
                                   param_attr=attr)

-    def Upsample(self, node):
-        self._interpolate(node)
-
     def Gather(self, node):
         val_x = self.graph.get_input_node(node, idx=0, copy=True)
         indices = self.graph.get_input_node(node, idx=1, copy=True)
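Note on the new Expand mapper: there is no direct Expand-to-target-shape op used here, so the commit emulates it with broadcasting: materialize a ones tensor of the target shape and elementwise-multiply it with the input. A NumPy sketch of why this is equivalent:

```python
import numpy as np

# ONNX Expand broadcasts x up to a target shape; multiplying by ones of that
# shape produces the same result via standard broadcasting rules.
x = np.array([[1], [2], [3]])  # shape (3, 1)
target = (3, 4)
expanded = np.ones(target, dtype=x.dtype) * x
print(expanded.shape)                                        # (3, 4)
print(np.array_equal(expanded, np.broadcast_to(x, target)))  # True
```

One caveat worth noting: the generated ones tensor is created as int64 regardless of the input's dtype, so float inputs may need a cast for the elementwise multiply to be valid.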
@@ -716,7 +705,7 @@ class ONNXOpMapper(OpMapper):
             'dim': axis,
             'name': string(node.layer_name)
         }
-        # generation
         node.fluid_code.add_layer('split',
                                   inputs=val_x,
                                   output=val_y,
@@ -731,21 +720,15 @@ class ONNXOpMapper(OpMapper):
         if isinstance(val_shape, ONNXGraphDataNode):
             self.omit_nodes.append(val_shape.layer_name)

+        attr = {'name': string(node.layer_name)}
         # catch dynamic graph shape
         if isinstance(val_shape, ONNXGraphNode):
             shape, _, _ = self.get_dynamic_shape(val_shape.layer_name)
+            attr['actual_shape'] = val_shape.layer_name
         if shape is None:
             shape = val_reshaped.out_shapes[0]
-            shape_dtype = val_shape.dtype
-            if shape_dtype is None:
-                _logger.warning(
-                    'in op %s(%s -> Reshape -> %s): '
-                    'dtype of input "shape" not inferred, int32 assumed',
-                    node.layer_name, val_x.layer_name, val_reshaped.layer_name)
-                shape_dtype = _np.dtype('int32')
         if shape is None:
             shape = [1, -1]
             _logger.warning(
@@ -753,8 +736,8 @@ class ONNXOpMapper(OpMapper):
                 'input "shape" not inferred, use [1, -1] as dummy value, '
                 'the behavior of Paddle fluid maybe undefined', node.layer_name,
                 val_x.layer_name, val_reshaped.layer_name)
-        attr = {'shape': shape, 'name': string(node.layer_name)}
+        attr['shape'] = shape
         node.fluid_code.add_layer('reshape',
                                   inputs=val_x,
                                   output=node,
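Note on the Reshape change: the attr dict is now built incrementally; when the shape input is a dynamic graph node, its layer name is recorded as 'actual_shape' so the runtime tensor can override the statically inferred 'shape'. The fallback chain for the static shape, sketched with hypothetical names:

```python
import logging
_logger = logging.getLogger(__name__)

def resolve_static_shape(dynamic_shape, inferred_shape):
    # Prefer the shape evaluated from the graph, then the inferred output
    # shape; as a last resort use [1, -1] and warn, since the behavior of
    # the converted model is then undefined.
    shape = dynamic_shape if dynamic_shape is not None else inferred_shape
    if shape is None:
        shape = [1, -1]
        _logger.warning('input "shape" not inferred, using [1, -1] as dummy value')
    return shape
```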