Unverified commit ec4e2518 authored by mamingjie-China, committed by GitHub

Merge pull request #5 from PaddlePaddle/develop

Update data
......@@ -44,8 +44,6 @@ class ONNXGraphNode(GraphNode):
self.layer_type = layer.op_type
self.fluid_code = FluidCode()
self.attr_map = self.get_attr_map()
self.dtype_map = {1: "float32", 3: "int32", 9: "int64"}
self.weight_inputs = list()
self.out_shapes = list()
self.dtype = None
self.which_child = {}
......@@ -206,30 +204,39 @@ class ONNXGraph(Graph):
# generate connections between nodes for the topological sort
for layer_name, node in self.node_map.items():
if isinstance(node, ONNXGraphNode):
for idx, in_node in enumerate(node.layer.input):
if in_node not in self.node_map:
flag = 0
for nd in self.model.node:
for idx, opt in enumerate(nd.output):
if opt == in_node:
self.connect(nd.name, layer_name)
flag = 1
node.which_child[nd.name] = idx
self.node_map[nd.name].index = 0
break
if flag == 1:
break
if flag == 0:
raise Exception(
'input[{}] of node[{}] does not exist in node_map'
.format(in_node, layer_name))
else:
self.connect(in_node, layer_name)
self.build_connection(layer_name, node)
# generate the topological ordering
super(ONNXGraph, self).build()
self.input_nodes = self.place_holder_nodes
def build_connection(self, layer_name, node):
"""
find connection for nodes
"""
for idx, in_node in enumerate(node.layer.input):
if in_node == '':
continue
if in_node not in self.node_map:
flag = 0
for nd in self.model.node:
for idx, opt in enumerate(nd.output):
if opt == in_node:
self.connect(nd.name, layer_name)
flag = 1
node.which_child[nd.name] = idx
self.node_map[nd.name].index = 0
break
if flag == 1:
break
if flag == 0:
raise Exception(
'input[{}] of node[{}] does not exist in node_map'.
format(in_node, layer_name))
else:
self.connect(in_node, layer_name)
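Note on the hunk above: when an input tensor is not in node_map, the new build_connection scans every node's output list to find the producer, and which_child records which output slot feeds this layer. A minimal standalone sketch of that lookup, using hypothetical plain-dict nodes in place of the real ONNX protobuf objects:

    # Toy stand-in for build_connection's producer lookup. The node dicts
    # and names here are illustrative, not the real ONNX structures.
    def find_producer(nodes, tensor_name):
        for nd in nodes:
            for idx, out in enumerate(nd['outputs']):
                if out == tensor_name:
                    return nd['name'], idx  # which_child stores this idx
        raise Exception(
            'input[{}] does not exist in node_map'.format(tensor_name))

    nodes = [
        {'name': 'split_0', 'outputs': ['y0', 'y1']},
        {'name': 'relu_0', 'outputs': ['z']},
    ]
    print(find_producer(nodes, 'y1'))  # ('split_0', 1)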
def get_input_node(self, node, idx=0, copy=False):
if len(node.which_child) == 0:
ipt_node = super(ONNXGraph, self).get_node(node.inputs[idx], copy)
......@@ -450,7 +457,6 @@ class ONNXDecoder(object):
"""
make a valid code name for ParamAttr
"""
if name == '':
raise ValueError('name should not be empty')
for s in ' .*?\\/-:':
......@@ -473,6 +479,9 @@ class ONNXDecoder(object):
node.name = node.output[0]
node.name = self.make_variable_name(node.name)
for i in range(len(node.input)):
node.input[i] = self.make_variable_name(node.input[i])
if node.input[i] == '':
continue
else:
node.input[i] = self.make_variable_name(node.input[i])
for i in range(len(node.output)):
node.output[i] = self.make_variable_name(node.output[i])
......@@ -34,16 +34,14 @@ def main():
save_dir = args.save_dir
model_dir = os.path.join(save_dir, 'onnx_model_infer.onnx')
data_dir = os.path.join(save_dir, 'input_data.npy')
model = onnx.load(model_dir)
sess = rt.InferenceSession(model_dir)
inputs = np.load(data_dir, allow_pickle=True)
inputs_dict = {}
for i, ipt in enumerate(inputs):
inputs_dict[sess.get_inputs()[i].name] = ipt
for ipt in sess.get_inputs():
data_dir = os.path.join(save_dir, ipt.name + '.npy')
inputs_dict[ipt.name] = np.load(data_dir, allow_pickle=True)
res = sess.run(None, input_feed=inputs_dict)
for idx, value_info in enumerate(model.graph.output):
np.save(os.path.join(save_dir, value_info.name), res[idx])
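The rewritten main() above switches from one stacked input_data.npy to one file per model input, keyed by the session's input names. A minimal sketch of the same convention, assuming the .npy files already exist under save_dir:

    import os
    import numpy as np
    import onnxruntime as rt

    def run_infer(save_dir, model_path):
        # Feed each input from save_dir/<input_name>.npy, matching the
        # per-input files written by the converter side.
        sess = rt.InferenceSession(model_path)
        feed = {}
        for ipt in sess.get_inputs():
            feed[ipt.name] = np.load(
                os.path.join(save_dir, ipt.name + '.npy'), allow_pickle=True)
        return sess.run(None, input_feed=feed)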
......
......@@ -26,8 +26,6 @@ default_op_mapping_field_values['OUTPUT_PERM'] = None
default_op_mapping_field_values['FILL_NAME_FIELD'] = True
default_op_mapping = {
'Gather': ['gather', ['X'], ['Out'],
dict(axis='')],
'Shape': ['shape', ['X'], ['Out']],
'Clip': [
'clip', ['X'], ['Out'],
......@@ -81,11 +79,6 @@ default_op_mapping = {
'Sqrt': ['sqrt', ['X'], ['Out']],
}
activefunc_op_mapping = {
'LeakyRelu': ['leaky_relu', ['X'], ['Out'],
dict(), dict(alpha=.01)],
}
default_ioa_constraint = {
'Gather':
[(lambda i, o, a: a.get('axis', 0) == 0, 'only axis = 0 is supported')],
......
......@@ -116,12 +116,14 @@ class ONNXOpMapper(OpMapper):
return False
def get_results_of_inference(self, model, value_infos, data_nodes):
inputs = []
if not os.path.exists(self.tmp_data_dir):
os.makedirs(self.tmp_data_dir)
for data_node in data_nodes:
value_info = value_infos[data_node]
ipt = np.random.random(value_info['shape']).astype(
value_info['dtype'])
inputs.append(ipt)
np.save(os.path.join(self.tmp_data_dir, data_node), ipt)
model = onnx.shape_inference.infer_shapes(model)
outputs = []
......@@ -130,11 +132,8 @@ class ONNXOpMapper(OpMapper):
model.graph.ClearField('output')
model.graph.output.MergeFrom(outputs)
if not os.path.exists(self.tmp_data_dir):
os.makedirs(self.tmp_data_dir)
onnx.save(model, os.path.join(self.tmp_data_dir,
'onnx_model_infer.onnx'))
np.save(os.path.join(self.tmp_data_dir, 'input_data.npy'), inputs)
os.system('onnx_infer --save_dir=' + self.tmp_data_dir)
return
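get_results_of_inference above feeds random tensors through onnxruntime (via the external onnx_infer command) to recover shapes that static ONNX shape inference cannot provide. A minimal sketch of the input-generation half, with an illustrative value_infos layout:

    import os
    import numpy as np

    def make_dummy_inputs(value_infos, data_nodes, tmp_dir):
        # One .npy per graph input so a separate process can reload
        # them by name, as the hunk above does.
        os.makedirs(tmp_dir, exist_ok=True)
        for name in data_nodes:
            info = value_infos[name]
            ipt = np.random.random(info['shape']).astype(info['dtype'])
            np.save(os.path.join(tmp_dir, name), ipt)

    make_dummy_inputs({'x': {'shape': [1, 3, 224, 224], 'dtype': 'float32'}},
                      ['x'], '/tmp/onnx_infer_demo')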
......@@ -263,21 +262,24 @@ class ONNXOpMapper(OpMapper):
def elementwise_map(self, node):
assert node.layer_type in self.elementwise_ops
op_type = self.elementwise_ops[node.layer_type]
val_x = self.graph.get_input_node(node, idx=0, copy=True)
val_y = self.graph.get_input_node(node, idx=1, copy=True)
if len(val_x.out_shapes[0]) < len(val_y.out_shapes[0]):
val_x, val_y = val_y, val_x
val_y_shape = val_y.out_shapes[0]
val_x_shape = val_x.out_shapes[0]
if len(val_x_shape) < len(val_y_shape):
val_x, val_y = val_y, val_x
str_y_shape = ','.join(str(e) for e in val_y_shape)
str_x_shape = ','.join(str(e) for e in val_x_shape)
slice_idx = 0
for dim in val_y_shape:
if dim == 1:
slice_idx += 1
else:
break
if str_y_shape not in str_x_shape:
for dim in val_y_shape:
if dim == 1:
slice_idx += 1
else:
break
attr = {"name": string(node.layer_name)}
if slice_idx < len(val_y_shape) and slice_idx > 0:
val_y_reshaped = val_y_shape[slice_idx:]
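The elementwise hunk above now only trims leading 1s from y's shape when it is not already a suffix of x's shape, so the broadcast axis passed to fluid stays correct. A standalone sketch of that check:

    def count_leading_ones(y_shape, x_shape):
        # Mirror of the hunk's logic: skip trimming when y's shape
        # already appears verbatim inside x's shape.
        str_y = ','.join(str(e) for e in y_shape)
        str_x = ','.join(str(e) for e in x_shape)
        slice_idx = 0
        if str_y not in str_x:
            for dim in y_shape:
                if dim != 1:
                    break
                slice_idx += 1
        return slice_idx

    print(count_leading_ones([1, 1, 3], [2, 4, 3]))  # 2: y is sliced to [3]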
......@@ -353,47 +355,52 @@ class ONNXOpMapper(OpMapper):
val_scales = self.graph.get_input_node(node, idx=1, copy=True)
val_y = self.graph.get_node(node.layer.output[0], copy=True)
out_shape_ = val_y.out_shapes[0]
if out_shape_ is not None:
assert len(out_shape_) == 4, 'only 4-D Tensor as X and Y supported'
out_shape_ = out_shape_[2:]
out_shape = val_y.out_shapes[0]
if out_shape is not None:
assert len(out_shape) == 4, 'only 4-D Tensor as X and Y supported'
out_shape = out_shape[2:]
scales = _const_weight_or_none(val_scales)
if isinstance(val_scales, ONNXGraphNode):
scales, _, _ = self.get_dynamic_shape(val_scales.layer_name)
attr = {'name': string(node.layer_name)}
use_scales = True
if scales is not None:
assert len(scales) == 4, 'only 4-D Tensor as X and Y supported'
assert scales[0] == 1 and scales[
1] == 1, 'only scale on (NC)HW supported'
assert scales[2] == scales[
3], 'only aspect-ratio-invariant scale supported'
try:
assert len(scales) == 4, 'only 4-D Tensor as X and Y supported'
assert scales[0] == 1 and scales[
1] == 1, 'only scale on (NC)HW supported'
assert scales[2] == scales[
3], 'only aspect-ratio-invariant scale supported'
except:
use_scales = False
scale = scales[2] if scales else None
if scale is None:
assert out_shape_, 'neither scales nor output shape is available'
out_shape = out_shape_
assert out_shape, 'neither scales nor output shape is available'
else:
out_shape = None
if out_shape_ is None:
if out_shape is None:
in_shape = val_x.out_shapes[0]
assert in_shape is not None, 'out_shape required but not inferrable'
assert len(
in_shape) == 4, 'only 4-D Tensor as X and Y supported'
out_shape_ = [in_shape[2] * scale, in_shape[3] * scale]
out_shape = [in_shape[2] * scale, in_shape[3] * scale]
mode = node.get_attr('mode', 'nearest')
fluid_op = 'resize_{}'.format(mode)
if 'linear' in mode:
print(
'Warning: Paddle does not support resize with mode: linear; using bilinear instead'
'Warning: Paddle does not support op resize with mode: linear; using bilinear instead'
)
fluid_op = 'resize_bilinear'
if isinstance(val_scales, ONNXGraphNode):
scale, _, _ = self.get_dynamic_shape(val_scales.layer_name)
if use_scales and scale is not None:
attr['scale'] = scale
else:
attr['out_shape'] = out_shape
attr = {
'scale': scale,
'out_shape': out_shape,
'name': string(node.layer_name)
}
node.fluid_code.add_layer(fluid_op,
inputs=val_x,
output=node,
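The _interpolate change above replaces hard assertions with a use_scales flag: when the scales tensor fails the NCHW checks, the mapper falls back to an explicit out_shape instead of aborting. A minimal approximation of that control flow, with illustrative attribute names:

    def choose_resize_attr(scales, out_shape, name):
        use_scales = True
        if scales is not None:
            try:
                assert len(scales) == 4, 'only 4-D Tensor supported'
                assert scales[0] == 1 and scales[1] == 1, 'only (NC)HW scale'
                assert scales[2] == scales[3], 'aspect ratio must be kept'
            except AssertionError:
                use_scales = False
        scale = scales[2] if scales else None
        if use_scales and scale is not None:
            return {'scale': scale, 'name': name}
        return {'out_shape': out_shape, 'name': name}

    print(choose_resize_attr([1, 1, 2, 2], None, 'resize_0'))      # scale path
    print(choose_resize_attr([1, 1, 2, 3], [10, 12], 'resize_1'))  # fallback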
......@@ -449,7 +456,6 @@ class ONNXOpMapper(OpMapper):
def Unsqueeze(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
axes = node.get_attr('axes')
if len(val_x.out_shapes[0]) == 0:
node.fluid_code.add_layer('assign',
inputs=val_x,
......@@ -483,6 +489,7 @@ class ONNXOpMapper(OpMapper):
assert dtype == output_dtype, 'tensor dtype does not match storage dtype'
shape = node.get_attr('shape', None)
if shape is None:
shape = val_output.out_shapes[0]
if shape is None:
......@@ -493,7 +500,7 @@ class ONNXOpMapper(OpMapper):
'using value as 1-D tensor may lead to failure',
val_output.layer_name, val_output.layer_name)
if len(value) == 1: # scalar
if len(value) == 1:
value = value.tolist()
shape = [1]
value = value[0]
......@@ -520,48 +527,34 @@ class ONNXOpMapper(OpMapper):
param_attr=attr)
def Resize(self, node):
self._interpolate(node)
def Upsample(self, node):
self._interpolate(node)
def Expand(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
val_scales = self.graph.get_input_node(node, idx=1, copy=True)
val_y = self.graph.get_node(node.layer.output[0], copy=True)
val_shape = self.graph.get_input_node(node, idx=1, copy=True)
out_shape_ = val_y.out_shapes[0]
if out_shape_ is not None:
assert len(out_shape_) == 4, 'only 4-D Tensor as X and Y supported'
out_shape_ = out_shape_[2:]
scales = _const_weight_or_none(val_scales)
if scales is not None:
assert len(scales) == 4, 'only 4-D Tensor as X and Y supported'
assert scales[0] == 1 and scales[
1] == 1, 'only scale on (NC)HW supported'
assert scales[2] == scales[
3], 'only aspect-ratio-invariant scale supported'
scale = scales[2] if scales else None
if scale is None:
assert out_shape_, 'neither scales nor output shape is available'
out_shape = out_shape_
else:
out_shape = None
if out_shape_ is None:
in_shape = val_x.out_shapes[0]
assert in_shape is not None, 'out_shape required but not inferrable'
assert len(
in_shape) == 4, 'only 4-D Tensor as X and Y supported'
out_shape_ = [in_shape[2] * scale, in_shape[3] * scale]
if len(val_shape.outputs) == 1:
self.omit_nodes.append(val_shape.layer_name)
mode = node.get_attr('mode', 'nearest')
fluid_op = 'resize_{}'.format(mode)
attr = {
'scale': scale,
'out_shape': out_shape,
'name': string(node.layer_name)
}
node.fluid_code.add_layer(fluid_op,
inputs=val_x,
output=node,
param_attr=attr)
val_y = self.graph.get_node(node.layer.output[0], copy=True)
out_shape = node.out_shapes[0]
val_x_dtype = val_x.dtype
def Upsample(self, node):
self._interpolate(node)
name_ones = node.layer_name + '_ones'
attr_ones = {'shape': out_shape, 'dtype': string(val_x_dtype)}
node.fluid_code.add_layer('ones',
inputs=None,
output=name_ones,
param_attr=attr_ones)
inputs = {'x': name_ones, 'y': val_x}
attr = {'name': string(node.layer_name)}
node.fluid_code.add_layer('elementwise_mul',
inputs=inputs,
output=node.layer_name,
param_attr=attr)
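The new Expand above lowers ONNX Expand to a ones tensor of the target shape multiplied elementwise with x, letting broadcasting do the expansion. The same idea in plain numpy:

    import numpy as np

    # ones(out_shape) * x broadcasts x up to out_shape, which is exactly
    # what the generated ones + elementwise_mul layers compute.
    x = np.array([[1.0], [2.0], [3.0]])   # shape (3, 1)
    out_shape = (3, 4)                    # from node.out_shapes[0]
    expanded = np.ones(out_shape, dtype=x.dtype) * x
    print(expanded.shape)                 # (3, 4)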
def Gather(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
......@@ -716,7 +709,7 @@ class ONNXOpMapper(OpMapper):
'dim': axis,
'name': string(node.layer_name)
}
# generate the split layer
node.fluid_code.add_layer('split',
inputs=val_x,
output=val_y,
......@@ -731,21 +724,23 @@ class ONNXOpMapper(OpMapper):
if isinstance(val_shape, ONNXGraphDataNode):
self.omit_nodes.append(val_shape.layer_name)
attr = {'name': string(node.layer_name)}
# catch dynamic graph shape
if isinstance(val_shape, ONNXGraphNode):
shape, _, _ = self.get_dynamic_shape(val_shape.layer_name)
if val_shape.dtype == 'int64':
val_shape_cast = val_shape.layer_name + '_cast'
node.fluid_code.add_layer('cast',
inputs=val_shape,
output=val_shape_cast,
param_attr={'dtype': string('int32')})
attr['actual_shape'] = val_shape_cast
else:
attr['actual_shape'] = val_shape
if shape is None:
shape = val_reshaped.out_shapes[0]
shape_dtype = val_shape.dtype
if shape_dtype is None:
_logger.warning(
'in op %s(%s -> Reshape -> %s): '
'dtype of input "shape" not inferred, int32 assumed',
node.layer_name, val_x.layer_name, val_reshaped.layer_name)
shape_dtype = _np.dtype('int32')
if shape is None:
shape = [1, -1]
_logger.warning(
......@@ -753,8 +748,8 @@ class ONNXOpMapper(OpMapper):
'input "shape" not inferred, use [1, -1] as dummy value, '
'the behavior of Paddle fluid maybe undefined', node.layer_name,
val_x.layer_name, val_reshaped.layer_name)
attr = {'shape': shape, 'name': string(node.layer_name)}
attr['shape'] = shape
node.fluid_code.add_layer('reshape',
inputs=val_x,
output=node,
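The Reshape hunk above inserts a cast layer when the dynamic shape tensor is int64, since the actual_shape input on the Paddle side expects int32. A minimal sketch of that emission order, with a print-based stand-in for fluid_code.add_layer:

    def emit_reshape(add_layer, x_name, shape_tensor, static_shape, name):
        # shape_tensor: (tensor_name, dtype) for a dynamic shape input,
        # or None when the shape is fully static. Names are illustrative.
        attr = {'name': name}
        if shape_tensor is not None:
            t_name, t_dtype = shape_tensor
            if t_dtype == 'int64':
                cast_name = name + '_cast'
                add_layer('cast', inputs=t_name, output=cast_name,
                          param_attr={'dtype': 'int32'})
                attr['actual_shape'] = cast_name
            else:
                attr['actual_shape'] = t_name
        attr['shape'] = static_shape if static_shape else [1, -1]  # dummy
        add_layer('reshape', inputs=x_name, output=name, param_attr=attr)

    emit_reshape(lambda op, **kw: print(op, kw),
                 'x', ('shape_0', 'int64'), [0, -1], 'reshape_0')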
......