提交 ebff5970 编写于 作者: S SunAhong1993

Fix ONNX-to-Paddle op mappings (reduce ops, Upsample scales, fill/split/PRelu/Greater handling, transpose output-name typo) and add Log and TopK converters

上级 b541e4a5
...@@ -119,19 +119,19 @@ class OpSet9(): ...@@ -119,19 +119,19 @@ class OpSet9():
# reduce function # reduce function
'ReduceMean': ['paddle.mean', 'ReduceMean': ['paddle.mean',
dict(axes='axis', keepdims='keepdim'), dict(axes='axis', keepdims='keepdim'),
dict(keepdims=1)], dict(axes=None, keepdims=1)],
'ReduceSum': ['paddle.sum', 'ReduceSum': ['paddle.sum',
dict(axes='axis', keepdims='keepdim'), dict(axes='axis', keepdims='keepdim'),
dict(keepdims=1)], dict(axes=None, keepdims=1)],
'ReduceMin': ['paddle.min', 'ReduceMin': ['paddle.min',
dict(axes='axis', keepdims='keepdim'), dict(axes='axis', keepdims='keepdim'),
dict(keepdim=1)], dict(axes=None, keepdim=1)],
'ReduceMax': ['paddle.max', 'ReduceMax': ['paddle.max',
dict(axes='axis', keepdims='keepdim'), dict(axes='axis', keepdims='keepdim'),
dict(keepdim=1)], dict(axes=None, keepdim=1)],
'ReduceProd': ['paddle.prod', 'ReduceProd': ['paddle.prod',
dict(axes='axis', keepdims='keepdim'), dict(axes='axis', keepdims='keepdim'),
dict(keepdim=1)], dict(axes=None, keepdim=1)],
# active function # active function
'Relu': ['paddle.nn.ReLU'], 'Relu': ['paddle.nn.ReLU'],
'LeakyRelu': ['paddle.nn.LeakyReLU', 'LeakyRelu': ['paddle.nn.LeakyReLU',
...@@ -150,6 +150,7 @@ class OpSet9(): ...@@ -150,6 +150,7 @@ class OpSet9():
dict(threshold='threshold'), dict(threshold='threshold'),
dict(threshold=float(sys.maxsize))], dict(threshold=float(sys.maxsize))],
'Exp': ['paddle.exp'], 'Exp': ['paddle.exp'],
'Log': ['paddle.log'],
'LogSoftmax': ['paddle.nn.functional.log_softmax', 'LogSoftmax': ['paddle.nn.functional.log_softmax',
dict(axis='axis'), dict(axis='axis'),
dict(axis=1)], dict(axis=1)],
...@@ -320,8 +321,15 @@ class OpSet9(): ...@@ -320,8 +321,15 @@ class OpSet9():
return return
elif node.layer_type == 'Upsample': elif node.layer_type == 'Upsample':
val_scales = self.graph.get_input_node(node, idx=1, copy=True) val_scales = self.graph.get_input_node(node, idx=1, copy=True)
inputs['scale_factor'] = val_scales self.paddle_graph.add_layer(
"paddle.slice",
inputs={"input": val_scales.name},
outputs=[val_scales.name],
axes=[0],
starts=[2],
ends=[4])
inputs['scale_factor'] = val_scales.name
mode = node.get_attr('mode', 'nearest') mode = node.get_attr('mode', 'nearest')
attrs.update({"align_corners": False, attrs.update({"align_corners": False,
"mode": string(mode), "mode": string(mode),
...@@ -1013,13 +1021,12 @@ class OpSet9(): ...@@ -1013,13 +1021,12 @@ class OpSet9():
if len(value) == 1: if len(value) == 1:
value = value[0] value = value[0]
layer_attrs = { layer_attrs = {
'shape': val_shape.name,
'dtype': string(dtype), 'dtype': string(dtype),
'fill_value': value 'fill_value': value
} }
self.paddle_graph.add_layer( self.paddle_graph.add_layer(
"paddle.full", "paddle.full",
inputs={}, inputs={'shape': val_shape.name},
outputs=[node.name], outputs=[node.name],
**layer_attrs) **layer_attrs)
...@@ -1072,8 +1079,11 @@ class OpSet9(): ...@@ -1072,8 +1079,11 @@ class OpSet9():
} }
outputs_list = list() outputs_list = list()
if isinstance(split, list) or isinstance(split, tuple): if isinstance(split, list) or isinstance(split, tuple):
for i in range(len(split)): if len(split) == 1:
outputs_list.append("{}_p{}".format(node.layer_name, i)) outputs_list.append(node.name)
else:
for i in range(len(split)):
outputs_list.append("{}_p{}".format(node.layer_name, i))
else: else:
outputs_list.append(node.name) outputs_list.append(node.name)
self.paddle_graph.add_layer( self.paddle_graph.add_layer(
...@@ -1415,6 +1425,18 @@ class OpSet9(): ...@@ -1415,6 +1425,18 @@ class OpSet9():
else: else:
if mode == 'channel': if mode == 'channel':
slope_data = _const_weight_or_none(val_slope) slope_data = _const_weight_or_none(val_slope)
if slope_data is None:
self.paddle_graph.add_layer(
"paddle.reshape",
inputs={"x": val_slope.name},
outputs=[val_slope.name],
shape=[shape_slope[0]])
self.paddle_graph.add_layer(
"paddle.nn.functional.prelu",
inputs={"x": val_x.name,
"weight": val_slope.name},
outputs=[node.name])
return
_rename_or_remove_weight(self.weights, val_slope.name) _rename_or_remove_weight(self.weights, val_slope.name)
if len(shape_slope) > 1: if len(shape_slope) > 1:
self.weights[op_name+'._weight'] = np.reshape(slope_data, shape_slope[0]) self.weights[op_name+'._weight'] = np.reshape(slope_data, shape_slope[0])
...@@ -1464,7 +1486,7 @@ class OpSet9(): ...@@ -1464,7 +1486,7 @@ class OpSet9():
"paddle.greater_than", "paddle.greater_than",
inputs={'x': val_x.name, inputs={'x': val_x.name,
'y': val_y.name}, 'y': val_y.name},
outputs=node, outputs=[node.name],
param_attr=None) param_attr=None)
@print_mapping_info @print_mapping_info
...@@ -1521,7 +1543,7 @@ class OpSet9(): ...@@ -1521,7 +1543,7 @@ class OpSet9():
self.paddle_graph.add_layer( self.paddle_graph.add_layer(
"paddle.transpose", "paddle.transpose",
inputs={"x": val_x.name}, inputs={"x": val_x.name},
outputs=[node.layer_naem], outputs=[node.layer_name],
perm=[1, 0]) perm=[1, 0])
if val_x_dim > 1: if val_x_dim > 1:
self.paddle_graph.add_layer( self.paddle_graph.add_layer(
...@@ -1977,3 +1999,18 @@ class OpSet9(): ...@@ -1977,3 +1999,18 @@ class OpSet9():
outputs=[y_out], outputs=[y_out],
perm=[0,2,1,3] perm=[0,2,1,3]
) )
@print_mapping_info
def TopK(self, node):
    """Convert an ONNX ``TopK`` node into a ``paddle.topk`` layer.

    Reads the data tensor (input 0) and the ``k`` tensor (input 1) from
    the graph, forwards the ONNX attributes ``axis`` / ``largest`` /
    ``sorted`` (defaults -1 / 1 / 1, per the ONNX operator spec) as
    ``paddle.topk`` keyword arguments, and registers the op's two
    outputs under ``<layer_name>_p0`` and ``<layer_name>_p1``.
    """
    val_x = self.graph.get_input_node(node, idx=0, copy=True)
    val_k = self.graph.get_input_node(node, idx=1, copy=True)
    layer_attrs = {
        "axis": node.get_attr('axis', -1),
        # ONNX encodes these flags as 0/1 ints; paddle.topk takes bools.
        "largest": node.get_attr('largest', 1) == 1,
        "sorted": node.get_attr('sorted', 1) == 1,
    }
    self.paddle_graph.add_layer(
        "paddle.topk",
        inputs={"x": val_x.name,
                "k": val_k.name},
        outputs=[
            "{}_p{}".format(node.layer_name, 0),
            "{}_p{}".format(node.layer_name, 1)
        ],
        **layer_attrs)
...@@ -96,19 +96,19 @@ class OpSet9(): ...@@ -96,19 +96,19 @@ class OpSet9():
# reduce function # reduce function
'ReduceMean': ['paddle.mean', 'ReduceMean': ['paddle.mean',
dict(axes='axis', keepdims='keepdim'), dict(axes='axis', keepdims='keepdim'),
dict(keepdims=1)], dict(axes=None, keepdims=1)],
'ReduceSum': ['paddle.sum', 'ReduceSum': ['paddle.sum',
dict(axes='axis', keepdims='keepdim'), dict(axes='axis', keepdims='keepdim'),
dict(keepdims=1)], dict(axes=None, keepdims=1)],
'ReduceMin': ['paddle.min', 'ReduceMin': ['paddle.min',
dict(axes='axis', keepdims='keepdim'), dict(axes='axis', keepdims='keepdim'),
dict(keepdim=1)], dict(axes=None, keepdim=1)],
'ReduceMax': ['paddle.max', 'ReduceMax': ['paddle.max',
dict(axes='axis', keepdims='keepdim'), dict(axes='axis', keepdims='keepdim'),
dict(keepdim=1)], dict(axes=None, keepdim=1)],
'ReduceProd': ['paddle.prod', 'ReduceProd': ['paddle.prod',
dict(axes='axis', keepdims='keepdim'), dict(axes='axis', keepdims='keepdim'),
dict(keepdim=1)], dict(axes=None, keepdim=1)],
# active function # active function
'Relu': ['paddle.nn.functional.relu'], 'Relu': ['paddle.nn.functional.relu'],
'LeakyRelu': ['paddle.nn.functional.leaky_relu', 'LeakyRelu': ['paddle.nn.functional.leaky_relu',
...@@ -127,6 +127,7 @@ class OpSet9(): ...@@ -127,6 +127,7 @@ class OpSet9():
dict(threshold='threshold'), dict(threshold='threshold'),
dict(threshold=float(sys.maxsize))], dict(threshold=float(sys.maxsize))],
'Exp': ['paddle.exp'], 'Exp': ['paddle.exp'],
'Log': ['paddle.log'],
'Softmax': ['paddle.nn.functional.softmax', 'Softmax': ['paddle.nn.functional.softmax',
dict(axis='axis'), dict(axis='axis'),
dict(axis=1)], dict(axis=1)],
...@@ -283,7 +284,14 @@ class OpSet9(): ...@@ -283,7 +284,14 @@ class OpSet9():
return return
elif node.layer_type == 'Upsample': elif node.layer_type == 'Upsample':
val_scales = self.graph.get_input_node(node, idx=1, copy=True) val_scales = self.graph.get_input_node(node, idx=1, copy=True)
inputs['scale'] = val_scales self.paddle_graph.add_layer(
"paddle.slice",
inputs={"input": val_scales.name},
outputs=[val_scales.name],
axes=[0],
starts=[2],
ends=[4])
inputs['scale_factor'] = val_scales.name
mode = node.get_attr('mode', 'nearest') mode = node.get_attr('mode', 'nearest')
attrs.update({"align_corners": False, attrs.update({"align_corners": False,
...@@ -977,13 +985,12 @@ class OpSet9(): ...@@ -977,13 +985,12 @@ class OpSet9():
if len(value) == 1: if len(value) == 1:
value = value[0] value = value[0]
layer_attrs = { layer_attrs = {
'shape': val_shape.name,
'dtype': string(dtype), 'dtype': string(dtype),
'fill_value': value 'fill_value': value
} }
self.paddle_graph.add_layer( self.paddle_graph.add_layer(
"paddle.full", "paddle.full",
inputs={}, inputs={'shape': val_shape.name},
outputs=[node.name], outputs=[node.name],
**layer_attrs) **layer_attrs)
...@@ -1035,8 +1042,11 @@ class OpSet9(): ...@@ -1035,8 +1042,11 @@ class OpSet9():
} }
outputs_list = list() outputs_list = list()
if isinstance(split, list) or isinstance(split, tuple): if isinstance(split, list) or isinstance(split, tuple):
for i in range(len(split)): if len(split) == 1:
outputs_list.append("{}_p{}".format(node.layer_name, i)) outputs_list.append(node.name)
else:
for i in range(len(split)):
outputs_list.append("{}_p{}".format(node.layer_name, i))
else: else:
outputs_list.append(node.name) outputs_list.append(node.name)
self.paddle_graph.add_layer( self.paddle_graph.add_layer(
...@@ -1391,7 +1401,7 @@ class OpSet9(): ...@@ -1391,7 +1401,7 @@ class OpSet9():
"paddle.greater_than", "paddle.greater_than",
inputs={'x': val_x.name, inputs={'x': val_x.name,
'y': val_y.name}, 'y': val_y.name},
outputs=node, outputs=[node.name],
param_attr=None) param_attr=None)
@print_mapping_info @print_mapping_info
...@@ -1448,7 +1458,7 @@ class OpSet9(): ...@@ -1448,7 +1458,7 @@ class OpSet9():
self.paddle_graph.add_layer( self.paddle_graph.add_layer(
"paddle.transpose", "paddle.transpose",
inputs={"x": val_x.name}, inputs={"x": val_x.name},
outputs=[node.layer_naem], outputs=[node.layer_name],
perm=[1, 0]) perm=[1, 0])
if val_x_dim > 1: if val_x_dim > 1:
self.paddle_graph.add_layer( self.paddle_graph.add_layer(
...@@ -1758,3 +1768,18 @@ class OpSet9(): ...@@ -1758,3 +1768,18 @@ class OpSet9():
"paddle.reciprocal", "paddle.reciprocal",
inputs={"x": val_x.name}, inputs={"x": val_x.name},
outputs=[node.name]) outputs=[node.name])
@print_mapping_info
def TopK(self, node):
    """Convert an ONNX ``TopK`` node into a ``paddle.topk`` layer.

    Reads the data tensor (input 0) and the ``k`` tensor (input 1) from
    the graph, forwards the ONNX attributes ``axis`` / ``largest`` /
    ``sorted`` (defaults -1 / 1 / 1, per the ONNX operator spec) as
    ``paddle.topk`` keyword arguments, and registers the op's two
    outputs under ``<layer_name>_p0`` and ``<layer_name>_p1``.
    """
    val_x = self.graph.get_input_node(node, idx=0, copy=True)
    val_k = self.graph.get_input_node(node, idx=1, copy=True)
    layer_attrs = {
        "axis": node.get_attr('axis', -1),
        # ONNX encodes these flags as 0/1 ints; paddle.topk takes bools.
        "largest": node.get_attr('largest', 1) == 1,
        "sorted": node.get_attr('sorted', 1) == 1,
    }
    self.paddle_graph.add_layer(
        "paddle.topk",
        inputs={"x": val_x.name,
                "k": val_k.name},
        outputs=[
            "{}_p{}".format(node.layer_name, 0),
            "{}_p{}".format(node.layer_name, 1)
        ],
        **layer_attrs)
\ No newline at end of file
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册