diff --git a/x2paddle/core/program.py b/x2paddle/core/program.py index 5e23eb2f956c1ae8c0612b1cbc14040dd98170ac..5c945dfe45c2fae04d47d57f2e2e42b184c16e1a 100644 --- a/x2paddle/core/program.py +++ b/x2paddle/core/program.py @@ -166,7 +166,6 @@ class PaddleGraph(object): self.clear_edges() outputs_from_nodes = dict() for layer_id, layer in self.layers.items(): - print(layer.kernel, layer.outputs ,layer.inputs) for input_key, input_var in layer.inputs.items(): vs = input_var if not isinstance(vs, (list, tuple)): @@ -211,9 +210,12 @@ class PaddleGraph(object): if self.edges_in.get(layer_id, 0) == 0 and self.edges_out.get( layer_id, 0) == 0 and layer.kernel != "prim.assert" \ and layer.kernel != "prim.exception" \ - and layer.kernel != "prim.warnings": - if layer.kernel == "paddle.to_tensor": + and layer.kernel != "prim.warnings" \ + and layer.outputs[0] not in self.outputs: + if layer.kernel == "paddle.to_tensor" and layer.outputs[0] in self.inputs_info: self.inputs_info.pop(layer.outputs[0]) + if layer.outputs[0] in self.inputs: + self.inputs.pop(self.inputs.index(layer.outputs[0])) invalid_list.append(layer_id) for layer_id in invalid_list: self.layers.pop(layer_id) @@ -323,6 +325,9 @@ class PaddleGraph(object): if self.source_type == "caffe": custom_import = "from x2paddle.op_mapper.static.caffe2paddle " + \ "import caffe_custom_layer as x2paddle_nn" + elif self.source_type == "onnx": + custom_import = "from x2paddle.op_mapper.static.onnx2paddle " + \ + "import onnx_custom_layer as x2paddle_nn" else: custom_import = "" @@ -352,7 +357,9 @@ class PaddleGraph(object): remove_default_attrs(layer.kernel, layer.attrs) edges_in = self.edges_in.get(layer_id, []) edges_out = self.edges_out.get(layer_id, []) - if len(edges_in) == 0 and len(edges_out) == 0: + if len(edges_in) == 0 and len(edges_out) == 0 and layer.outputs[0] not in self.outputs: + if layer.outputs[0] in self.inputs: + self.inputs.pop(self.inputs.index(layer.outputs[0])) continue line = "" @@ -472,6 +479,9 @@ class PaddleGraph(object): elif self.source_type == "pytorch": custom_import = "from x2paddle.op_mapper.dygraph.pytorch2paddle " + \ "import pytorch_custom_layer as x2paddle_nn" + elif self.source_type == "onnx": + custom_import = "from x2paddle.op_mapper.dygraph.onnx2paddle " + \ + "import onnx_custom_layer as x2paddle_nn" else: custom_import = "" self.head = gen_codes( @@ -580,7 +590,7 @@ class PaddleGraph(object): elif len(layer.outputs) == 2: line = layer.outputs[1] else: - if layer.kernel == "paddle.nn.LSTM": + if layer.kernel in ["paddle.nn.LSTM", 'custom_layer:LSTM']: line = "{}, ({})".format(layer.outputs[1], ', '.join(layer.outputs[-2:])) else: line = ','.join(layer.outputs[1:]) @@ -589,8 +599,13 @@ class PaddleGraph(object): line += " = self.{}".format(layer.outputs[0]) else: line += " = self.{}(".format(layer.outputs[0]) - for k, v in layer.inputs.items(): - line += "{}, ".format(v) + for v in layer.inputs.values(): + if isinstance(v, list): + line += "[{}], ".format(", ".join(v)) + elif isinstance(v, tuple): + line += "({}), ".format(", ".join(v)) + else: + line += "{}, ".format(v) line = line.strip(", ") line += ")" self.forward_func.extend(gen_codes([line], indent=indent)) diff --git a/x2paddle/decoder/onnx_decoder.py b/x2paddle/decoder/onnx_decoder.py index f749c6c9af4558e6f978c5257394a4f539d2b720..32e32a3853619813f7431537cf114a88ee6afc1c 100644 --- a/x2paddle/decoder/onnx_decoder.py +++ b/x2paddle/decoder/onnx_decoder.py @@ -31,6 +31,7 @@ import numpy as np from copy import deepcopy import logging as _logging 
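The list/tuple branch added to the forward-code generator in program.py above changes how multi-tensor inputs are rendered in the emitted call line. A minimal sketch of that rendering logic, with hypothetical layer and variable names:

# minimal sketch of the new input rendering (names are illustrative)
inputs = {"x": ["conv0_out", "conv1_out"], "axis": "dim0"}
line = "out = self.concat0("
for v in inputs.values():
    if isinstance(v, list):
        line += "[{}], ".format(", ".join(v))
    elif isinstance(v, tuple):
        line += "({}), ".format(", ".join(v))
    else:
        line += "{}, ".format(v)
line = line.strip(", ") + ")"
print(line)  # out = self.concat0([conv0_out, conv1_out], dim0)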
import os +import copy default_op_domain = 'ai.onnx' _logger = _logging.getLogger(__name__) @@ -98,9 +99,7 @@ class ONNXGraphNode(GraphNode): def output(self, index=0): if index >0 and len(self.layer.output) <= index: raise IndexError('Output numbers of Node:{} is {} <= index:{}'.format(self.layer_name, len(self.layer.output), index)) - if index > 0: - return "{}_p{}".format(self.layer_name, index) - return self.layer_name + return self.layer.output[index] class ONNXGraphDataNode(GraphNode): @@ -132,6 +131,17 @@ class ONNXGraphDataNode(GraphNode): shape.append(dim.dim_value) out_shapes.append(shape) return out_shapes + elif isinstance(self.layer, TensorProto): + values = self.layer.dims + out_shapes = list() + shape = list() + for dim in values: + if dim == 0: + shape.append(-1) + else: + shape.append(dim) + out_shapes.append(shape) + return out_shapes else: values = self.layer.dims out_shapes = list() @@ -241,11 +251,12 @@ class ONNXGraph(Graph): """ generate output_nodes node of ONNX model """ - inner_nodes = self.get_inner_nodes() output_nodes = [value.name for value in self.graph.output] for opt_data in output_nodes: - if opt_data not in inner_nodes: - self.output_nodes.append(opt_data) + #n = super(ONNXGraph, self).get_node(opt_data) + #if n is None: + # self.topo_sort.append(self.node_map[opt_data]) + self.output_nodes.append(opt_data) def is_place_holder_nodes(self, layer): """ @@ -293,7 +304,7 @@ class ONNXGraph(Graph): #generate topo super(ONNXGraph, self).build() - self.input_nodes = self.place_holder_nodes + self.input_nodes = copy.deepcopy(self.place_holder_nodes) def build_connection(self, layer_name, node): """ @@ -410,10 +421,8 @@ class ONNXDecoder(object): check_model(onnx_model) onnx_model = self.optimize_model_skip_op(onnx_model) - onnx_model = self.optimize_model_strip_initializer(onnx_model) onnx_model = self.optimize_node_name(onnx_model) self.graph = ONNXGraph(onnx_model) - #self.onnx_model = onnx_model def build_value_refs(self, nodes): """ diff --git a/x2paddle/op_mapper/dygraph/onnx2paddle/onnx_custom_layer/__init__.py b/x2paddle/op_mapper/dygraph/onnx2paddle/onnx_custom_layer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..63de3d711412d1ebfacec7080f5ee8a92cc1b0b4 --- /dev/null +++ b/x2paddle/op_mapper/dygraph/onnx2paddle/onnx_custom_layer/__init__.py @@ -0,0 +1,21 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
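A minimal sketch of the aliasing bug that the copy.deepcopy introduced above avoids: without the copy, input_nodes and place_holder_nodes are the same list object, so later mutation of one silently corrupts the other (variable values here are illustrative):

import copy

place_holder_nodes = ["x0", "x1"]
input_nodes = place_holder_nodes                  # alias, not a copy
input_nodes.remove("x0")                          # also shrinks place_holder_nodes
assert place_holder_nodes == ["x1"]               # surprising side effect

place_holder_nodes = ["x0", "x1"]
input_nodes = copy.deepcopy(place_holder_nodes)   # what the decoder now does
input_nodes.remove("x0")
assert place_holder_nodes == ["x0", "x1"]         # original list preserved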
+ + +from .one_hot import OneHot +from .rnn import LSTM +from .pad_two_input import PadWithTwoInput +from .pad_all_dim2 import PadAllDim2 +from .pad_all_dim4 import PadAllDim4 +from .pad_all_dim4_one_input import PadAllDim4WithOneInput diff --git a/x2paddle/op_mapper/dygraph/onnx2paddle/onnx_custom_layer/one_hot.py b/x2paddle/op_mapper/dygraph/onnx2paddle/onnx_custom_layer/one_hot.py new file mode 100644 index 0000000000000000000000000000000000000000..def62ed8e8501dba5beb86e0759214b559cc0d0a --- /dev/null +++ b/x2paddle/op_mapper/dygraph/onnx2paddle/onnx_custom_layer/one_hot.py @@ -0,0 +1,38 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle + +class OneHot(object): + def __init__(self, axis): + self.axis = axis + + def __call__(self, indices, depth, values): + indices_shape = indices.shape + rank = len(indices.shape) + real_axis = self.axis + if self.axis < 0: + real_axis = self.axis + rank + 1 + depth_range = paddle.arange(end=depth) + ls = tuple(indices_shape[0: real_axis]) + rs = tuple(indices_shape[real_axis: rank]) + targets = paddle.reshape(depth_range, (1,) * (real_axis-0) + tuple(depth_range.shape) + (1,) * (rank-real_axis)) + mod = paddle.mod(indices, depth) + v = paddle.reshape(mod, ls + (1,) + rs) + out = targets == v + out = paddle.cast(out, "float32") + on_value = paddle.slice(values, axes=[0], starts=[1], ends=[2]) + off_value = paddle.slice(values, axes=[0], starts=[0], ends=[1]) + out = out * (on_value - off_value) + off_value + return out \ No newline at end of file diff --git a/x2paddle/op_mapper/dygraph/onnx2paddle/onnx_custom_layer/pad_all_dim2.py b/x2paddle/op_mapper/dygraph/onnx2paddle/onnx_custom_layer/pad_all_dim2.py new file mode 100644 index 0000000000000000000000000000000000000000..6228ae7bbad5f39db998dab41fc824fa51182f03 --- /dev/null +++ b/x2paddle/op_mapper/dygraph/onnx2paddle/onnx_custom_layer/pad_all_dim2.py @@ -0,0 +1,35 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
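A usage sketch for the OneHot layer above; following the ONNX OneHot contract, values packs [off_value, on_value] (the tensor values below are illustrative):

import paddle

one_hot = OneHot(axis=-1)
indices = paddle.to_tensor([0, 2, 1])
depth = paddle.to_tensor(3)
values = paddle.to_tensor([0.0, 1.0])  # [off_value, on_value]
out = one_hot(indices, depth, values)
# out is a (3, 3) float32 tensor:
# [[1, 0, 0],
#  [0, 0, 1],
#  [0, 1, 0]]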
+ +import paddle +from x2paddle.core.util import * + +class PadAllDim2(object): + def __init__(self, value, mode): + self.layer_attrs = {} + self.layer_attrs['mode'] = mode + self.layer_attrs['data_format'] = 'NCHW' + self.layer_attrs['value'] = value + + + def __call__(self, x, pad): + pad = paddle.reshape(pad, shape=[2, -1]) + pad = paddle.transpose(pad, perm=[1, 0]) + pad = paddle.reverse(pad, axis=[0]) + pad = paddle.flatten(pad) + pad = paddle.cast(pad, dtype="int32") + x = paddle.unsqueeze(x, axis=[0, 1]) + out = paddle.nn.functional.pad(x=x, pad=pad, **self.layer_attrs) + out = paddle.squeeze(out, axis=[0, 1]) + return out \ No newline at end of file diff --git a/x2paddle/op_mapper/dygraph/onnx2paddle/onnx_custom_layer/pad_all_dim4.py b/x2paddle/op_mapper/dygraph/onnx2paddle/onnx_custom_layer/pad_all_dim4.py new file mode 100644 index 0000000000000000000000000000000000000000..d1c2c382cda57a211b465155acace5d578a1657d --- /dev/null +++ b/x2paddle/op_mapper/dygraph/onnx2paddle/onnx_custom_layer/pad_all_dim4.py @@ -0,0 +1,37 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle +from x2paddle.core.util import * + +class PadAllDim4(object): + def __init__(self, value, mode): + self.layer_attrs = {} + self.layer_attrs['mode'] = mode + self.layer_attrs['data_format'] = 'NCHW' + self.layer_attrs['value'] = value + + + def __call__(self, x, pad): + pad = paddle.reshape(pad, shape=[2, -1]) + pad = paddle.transpose(pad, perm=[1, 0]) + pad = paddle.reverse(pad, axis=[0]) + pad = paddle.flatten(pad) + pad = paddle.cast(pad, dtype="int32") + pad1, pad2 = paddle.split(pad, num_or_sections=2, axis=0) + x = paddle.nn.functional.pad(x=x, pad=pad1, **self.layer_attrs) + x = paddle.transpose(x, perm=[2, 3, 0, 1]) + x = paddle.nn.functional.pad(x=x, pad=pad2, **self.layer_attrs) + out = paddle.transpose(x, perm=[2, 3, 0, 1]) + return out \ No newline at end of file diff --git a/x2paddle/op_mapper/dygraph/onnx2paddle/onnx_custom_layer/pad_all_dim4_one_input.py b/x2paddle/op_mapper/dygraph/onnx2paddle/onnx_custom_layer/pad_all_dim4_one_input.py new file mode 100644 index 0000000000000000000000000000000000000000..9ad3048081a1dbb6dfe57cce2a235a8a57aa6c2f --- /dev/null +++ b/x2paddle/op_mapper/dygraph/onnx2paddle/onnx_custom_layer/pad_all_dim4_one_input.py @@ -0,0 +1,32 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
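The reshape/transpose/reverse/flatten chain shared by the pad layers above converts an ONNX pads tensor (all begin values, then all end values) into the trailing-dimension-first [before, after] order that paddle.nn.functional.pad expects. A numpy sketch with illustrative values:

import numpy as np

pads = np.array([1, 2, 3, 4])   # ONNX pads for 2 dims: [begin0, begin1, end0, end1]
p = pads.reshape(2, -1)         # [[1, 2], [3, 4]]   rows: begins, ends
p = p.transpose(1, 0)           # [[1, 3], [2, 4]]   rows: per-dim [begin, end]
p = p[::-1]                     # last dimension first, as F.pad expects
print(p.flatten().tolist())     # [2, 4, 1, 3]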
+ +import paddle +from x2paddle.core.util import * + +class PadAllDim4WithOneInput(object): + def __init__(self, pad, value, mode): + self.layer_attrs = {} + self.layer_attrs['mode'] = mode + self.layer_attrs['data_format'] = 'NCHW' + self.layer_attrs['value'] = value + self.pad1 = pad[0: 4] + self.pad2 = pad[4: 9] + + def __call__(self, x): + x = paddle.nn.functional.pad(x=x, pad=self.pad1, **self.layer_attrs) + x = paddle.transpose(x, perm=[2, 3, 0, 1]) + x = paddle.nn.functional.pad(x=x, pad=self.pad2, **self.layer_attrs) + out = paddle.transpose(x, perm=[2, 3, 0, 1]) + return out \ No newline at end of file diff --git a/x2paddle/op_mapper/dygraph/onnx2paddle/onnx_custom_layer/pad_two_input.py b/x2paddle/op_mapper/dygraph/onnx2paddle/onnx_custom_layer/pad_two_input.py new file mode 100644 index 0000000000000000000000000000000000000000..e1053eda35b399a3cb3976347d30659e8ef74d7d --- /dev/null +++ b/x2paddle/op_mapper/dygraph/onnx2paddle/onnx_custom_layer/pad_two_input.py @@ -0,0 +1,33 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle +from x2paddle.core.util import * + +class PadWithTwoInput(object): + def __init__(self, value, mode, data_format): + self.layer_attrs = {} + self.layer_attrs['mode'] = mode + self.layer_attrs['data_format'] = data_format + self.layer_attrs['value'] = value + + + def __call__(self, x, pad): + pad = paddle.reshape(pad, shape=[2, -1]) + pad = paddle.transpose(pad, perm=[1, 0]) + pad = paddle.reverse(pad, axis=[0]) + pad = paddle.flatten(pad) + pad = paddle.cast(pad, dtype="int32") + out = paddle.nn.functional.pad(x=x, pad=pad, **self.layer_attrs) + return out \ No newline at end of file diff --git a/x2paddle/op_mapper/dygraph/onnx2paddle/opset9/opset.py b/x2paddle/op_mapper/dygraph/onnx2paddle/opset9/opset.py index 0971d8e8f27c52e877074db27f9b9a210e0ef78b..bf31b2f98633fc689bab244850e4d1b61281d8f4 100644 --- a/x2paddle/op_mapper/dygraph/onnx2paddle/opset9/opset.py +++ b/x2paddle/op_mapper/dygraph/onnx2paddle/opset9/opset.py @@ -104,6 +104,9 @@ class OpSet9(): 'ReduceMax': ['paddle.max', dict(axes='axis', keepdims='keepdim'), dict(keepdim=1)], + 'ReduceProd': ['paddle.prod', + dict(axes='axis', keepdims='keepdim'), + dict(keepdim=1)], # active function 'Relu': ['paddle.nn.ReLU'], 'LeakyRelu': ['paddle.nn.LeakyReLU', @@ -142,6 +145,7 @@ class OpSet9(): self.inputs_info = dict() self.weights = dict() self.nn_name2id = dict() + self.done_weight_list = list() @print_mapping_info def directly_map(self, node, *args, **kwargs): @@ -163,11 +167,12 @@ class OpSet9(): layer_attrs[pd_attr_name] = onnx_attrs[onnx_attr_name] else: layer_attrs[pd_attr_name] = op_info[2][onnx_attr_name] - if paddle_op.startswith("paddle.nn"): + if paddle_op.startswith("paddle.nn") and 'functional' not in paddle_op: op_name = paddle_op[10:].lower() op_name = name_generator(op_name, self.nn_name2id) output_name = node.name layer_outputs = [op_name, output_name] + self.paddle_graph.add_layer( kernel=paddle_op, inputs={"x": 
input.name}, @@ -215,7 +220,7 @@ class OpSet9(): node = parameter dtype = node.dtype shape = node.out_shapes[0] - if len(node.weight.shape) == 0: + if hasattr(node.weight, "shape") and len(node.weight.shape) == 0: self.paddle_graph.add_layer( "paddle.full", inputs={}, @@ -232,8 +237,7 @@ class OpSet9(): shape=shape, attr=string(node.name), dtype=string(dtype), - default_initializer="paddle.nn.initializer.Constant(value=0.0)") - + default_initializer="paddle.nn.initializer.Constant(value=0.0)") def _pad_if_asymmetric(self, node, pads, val_name): # pads: SSEE assert len(pads) & 1 == 0 @@ -299,6 +303,10 @@ class OpSet9(): attrs.update({"align_corners": False, "mode": string(mode), "align_mode": 1}) + val_x_shape = val_x.out_shapes[0] + if mode == "linear" and len(val_x_shape) == 4: + attrs["mode"] = string("bilinear") + attrs["align_corners"] = True self.paddle_graph.add_layer( kernel="paddle.nn.functional.interpolate", inputs=inputs, @@ -382,63 +390,131 @@ class OpSet9(): def Pad(self, node, op_independent=True): val_x = self.graph.get_input_node(node, idx=0, copy=True) pads = node.get_attr('pads') + is_pads_attr = True + if pads is None: + val_pad = self.graph.get_input_node(node, idx=1, copy=True) + pad_shape = val_pad.out_shapes[0] + is_pads_attr = False + pads = _const_weight_or_none(val_pad) + if pads is not None: + is_pads_attr = True mode = node.get_attr('mode', 'constant') value = node.get_attr('value', 0.) data_shape = val_x.out_shapes[0] output_shape = node.out_shapes[0] - assume_pad2d = False + assume_pad = False layer_attrs = {} layer_attrs['mode'] = string(mode) - paddings = [] - if len(pads) == 4: - assume_pad2d |= mode != 'constant' - if data_shape: - assume_pad2d |= data_shape and len(data_shape) == 4 # NCHW - if output_shape: - assume_pad2d |= output_shape and len(output_shape) == 4 # NCHW - if assume_pad2d: - paddle_op = 'paddle.nn.Pad2D' - layer_attrs['data_format'] = string('NCHW') - layer_attrs['value'] = value - else: - paddle_op = 'paddle.fluid.layers.pad' - layer_attrs["pad_value"] = value - if len(pads) == 4: - paddings = np.array(pads).reshape( - (-1, 2)).transpose().flatten().tolist() # SSEE -> SESE - elif len(pads) == 8: - paddings = np.array(pads).reshape( - (-1, 4)).transpose().flatten().tolist() # SSEE -> SESE - if sum(paddings[:4]) == 0: - paddle_op = 'paddle.nn.Pad2D' - paddings = paddings[4:] - layer_attrs['value'] = value - if 'pad_value' in layer_attrs: - layer_attrs.pop('pad_value') - tmp_paddings = copy.deepcopy(paddings) - paddings[0] = tmp_paddings[2] - paddings[1] = tmp_paddings[3] - paddings[2] = tmp_paddings[0] - paddings[3] = tmp_paddings[1] - if paddle_op == 'paddle.nn.Pad2D': - layer_attrs['padding'] = paddings - nn_op_name = name_generator("pad2d", self.nn_name2id) + layer_attrs['value'] = value + if not op_independent: + output_name = node.name + '_paded' else: - layer_attrs['paddings'] = paddings - if op_independent: + output_name = node.name + nn_op_name = name_generator("pad", self.nn_name2id) + layer_outputs = [nn_op_name, output_name] + if is_pads_attr: + paddings = [] + if len(pads) in [2, 4, 6]: + if data_shape: + assume_pad |= data_shape and 2 * (len(data_shape) - 2) == len(pads) # NCHW + if output_shape: + assume_pad |= output_shape and 2 * (len(output_shape) - 2) == len(pads) # NCHW + if assume_pad: + paddle_op = 'paddle.nn.Pad{}D'.format(len(output_shape) - 2) + paddings = np.array(pads).reshape( + (2, -1)).transpose().astype("int32") + paddings = np.flip(paddings, axis=0).flatten().tolist() + layer_attrs['padding'] = paddings + 
else: + if data_shape: + assume_pad |= data_shape and 2 * len(data_shape) == len(pads) # NCHW + if output_shape: + assume_pad |= output_shape and 2 * len(output_shape) == len(pads) # NCHW + if assume_pad: + paddle_op = 'paddle.nn.functional.pad' + paddings = np.array(pads).reshape( + (2, -1)).transpose().astype("int32").flatten().tolist() + layer_attrs['pad'] = paddings + else: + raise Exception("The padding value {} is wrong!".format(pads)) + elif len(pads) == 8: + if data_shape: + assume_pad |= data_shape and 2 * len(data_shape) == len(pads) # NCHW + if output_shape: + assume_pad |= output_shape and 2 * len(output_shape) == len(pads) # NCHW + if assume_pad: + paddle_op = 'paddle.nn.Pad2D' + paddings = np.array(pads).reshape( + (2, -1)).transpose().astype("int32") + paddings = np.flip(paddings, axis=0).flatten().tolist() + if sum(paddings[:4]) == 0: + paddings = paddings[4:] + layer_attrs['padding'] = paddings + else: + layer_attrs["pad"] = paddings + paddle_op = "custom_layer:PadAllDim4WithOneInput" + else: + raise Exception("The padding value {} is wrong!".format(pads)) self.paddle_graph.add_layer( paddle_op, inputs={'x': val_x.name}, - outputs=[nn_op_name, node.name] if paddle_op == 'paddle.nn.Pad2D' else [node.name], + outputs=layer_outputs[1:] if paddle_op == 'paddle.nn.functional.pad' else layer_outputs, **layer_attrs) + if not op_independent: + return node.name + '_paded' else: - self.paddle_graph.add_layer( - paddle_op, - inputs={'x': val_x.name}, - outputs=[nn_op_name, node.name + '_paded'] if paddle_op == 'paddle.nn.Pad2D' \ - else [node.name + '_paded'], - **layer_attrs) - return node.name + '_paded' + pads_len = val_pad.out_shapes[0][0] + if pads_len in [2, 4, 6]: + if data_shape: + assume_pad |= data_shape and 2 * (len(data_shape) - 2) == pads_len # NCHW + if output_shape: + assume_pad |= output_shape and 2 * (len(output_shape) - 2) == pads_len # NCHW + if assume_pad: + if pads_len == 2: + data_format = "NCL" + elif pads_len == 4: + data_format = "NCHW" + else: + data_format = "NCDHW" + self.paddle_graph.add_layer( + "custom_layer:PadWithTwoInput", + inputs={'x': val_x.name, 'pad': val_pad.name}, + outputs=layer_outputs, + value=value, + mode=string(mode), + data_format=string(data_format)) + else: + if data_shape: + assume_pad |= data_shape and 2 * len(data_shape) == pads_len # NCHW + if output_shape: + assume_pad |= output_shape and 2 * len(output_shape) == pads_len # NCHW + if assume_pad: + if pads_len == 4: + self.paddle_graph.add_layer( + "custom_layer:PadAllDim2", + inputs={'x': val_x.name, 'pad': val_pad.name}, + outputs=layer_outputs, + value=value, + mode=string(mode)) + else: + raise Exception("The padding value is wrong!") + elif pads_len == 8: + if data_shape: + assume_pad |= data_shape and 2 * len(data_shape) == pads_len # NCHW + if output_shape: + assume_pad |= output_shape and 2 * len(output_shape) == pads_len # NCHW + if assume_pad: + self.paddle_graph.add_layer( + "custom_layer:PadAllDim4", + inputs={'x': val_x.name, 'pad': val_pad.name}, + outputs=layer_outputs, + value=value, + mode=string(mode)) + else: + print(pads_len) + raise Exception("The padding value is wrong!") + if not op_independent: + return node.name + '_paded' @print_mapping_info def Unsqueeze(self, node): @@ -530,11 +606,13 @@ class OpSet9(): val_scale = self.graph.get_input_node(node, idx=1, copy=True) val_b = self.graph.get_input_node(node, idx=2, copy=True) epsilon = node.get_attr('epsilon', 1e-5) + self.weights[op_name+'.scale'] = self.weights[val_scale.name] + 
self.weights[op_name+'.bias'] = self.weights[val_b.name] layer_attrs = { 'num_features': node.out_shapes[0][1], 'epsilon': epsilon, - 'weight_attr': string(val_scale.name), - 'bias_attr': string(val_b.name) + #'weight_attr': string(val_scale.name), + #'bias_attr': string(val_b.name) } dim = len(val_x.out_shapes[0]) if dim == 3: @@ -640,7 +718,7 @@ class OpSet9(): self.paddle_graph.add_layer( 'paddle.cast', inputs={"x": indices.name}, - outputs=indices_cast, + outputs=[indices_cast], dtype=string('int64')) op_name = name_generator("embedding", self.nn_name2id) output_name = node.name @@ -649,8 +727,9 @@ class OpSet9(): 'paddle.nn.Embedding', inputs={"x": indices_cast}, outputs=layer_outputs, - param_attr=string(val_x.name), - size=val_x.out_shapes[0]) + weight_attr=string(val_x.name), + num_embeddings=val_x.out_shapes[0][0], + embedding_dim=val_x.out_shapes[0][1]) else: from functools import reduce reshape_shape = reduce(lambda x, y: x * y, indices_shape) @@ -822,20 +901,27 @@ class OpSet9(): starts = self.graph.get_input_node(node, idx=1, copy=True) ends = self.graph.get_input_node(node, idx=2, copy=True) starts_value = _const_weight_or_none(starts) + if starts_value is not None: + starts_value = starts_value.tolist() ends_value = _const_weight_or_none(ends) - + if ends_value is not None: + ends_value = ends_value.tolist() + if len(node.inputs) > 2: + s_len = len(val_x.out_shapes[0]) + axes = list(range(s_len)) if len(node.inputs) > 3: - axes = self.graph.get_input_node(node, idx=3, copy=True) - axes = _const_weight_or_none(axes, necessary=True) + axes_node = self.graph.get_input_node(node, idx=3, copy=True) + axes = _const_weight_or_none(axes_node, necessary=True).tolist() if len(node.inputs) > 4: steps = self.graph.get_input_node(node, idx=4, copy=True) - steps = _const_weight_or_none(steps) + steps = _const_weight_or_none(steps).tolist() + layer_attrs = { "axes": axes, "starts": starts.name, "ends": ends.name } - if starts_value is not None and ends_value is not None: + if starts_value is not None and ends_value is not None and axes is not None: starts_value = starts_value.copy() ends_value = ends_value.copy() #for idx in range(len(ends_value)): @@ -865,6 +951,8 @@ class OpSet9(): layer_attrs['starts'] = starts_cast if ends.dtype != 'int32': ends_cast = ends.name + '_cast' + else: + ends_cast = ends.name self.paddle_graph.add_layer( 'paddle.cast', inputs={"x": ends.name}, @@ -880,6 +968,7 @@ class OpSet9(): ends[idx] = 2**31 - 1 layer_attrs = {"axes": axes, "starts": starts, "ends": ends} + if steps is not None: layer_attrs['strides'] = steps self.paddle_graph.add_layer( @@ -1005,11 +1094,17 @@ class OpSet9(): inputs={'x': val_shape.name}, outputs=[val_shape.name], shape=val_shape.out_shapes[0]) + if val_shape.dtype != "int32": + self.paddle_graph.add_layer( + 'paddle.cast', + inputs={'x': val_shape.name}, + outputs=[val_shape.name], + dtype=string("int32")) self.paddle_graph.add_layer( 'paddle.reshape', inputs={'x': val_x.name, 'shape': val_shape.name}, - outputs=node) + outputs=[node.name]) @print_mapping_info def Cast(self, node): @@ -1227,6 +1322,10 @@ class OpSet9(): epsilon = node.get_attr('epsilon', 1e-5) c = val_x.out_shapes[0][1] + self.weights[op_name + '.weight'] = self.weights[val_scale.name] + self.weights[op_name + '.bias'] = self.weights[val_b.name] + self.weights[op_name + '._variance'] = self.weights[val_var.name] + self.weights[op_name + '._mean'] = self.weights[val_mean.name] # Attribute: spatial is used in BatchNormalization-1,6,7 spatial = 
bool(node.get_attr('spatial')) layer_attrs = { @@ -1234,10 +1333,10 @@ "momentum": momentum, "epsilon": epsilon, "is_test": True, - "param_attr": string(val_scale.name), - "bias_attr": string(val_b.name), - "moving_mean_name": string(val_mean.name), - "moving_variance_name": string(val_var.name), + #"param_attr": string(val_scale.name), + #"bias_attr": string(val_b.name), + #"moving_mean_name": string(val_mean.name), + #"moving_variance_name": string(val_var.name), "use_global_stats": False, } self.paddle_graph.add_layer( @@ -1249,7 +1348,10 @@ @print_mapping_info def Transpose(self, node): val_x = self.graph.get_input_node(node, idx=0, copy=True) - perm = node.get_attr('perm') + s_len = len(val_x.out_shapes[0]) + perm_default = list(range(s_len)) + perm_default.reverse() + perm = node.get_attr('perm', perm_default) self.paddle_graph.add_layer( "paddle.transpose", inputs={"x": val_x.name}, @@ -1266,26 +1368,51 @@ mode = 'channel' shape_slope = val_slope.out_shapes[0] - if shape_slope == [1]: + if shape_slope == [1] * len(shape_slope): mode = 'all' - elif len(shape_slope) > 2: - raise Exception("The 'element' mode is not supported yet!") - - if mode == 'channel' and len(shape_slope) == 1: - # paddle params shape need be [1, channel] - slope_data = _const_weight_or_none(val_slope) - slope_data = np.reshape(slope_data, [1] + shape_slope) - self.weights[val_slope.name] = slope_data - num_parameters = val_x.out_shapes[0][1] - else: - num_parameters = 1 - self.paddle_graph.add_layer( - "paddle.nn.PReLU", - inputs={"x": val_x.name}, - outputs=layer_outputs, - num_parameters=num_parameters, - weight_attr=string(val_slope.name)) + if mode == "element": + self.paddle_graph.add_layer( + "paddle.zeros", + inputs={}, + outputs=[output_name + "__zeros"], + shape=shape_slope, + dtype=string(node.dtype)) + self.paddle_graph.add_layer( + "paddle.maximum", + inputs={"x": val_x.name, + "y": output_name + "__zeros"}, + outputs=[output_name + "__max"]) + self.paddle_graph.add_layer( + "paddle.minimum", + inputs={"x": val_x.name, + "y": output_name + "__zeros"}, + outputs=[output_name + "__min"]) + self.paddle_graph.add_layer( + "paddle.multiply", + inputs={"x": val_slope.name, + "y": output_name + "__min"}, + outputs=[output_name + "__mul"]) + self.paddle_graph.add_layer( + "paddle.add", + inputs={"x": output_name + "__max", + "y": output_name + "__mul"}, + outputs=[output_name]) + else: + if mode == 'channel': + slope_data = _const_weight_or_none(val_slope) + if len(shape_slope) > 1: + #self.weights[val_slope.name] = np.reshape(slope_data, shape_slope[0]) + self.weights[op_name+'._weight'] = np.reshape(slope_data, shape_slope[0]) + num_parameters = val_x.out_shapes[0][1] + else: + num_parameters = 1 + self.weights[op_name+'._weight'] = np.reshape(self.weights[val_slope.name], [1]) + self.paddle_graph.add_layer( + "paddle.nn.PReLU", + inputs={"x": val_x.name}, + outputs=layer_outputs, + num_parameters=num_parameters) @print_mapping_info def Squeeze(self, node): @@ -1553,6 +1680,7 @@ strides[1]) paddings = pad_h + pad_w + layer_inputs = {'x': val_x if isinstance(val_x, str) else val_x.name} layer_attrs = { "in_channels": num_in_channels * num_groups, "out_channels": num_out_channels, @@ -1561,20 +1689,41 @@ "padding": paddings, "dilation": dilations, "groups": num_groups, - 'weight_attr': string(val_w.name), } + val_w_name = val_w.name + while val_w_name in self.done_weight_list: + val_w_name += "__repeat" +
self.done_weight_list.append(val_w_name) + self.weights[op_name + '.weight'] = self.weights[val_w.name] if has_bias: - layer_attrs["bias_attr"] = string(val_b.name) + val_b_name = val_b.name + while val_b_name in self.done_weight_list: + val_b_name += "__repeat" + self.done_weight_list.append(val_b_name) + self.weights[op_name + '.bias'] = self.weights[val_b.name] else: layer_attrs["bias_attr"] = False + input_shape = val_x.out_shapes[0] + if reduce(lambda x,y:x*y, input_shape) in [1, -1] and 1 not in input_shape: + input_shape[1] = num_in_channels * num_groups + input_shape[0] = 0 + input_shape[2] = 0 + self.paddle_graph.add_layer( + "paddle.reshape", + inputs=layer_inputs, + outputs=[layer_inputs["x"]], + shape=input_shape) self.paddle_graph.add_layer( paddle_op, - inputs={'x': val_x if isinstance(val_x, str) else val_x.name}, + inputs=layer_inputs, outputs=layer_outputs, **layer_attrs) @print_mapping_info def ConvTranspose(self, node): + op_name = name_generator("conv_trans", self.nn_name2id) + output_name = node.name + layer_outputs = [op_name, output_name] val_x = self.graph.get_input_node(node, idx=0, copy=True) val_w = self.graph.get_input_node(node, idx=1, copy=True) val_b = None @@ -1588,7 +1737,7 @@ class OpSet9(): assert 2 <= convnd <= 3, 'only Conv2DTranspose and Conv3DTranspose supported' num_in_channels = val_w.out_shapes[0][0] num_out_channels = val_w.out_shapes[0][1] - paddle_op = 'paddle.nn.functional.conv{}d_transpose'.format(convnd) + paddle_op = 'paddle.nn.Conv{}DTranspose'.format(convnd) num_groups = node.get_attr('group', 1) strides = node.get_attr('strides', [1] * convnd) @@ -1607,22 +1756,22 @@ class OpSet9(): ) * strides[1] - 2 * paddings[1] + dilations[1] * ( kernel_shape[1] - 1) + 1 + out_padding[1] # Conv2DTranspose缺少output_size,只能在forward里头传进output_size - inputs_dict = {'x': val_x if isinstance(val_x, str) else val_x.name, - "weight": val_w.name} + inputs_dict = {'x': val_x if isinstance(val_x, str) else val_x.name} layer_attrs = { + "in_channels": num_in_channels, + "out_channels": num_out_channels, + 'kernel_size': kernel_shape, "stride": strides, "dilation": dilations, "padding": paddings, - "groups": num_groups, - "output_size": node.out_shapes[0][2:]} + "groups": num_groups} + self.weights[op_name + '.weight'] = self.weights[val_w.name] if val_b is not None: - inputs_dict["bias"] = val_b.name - else: - layer_attrs["bias"] = None + self.weights[op_name + '.bias'] = self.weights[val_b.name] self.paddle_graph.add_layer( - kernel="paddle.nn.functional.conv2d_transpose", + kernel=paddle_op, inputs=inputs_dict, - outputs=[node.name], + outputs=layer_outputs, **layer_attrs) @print_mapping_info @@ -1638,64 +1787,158 @@ class OpSet9(): outputs=[node.name], **layer_attrs) + @print_mapping_info - def LSTM(self, node): - # parameters order in paddle:lstm: - # 1. gate order in paddle is: input, forget, cell, output. - # 2. gate orfer in onnx is: input, output, forget, cell. 
- - def reform_weights(w, n, intervals): - slices = [w[:,x * n: y * n] for x, y in intervals] - return np.concatenate(slices, axis=1) - - def transform_weight_with_bias(weights, n, intervals): - return [reform_weights(w, n, intervals) for w in weights] + def Size(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + self.paddle_graph.add_layer( + "paddle.shape", + inputs={"input": val_x.name}, + outputs=[node.name]) + self.paddle_graph.add_layer( + 'paddle.cast', + inputs={"x": node.name}, + outputs=[node.name], + dtype=string('int64')) + self.paddle_graph.add_layer( + "paddle.prod", + inputs={"x": node.name}, + outputs=[node.name]) + + @print_mapping_info + def Sign(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + if node.dtype not in ["float16", "float32", "float64"]: + self.paddle_graph.add_layer( + "paddle.cast", + inputs={"x": val_x.name}, + outputs=[val_x.name], + dtype=string("float32")) + self.paddle_graph.add_layer( + "paddle.sign", + inputs={"x": val_x.name}, + outputs=[node.name]) + if node.dtype not in ["float16", "float32", "float64"]: + self.paddle_graph.add_layer( + "paddle.cast", + inputs={"x": node.name}, + outputs=[node.name], + dtype=string(node.dtype)) + + @print_mapping_info + def OneHot(self, node): + nn_op_name = name_generator("onehot", self.nn_name2id) + output_name = node.name + layer_outputs = [nn_op_name, output_name] + indices = self.graph.get_input_node(node, idx=0, copy=True) + depth = self.graph.get_input_node(node, idx=1, copy=True) + values = self.graph.get_input_node(node, idx=2, copy=True) + axis = node.get_attr('axis', -1) + self.paddle_graph.add_layer( + "custom_layer:OneHot", + inputs={"indices": indices.name, + "depth": depth.name, + "values": values.name}, + outputs=layer_outputs, + axis=axis) + + @print_mapping_info + def Reciprocal(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + self.paddle_graph.add_layer( + "paddle.reciprocal", + inputs={"x": val_x.name}, + outputs=[node.name]) - print(node.layer.input) + @print_mapping_info + def LSTM(self, node): x = self.graph.get_input_node(node, idx=0, copy=True) input_weight = self.graph.get_input_node(node, idx=1, copy=True) hidden_weight = self.graph.get_input_node(node, idx=2, copy=True) input_nums = len(node.layer.input) exist_input_nums = 3 + have_bias = False if input_nums > 3 and node.layer.input[3] != '': bias = self.graph.get_input_node(node, idx=exist_input_nums, copy=True) + have_bias = True exist_input_nums += 1 if input_nums > 4 and node.layer.input[4] != '': sequence_lens = self.graph.get_input_node(node, idx=exist_input_nums, copy=True) exist_input_nums += 1 if input_nums > 5 and node.layer.input[5] != '': init_h = self.graph.get_input_node(node, idx=exist_input_nums, copy=True) + self.paddle_graph.add_layer( + 'paddle.reshape', + inputs={"x": init_h.name}, + outputs=[init_h.name], + shape=init_h.out_shapes[0] + ) exist_input_nums += 1 if input_nums > 6 and node.layer.input[6] != '': init_c = self.graph.get_input_node(node, idx=exist_input_nums, copy=True) + self.paddle_graph.add_layer( + 'paddle.reshape', + inputs={"x": init_c.name}, + outputs=[init_c.name], + shape=init_c.out_shapes[0] + ) input_weight_np = _const_weight_or_none(input_weight) - hidden_size = node.get_attr('hidden_size', input_weight_np.shape[1]/3) + hidden_size = node.get_attr('hidden_size', input_weight_np.shape[1]/4) input_size = input_weight_np.shape[2] hidden_weight_np = _const_weight_or_none(hidden_weight) bias_np = _const_weight_or_none(bias) - 
input_bias_np = bias_np[:, :3*hidden_size] - hidden_bias_np = bias_np[:, 3*hidden_size:] + input_bias_np = bias_np[:, :4*hidden_size] + hidden_bias_np = bias_np[:, 4*hidden_size:] + + # parameters order in paddle:lstm: + # 1. gate order in paddle is: input, forget, cell, output. + # 2. gate order in onnx is: input, output, forget, cell. + + def reform_weights(w, n, intervals): + slices = [w[:,x * n: y * n] for x, y in intervals] + return np.concatenate(slices, axis=1) - reform_permutation = [(0, 1), (3, 4), (1, 3)] + def transform_weight_with_bias(weights, n, intervals): + return [reform_weights(w, n, intervals) for w in weights] + + reform_permutation = [(0, 1), (2, 4), (1, 2)] input_weight_np, hidden_weight_np, input_bias_np, hidden_bias_np = transform_weight_with_bias( [input_weight_np, hidden_weight_np, input_bias_np, hidden_bias_np], hidden_size, reform_permutation) - self.weights[input_weight.name] = input_weight_np - self.weights[hidden_weight.name] = hidden_weight_np - input_bias_name = bias.name + '_input' - hidden_bias_name = bias.name + '_hidden' - self.weights[input_bias_name] = input_bias_np - self.weights[hidden_bias_name] = hidden_bias_np - op_name = name_generator("lstm", self.nn_name2id) - y_out = node.output(0) yh_out = node.output(1) yc_out = node.output(2) + direction = node.get_attr('direction', 'forward') + if direction == 'backward': + raise Exception("LSTM supports 'forward' or 'bidirectional', but received '{}'.".format(direction)) + elif direction == 'forward': + self.weights[input_weight.name] = input_weight_np.squeeze(0) + self.weights[hidden_weight.name] = hidden_weight_np.squeeze(0) + self.weights[input_bias_name] = input_bias_np.squeeze(0) + self.weights[hidden_bias_name] = hidden_bias_np.squeeze(0) + else: + param_names = [] + for direct in range(2): + suffix = '_reverse' if direct == 1 else '' + param_names.extend(['{}.weight_ih_l0{}', '{}.weight_hh_l0{}']) + if have_bias != False: param_names.append('{}.bias_ih_l0{}') + if have_bias != False: param_names.append('{}.bias_hh_l0{}') + param_names = [x.format(op_name, suffix) for x in param_names] + + self.weights[param_names[0]] = input_weight_np[0] + self.weights[param_names[4]] = input_weight_np[1] + self.weights[param_names[1]] = hidden_weight_np[0] + self.weights[param_names[5]] = hidden_weight_np[1] + self.weights[param_names[2]] = input_bias_np[0] + self.weights[param_names[6]] = input_bias_np[1] + self.weights[param_names[3]] = hidden_bias_np[0] + self.weights[param_names[7]] = hidden_bias_np[1] + self.paddle_graph.add_layer( 'paddle.nn.LSTM', inputs={'input': x.name, 'initial_states': (init_h.name, init_c.name)}, @@ -1703,18 +1946,14 @@ class OpSet9(): input_size=input_size, hidden_size=hidden_size, num_layers=1, - weight_ih_attr=string(input_weight.name), - weight_hh_attr=string(hidden_weight.name), - bias_ih_attr=string(input_bias_name), - bias_hh_attr=string(hidden_bias_name), - direction=string(node.get_attr('direction')), + direction=string(direction), time_major=True) self.paddle_graph.add_layer( 'paddle.reshape', inputs={"x": y_out}, outputs=[y_out], - shape=[-1, -1, -1, hidden_size] + shape=[0, 0, -1, hidden_size] ) self.paddle_graph.add_layer( 'paddle.transpose', diff --git a/x2paddle/op_mapper/dygraph/pytorch2paddle/pytorch_custom_layer/gather.py index 10850ee5bbf91fa42e39f4dbd67ec1fa0d6682d7..6ef9d488587cdbe01dbf7ae343a008094113bc81 100644 --- a/x2paddle/op_mapper/dygraph/pytorch2paddle/pytorch_custom_layer/gather.py +++
b/x2paddle/op_mapper/dygraph/pytorch2paddle/pytorch_custom_layer/gather.py @@ -13,8 +13,6 @@ # limitations under the License. import paddle -from itertools import product -import numpy as np class Gather(object): def __init__(self, dim): diff --git a/x2paddle/op_mapper/dygraph/tf2paddle/tf_op_mapper.py b/x2paddle/op_mapper/dygraph/tf2paddle/tf_op_mapper.py index fe940663f07e0ba2ba0f22e53c2e3e711ef8757d..37136a12b2fa6b2c36e9382561f6b501c46edd69 100644 --- a/x2paddle/op_mapper/dygraph/tf2paddle/tf_op_mapper.py +++ b/x2paddle/op_mapper/dygraph/tf2paddle/tf_op_mapper.py @@ -642,27 +642,6 @@ class TFOpMapper(OpMapper): assert paddings.layer_type == "Const", "Padding should be Const" paddings = paddings.value.flatten().tolist() - if len(input.out_shapes[0]) == 4: - if paddings[0] + paddings[1] + paddings[6] + paddings[7] == 0: - new_padding = paddings[2:6] - transpose_name = gen_name("pad", "transpose") - self.paddle_graph.add_layer( - kernel="paddle.transpose", - inputs={"x": input.name}, - outputs=[transpose_name], - perm=[0, 3, 1, 2]) - self.paddle_graph.add_layer( - kernel="paddle.nn.functional.pad", - inputs={"x": transpose_name}, - outputs=[node.name], - pad=new_padding) - self.paddle_graph.add_layer( - kernel="paddle.transpose", - inputs={"x": node.name}, - outputs=[node.name], - perm=[0, 2, 3, 1]) - return - self.paddle_graph.add_layer( kernel="paddle.nn.functional.pad", inputs={"x": input.name}, @@ -670,31 +649,11 @@ class TFOpMapper(OpMapper): pad=paddings) def MirrorPad(self, node): - op_name = name_generator("pad", self.nn_name2id) - output_name = node.name - layer_outputs = [op_name, output_name] - input = self.graph.get_input_node(node, 0) - paddings = self.graph.get_input_node(node, 1) - assert paddings.layer_type == "Const", "Padding should be Const" - new_paddings = numpy.flip(paddings.value, 0).flatten().tolist() - dim = int(len(new_paddings) / 2) - transpose_name = gen_name("pad", "transpose") - self.paddle_graph.add_layer( - kernel="paddle.transpose", - inputs={"x": input.name}, - outputs=[transpose_name], - perm=[0, 3, 1, 2]) - self.paddle_graph.add_layer( - kernel="paddle.nn.Pad{}D".format(dim), - inputs={"x": transpose_name}, - outputs=layer_outputs, - pad=new_paddings) - self.paddle_graph.add_layer( - kernel="paddle.transpose", - inputs={"x": node.name}, - outputs=[node.name], - perm=[0, 2, 3, 1]) + self.Pad(node) + + def PadV2(self, node): + self.Pad(node) def Squeeze(self, node): input = self.graph.get_input_node(node, 0) diff --git a/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/__init__.py b/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d8161ae1f8a76bff61a1cd8a23de15b3a1bf9549 --- /dev/null +++ b/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
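With the NHWC special cases removed in the TF mapper above, Pad (and now MirrorPad and PadV2 via delegation) flattens the [n, 2] paddings constant and hands it straight to paddle.nn.functional.pad, which, for a constant-mode pad list of length 2 * ndim, pads from the first dimension to the last. A sketch with illustrative values:

import paddle

paddings = [[0, 0], [1, 1], [2, 2], [0, 0]]    # TF-style [n, 2] paddings (NHWC)
flat = [p for pair in paddings for p in pair]  # [0, 0, 1, 1, 2, 2, 0, 0]
x = paddle.ones([1, 3, 3, 2])
y = paddle.nn.functional.pad(x, pad=flat)      # constant mode: first dim -> last dim
print(y.shape)                                 # [1, 5, 7, 2]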
+ + +from .one_hot import one_hot +from .pad_two_input import pad_with_two_input +from .pad_all_dim2 import pad_all_dim2 +from .pad_all_dim4 import pad_all_dim4 +from .pad_all_dim4_one_input import pad_all_dim4_one_input \ No newline at end of file diff --git a/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/one_hot.py b/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/one_hot.py new file mode 100644 index 0000000000000000000000000000000000000000..146f643f8c1f014eef79095107ff8de027127c9d --- /dev/null +++ b/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/one_hot.py @@ -0,0 +1,34 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle + +def one_hot(indices, depth, values, axis): + indices_shape = indices.shape + rank = len(indices.shape) + real_axis = axis + if axis < 0: + real_axis = axis + rank + 1 + depth_range = paddle.arange(end=depth) + ls = tuple(indices_shape[0: real_axis]) + rs = tuple(indices_shape[real_axis: rank]) + targets = paddle.reshape(depth_range, (1,) * (real_axis-0) + tuple(depth_range.shape) + (1,) * (rank-real_axis)) + mod = paddle.mod(indices, depth) + v = paddle.reshape(mod, ls + (1,) + rs) + out = targets == v + out = paddle.cast(out, "float32") + on_value = paddle.slice(values, axes=[0], starts=[1], ends=[2]) + off_value = paddle.slice(values, axes=[0], starts=[0], ends=[1]) + out = out * (on_value - off_value) + off_value + return out \ No newline at end of file diff --git a/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/pad_all_dim2.py b/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/pad_all_dim2.py new file mode 100644 index 0000000000000000000000000000000000000000..59af5987039ab962f30b6f25626f365ba76e6dfa --- /dev/null +++ b/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/pad_all_dim2.py @@ -0,0 +1,30 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import paddle + +def pad_all_dim2(x, pad, value, mode): + pad = paddle.reshape(pad, shape=[2, -1]) + pad = paddle.transpose(pad, perm=[1, 0]) + pad = paddle.reverse(pad, axis=[0]) + pad = paddle.flatten(pad) + pad = paddle.cast(pad, dtype="int32") + x = paddle.unsqueeze(x, axis=[0, 1]) + out = paddle.nn.functional.pad(x=x, + pad=pad, + mode=mode, + data_format='NCHW', + value=value) + out = paddle.squeeze(out, axis=[0, 1]) + return out \ No newline at end of file diff --git a/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/pad_all_dim4.py b/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/pad_all_dim4.py new file mode 100644 index 0000000000000000000000000000000000000000..df8c42c3379baa5f0b7f88b4085cb2880a4b396f --- /dev/null +++ b/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/pad_all_dim4.py @@ -0,0 +1,36 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle + +def pad_all_dim4(x, pad, value, mode): + pad = paddle.reshape(pad, shape=[2, -1]) + pad = paddle.transpose(pad, perm=[1, 0]) + pad = paddle.reverse(pad, axis=[0]) + pad = paddle.flatten(pad) + pad = paddle.cast(pad, dtype="int32") + pad1, pad2 = paddle.split(pad, num_or_sections=2, axis=0) + x = paddle.nn.functional.pad(x=x, + pad=pad1, + mode=mode, + data_format='NCHW', + value=value) + x = paddle.transpose(x, perm=[2, 3, 0, 1]) + x = paddle.nn.functional.pad(x=x, + pad=pad2, + mode=mode, + data_format='NCHW', + value=value) + out = paddle.transpose(x, perm=[2, 3, 0, 1]) + return out \ No newline at end of file diff --git a/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/pad_all_dim4_one_input.py b/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/pad_all_dim4_one_input.py new file mode 100644 index 0000000000000000000000000000000000000000..796b9f366cb1666abe88f07462d60b045e396a35 --- /dev/null +++ b/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/pad_all_dim4_one_input.py @@ -0,0 +1,30 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
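Why pad_all_dim4 pads twice: with data_format='NCHW', a four-element pad only touches the trailing H and W axes, so the function pads H/W first, transposes N/C into the trailing positions, pads them, and transposes back. A minimal sketch with illustrative values:

import paddle

x = paddle.ones([1, 2, 3, 3])                  # NCHW
pad_hw = [1, 1, 1, 1]                          # pads W and H
pad_nc = [1, 1, 0, 0]                          # after transpose: pads C (and N)
y = paddle.nn.functional.pad(x, pad=pad_hw, mode="constant", value=0.0, data_format="NCHW")
y = paddle.transpose(y, perm=[2, 3, 0, 1])     # N, C move to the trailing axes
y = paddle.nn.functional.pad(y, pad=pad_nc, mode="constant", value=0.0, data_format="NCHW")
y = paddle.transpose(y, perm=[2, 3, 0, 1])     # back to NCHW
print(y.shape)                                 # [1, 4, 5, 5]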
+ +import paddle + +def pad_all_dim4_one_input(x, pad, value, mode): + x = paddle.nn.functional.pad(x=x, + pad=pad[0: 4], + mode=mode, + data_format='NCHW', + value=value) + x = paddle.transpose(x, perm=[2, 3, 0, 1]) + x = paddle.nn.functional.pad(x=x, + pad=pad[4: 9], + mode=mode, + data_format='NCHW', + value=value) + out = paddle.transpose(x, perm=[2, 3, 0, 1]) + return out \ No newline at end of file diff --git a/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/pad_two_input.py b/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/pad_two_input.py new file mode 100644 index 0000000000000000000000000000000000000000..dfcbe49f020907462fd0da8a433089bced11ca95 --- /dev/null +++ b/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/pad_two_input.py @@ -0,0 +1,28 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle + +def pad_with_two_input(x, pad, value, mode, data_format): + pad = paddle.reshape(pad, shape=[2, -1]) + pad = paddle.transpose(pad, perm=[1, 0]) + pad = paddle.reverse(pad, axis=[0]) + pad = paddle.flatten(pad) + pad = paddle.cast(pad, dtype="int32") + out = paddle.nn.functional.pad(x=x, + pad=pad, + value=value, + mode=mode, + data_format=data_format) + return out \ No newline at end of file diff --git a/x2paddle/op_mapper/static/onnx2paddle/opset9/opset.py b/x2paddle/op_mapper/static/onnx2paddle/opset9/opset.py index 2f94eb263dcb16175d549cdcb4f3528960fe6504..cd2be216883a599243cc730b73bdf1fd562529d9 100644 --- a/x2paddle/op_mapper/static/onnx2paddle/opset9/opset.py +++ b/x2paddle/op_mapper/static/onnx2paddle/opset9/opset.py @@ -106,6 +106,9 @@ class OpSet9(): 'ReduceMax': ['paddle.max', dict(axes='axis', keepdims='keepdim'), dict(keepdim=1)], + 'ReduceProd': ['paddle.prod', + dict(axes='axis', keepdims='keepdim'), + dict(keepdim=1)], # active function 'Relu': ['paddle.nn.functional.relu'], 'LeakyRelu': ['paddle.nn.functional.leaky_relu', @@ -203,7 +206,7 @@ class OpSet9(): node = parameter dtype = node.dtype shape = node.out_shapes[0] - if len(node.weight.shape) == 0: + if hasattr(node.weight, "shape") and len(node.weight.shape) == 0: self.paddle_graph.add_layer( "paddle.full", inputs={}, @@ -286,6 +289,10 @@ class OpSet9(): attrs.update({"align_corners": False, "mode": string(mode), "align_mode": 1}) + val_x_shape = val_x.out_shapes[0] + if mode == "linear" and len(val_x_shape) == 4: + attrs["mode"] = string("bilinear") + attrs["align_corners"] = True self.paddle_graph.add_layer( kernel="paddle.nn.functional.interpolate", inputs=inputs, @@ -368,61 +375,136 @@ class OpSet9(): def Pad(self, node, op_independent=True): val_x = self.graph.get_input_node(node, idx=0, copy=True) pads = node.get_attr('pads') + is_pads_attr = True + if pads is None: + val_pad = self.graph.get_input_node(node, idx=1, copy=True) + pad_shape = val_pad.out_shapes[0] + is_pads_attr = False + pads = _const_weight_or_none(val_pad) + if pads is not None: + is_pads_attr = True mode = node.get_attr('mode', 'constant') value 
= node.get_attr('value', 0.) data_shape = val_x.out_shapes[0] output_shape = node.out_shapes[0] - assume_pad2d = False + assume_pad = False layer_attrs = {} layer_attrs['mode'] = string(mode) - paddings = [] - if len(pads) == 4: - assume_pad2d |= mode != 'constant' - if data_shape: - assume_pad2d |= data_shape and len(data_shape) == 4 # NCHW - if output_shape: - assume_pad2d |= output_shape and len(output_shape) == 4 # NCHW - if assume_pad2d: - paddle_op = 'paddle.nn.functional.pad' - layer_attrs['data_format'] = string('NCHW') - layer_attrs['value'] = value + layer_attrs['value'] = value + if not op_independent: + output_name = node.name + '_paded' else: - paddle_op = 'paddle.fluid.layers.pad' - layer_attrs["pad_value"] = value - if len(pads) == 4: - paddings = np.array(pads).reshape( - (-1, 2)).transpose().flatten().tolist() # SSEE -> SESE - elif len(pads) == 8: - paddings = np.array(pads).reshape( - (-1, 4)).transpose().flatten().tolist() # SSEE -> SESE - if sum(paddings[:4]) == 0: - paddle_op = 'paddle.nn.functional.pad' - paddings = paddings[4:] - layer_attrs['value'] = value - if 'pad_value' in layer_attrs: - layer_attrs.pop('pad_value') - tmp_paddings = copy.deepcopy(paddings) - paddings[0] = tmp_paddings[2] - paddings[1] = tmp_paddings[3] - paddings[2] = tmp_paddings[0] - paddings[3] = tmp_paddings[1] - if paddle_op == 'paddle.nn.functional.pad': - layer_attrs['pad'] = paddings - else: - layer_attrs['paddings'] = paddings - if op_independent: + output_name = node.name + layer_outputs = [output_name] + if is_pads_attr: + paddings = [] + paddle_op = 'paddle.nn.functional.pad' + if len(pads) in [2, 4, 6]: + if data_shape: + assume_pad |= data_shape and 2 * (len(data_shape) - 2) == len(pads) # NCHW + if output_shape: + assume_pad |= output_shape and 2 * (len(output_shape) - 2) == len(pads) # NCHW + if assume_pad: + if len(pads) == 2: + data_format = "NCL" + elif len(pads) == 4: + data_format = "NCHW" + else: + data_format = "NCDHW" + + paddings = np.array(pads).reshape( + (2, -1)).transpose().astype("int32") + paddings = np.flip(paddings, axis=0).flatten().tolist() + layer_attrs['pad'] = paddings + layer_attrs['data_format'] = data_format + else: + if data_shape: + assume_pad |= data_shape and 2 * len(data_shape) == len(pads) # NCHW + if output_shape: + assume_pad |= output_shape and 2 * len(output_shape) == len(pads) # NCHW + if assume_pad: + paddings = np.array(pads).reshape( + (2, -1)).transpose().astype("int32").flatten().tolist() + layer_attrs['pad'] = paddings + else: + raise Exception("The padding value {} is wrong!".format(pads)) + elif len(pads) == 8: + if data_shape: + assume_pad |= data_shape and 2 * len(data_shape) == len(pads) # NCHW + if output_shape: + assume_pad |= output_shape and 2 * len(output_shape) == len(pads) # NCHW + if assume_pad: + paddings = np.array(pads).reshape( + (2, -1)).transpose().astype("int32") + paddings = np.flip(paddings, axis=0).flatten().tolist() + if sum(paddings[:4]) == 0: + paddings = paddings[4:] + layer_attrs['pad'] = paddings + else: + layer_attrs['pad'] = paddings + paddle_op = "custom_layer:pad_all_dim4_one_input" + else: + raise Exception("The padding value {} is wrong!".format(pads)) self.paddle_graph.add_layer( paddle_op, inputs={'x': val_x.name}, - outputs=[node.name], + outputs=layer_outputs, **layer_attrs) + if not op_independent: + return node.name + '_paded' else: - self.paddle_graph.add_layer( - paddle_op, - inputs={'x': val_x.name}, - outputs=[node.name + '_paded'], - **layer_attrs) - return node.name + '_paded' + pads_len = 
+        if is_pads_attr:
+            paddings = []
+            paddle_op = 'paddle.nn.functional.pad'
+            if len(pads) in [2, 4, 6]:
+                if data_shape:
+                    assume_pad |= data_shape and 2 * (len(data_shape) - 2) == len(pads)  # NCHW
+                if output_shape:
+                    assume_pad |= output_shape and 2 * (len(output_shape) - 2) == len(pads)  # NCHW
+                if assume_pad:
+                    if len(pads) == 2:
+                        data_format = "NCL"
+                    elif len(pads) == 4:
+                        data_format = "NCHW"
+                    else:
+                        data_format = "NCDHW"
+
+                    paddings = np.array(pads).reshape(
+                        (2, -1)).transpose().astype("int32")
+                    paddings = np.flip(paddings, axis=0).flatten().tolist()
+                    layer_attrs['pad'] = paddings
+                    layer_attrs['data_format'] = data_format
+                else:
+                    if data_shape:
+                        assume_pad |= data_shape and 2 * len(data_shape) == len(pads)  # NCHW
+                    if output_shape:
+                        assume_pad |= output_shape and 2 * len(output_shape) == len(pads)  # NCHW
+                    if assume_pad:
+                        paddings = np.array(pads).reshape(
+                            (2, -1)).transpose().astype("int32").flatten().tolist()
+                        layer_attrs['pad'] = paddings
+                    else:
+                        raise Exception("The padding value {} is wrong!".format(pads))
+            elif len(pads) == 8:
+                if data_shape:
+                    assume_pad |= data_shape and 2 * len(data_shape) == len(pads)  # NCHW
+                if output_shape:
+                    assume_pad |= output_shape and 2 * len(output_shape) == len(pads)  # NCHW
+                if assume_pad:
+                    paddings = np.array(pads).reshape(
+                        (2, -1)).transpose().astype("int32")
+                    paddings = np.flip(paddings, axis=0).flatten().tolist()
+                    if sum(paddings[:4]) == 0:
+                        paddings = paddings[4:]
+                        layer_attrs['pad'] = paddings
+                    else:
+                        layer_attrs['pad'] = paddings
+                        paddle_op = "custom_layer:pad_all_dim4_one_input"
+            else:
+                raise Exception("The padding value {} is wrong!".format(pads))
             self.paddle_graph.add_layer(
                 paddle_op,
                 inputs={'x': val_x.name},
-                outputs=[node.name],
+                outputs=layer_outputs,
                 **layer_attrs)
+            if not op_independent:
+                return node.name + '_paded'
         else:
-            self.paddle_graph.add_layer(
-                paddle_op,
-                inputs={'x': val_x.name},
-                outputs=[node.name + '_paded'],
-                **layer_attrs)
-            return node.name + '_paded'
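+            # pads only known at run time: dispatch to a custom layer that
+            # reorders the pad tensor inside the graph before padding.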
+            pads_len = val_pad.out_shapes[0][0]
+            if pads_len in [2, 4, 6]:
+                if data_shape:
+                    assume_pad |= data_shape and 2 * (len(data_shape) - 2) == pads_len  # NCHW
+                if output_shape:
+                    assume_pad |= output_shape and 2 * (len(output_shape) - 2) == pads_len  # NCHW
+                if assume_pad:
+                    if pads_len == 2:
+                        data_format = "NCL"
+                    elif pads_len == 4:
+                        data_format = "NCHW"
+                    else:
+                        data_format = "NCDHW"
+                    self.paddle_graph.add_layer(
+                        "custom_layer:pad_with_two_input",
+                        inputs={'x': val_x.name, 'pad': val_pad.name},
+                        outputs=layer_outputs,
+                        value=value,
+                        mode=string(mode),
+                        data_format=string(data_format))
+                else:
+                    if data_shape:
+                        assume_pad |= data_shape and 2 * len(data_shape) == pads_len  # NCHW
+                    if output_shape:
+                        assume_pad |= output_shape and 2 * len(output_shape) == pads_len  # NCHW
+                    if assume_pad:
+                        if pads_len == 4:
+                            self.paddle_graph.add_layer(
+                                "custom_layer:pad_all_dim2",
+                                inputs={'x': val_x.name, 'pad': val_pad.name},
+                                outputs=layer_outputs,
+                                value=value,
+                                mode=string(mode))
+                        else:
+                            raise Exception("The padding value is wrong!")
+            elif pads_len == 8:
+                if data_shape:
+                    assume_pad |= data_shape and 2 * len(data_shape) == pads_len  # NCHW
+                if output_shape:
+                    assume_pad |= output_shape and 2 * len(output_shape) == pads_len  # NCHW
+                if assume_pad:
+                    self.paddle_graph.add_layer(
+                        "custom_layer:pad_all_dim4",
+                        inputs={'x': val_x.name, 'pad': val_pad.name},
+                        outputs=layer_outputs,
+                        value=value,
+                        mode=string(mode))
+            else:
+                raise Exception("The padding value {} is wrong!".format(pads_len))
+            if not op_independent:
+                return node.name + '_paded'
 
     @print_mapping_info
     def Unsqueeze(self, node):
@@ -622,17 +704,13 @@ class OpSet9():
             self.paddle_graph.add_layer(
                 'paddle.cast',
                 inputs={"x": indices.name},
-                outputs=indices_cast,
+                outputs=[indices_cast],
                 dtype=string('int64'))
-            op_name = name_generator("embedding", self.nn_name2id)
-            output_name = node.name
-            layer_outputs = [op_name, output_name]
             self.paddle_graph.add_layer(
-                'paddle.nn.Embedding',
-                inputs={"x": indices_cast},
-                outputs=layer_outputs,
-                param_attr=string(val_x.name),
-                size=val_x.out_shapes[0])
+                'paddle.nn.functional.embedding',
+                inputs={"x": indices_cast,
+                        "weight": val_x.name},
+                outputs=[node.name])
         else:
             from functools import reduce
             reshape_shape = reduce(lambda x, y: x * y, indices_shape)
@@ -804,20 +882,27 @@ class OpSet9():
         starts = self.graph.get_input_node(node, idx=1, copy=True)
         ends = self.graph.get_input_node(node, idx=2, copy=True)
         starts_value = _const_weight_or_none(starts)
+        if starts_value is not None:
+            starts_value = starts_value.tolist()
         ends_value = _const_weight_or_none(ends)
-
+        if ends_value is not None:
+            ends_value = ends_value.tolist()
+        if len(node.inputs) > 2:
+            s_len = len(val_x.out_shapes[0])
+            axes = list(range(s_len))
         if len(node.inputs) > 3:
-            axes = self.graph.get_input_node(node, idx=3, copy=True)
-            axes = _const_weight_or_none(axes, necessary=True)
+            axes_node = self.graph.get_input_node(node, idx=3, copy=True)
+            axes = _const_weight_or_none(axes_node, necessary=True).tolist()
         if len(node.inputs) > 4:
             steps = self.graph.get_input_node(node, idx=4, copy=True)
-            steps = _const_weight_or_none(steps)
+            steps = _const_weight_or_none(steps).tolist()
+
         layer_attrs = {
             "axes": axes,
             "starts": starts.name,
            "ends": ends.name
        }
-        if starts_value is not None and ends_value is not None:
+        if starts_value is not None and ends_value is not None and axes is not None:
             starts_value = starts_value.copy()
             ends_value = ends_value.copy()
             #for idx in range(len(ends_value)):
@@ -847,6 +932,8 @@ class OpSet9():
                     layer_attrs['starts'] = starts_cast
                 if ends.dtype != 'int32':
                     ends_cast = ends.name + '_cast'
+                else:
+                    ends_cast = ends.name
                 self.paddle_graph.add_layer(
                     'paddle.cast',
                     inputs={"x": ends.name},
@@ -862,6 +949,7 @@ class OpSet9():
                     ends[idx] = 2**31 - 1
             layer_attrs = {"axes": axes, "starts": starts, "ends": ends}
+
         if steps is not None:
             layer_attrs['strides'] = steps
         self.paddle_graph.add_layer(
@@ -986,11 +1074,17 @@ class OpSet9():
             inputs={'x': val_shape.name},
             outputs=[val_shape.name],
             shape=val_shape.out_shapes[0])
+        if val_shape.dtype != "int32":
+            self.paddle_graph.add_layer(
+                'paddle.cast',
+                inputs={'x': val_shape.name},
+                outputs=[val_shape.name],
+                dtype=string("int32"))
         self.paddle_graph.add_layer(
             'paddle.reshape',
             inputs={'x': val_x.name,
                     'shape': val_shape.name},
-            outputs=node)
+            outputs=[node.name])
 
     @print_mapping_info
     def Cast(self, node):
@@ -1221,7 +1315,10 @@ class OpSet9():
     @print_mapping_info
     def Transpose(self, node):
         val_x = self.graph.get_input_node(node, idx=0, copy=True)
-        perm = node.get_attr('perm')
+        s_len = len(val_x.out_shapes[0])
+        perm_default = list(range(s_len))
+        perm_default.reverse()
+        perm = node.get_attr('perm', perm_default)
         self.paddle_graph.add_layer(
             "paddle.transpose",
             inputs={"x": val_x.name},
@@ -1230,9 +1327,6 @@ class OpSet9():
 
     @print_mapping_info
     def PRelu(self, node):
-        op_name = name_generator("prelu", self.nn_name2id)
-        output_name = node.name
-        layer_outputs = [op_name, output_name]
         val_x = self.graph.get_input_node(node, idx=0, copy=True)
         val_slope = self.graph.get_input_node(node, idx=1, copy=True)
 
@@ -1240,20 +1334,27 @@ class OpSet9():
         shape_slope = val_slope.out_shapes[0]
         if shape_slope == [1]:
             mode = 'all'
-        elif len(shape_slope) > 2:
-            raise Exception("The 'element' mode is not supported yet!")
-
-        if mode == 'channel' and len(shape_slope) == 1:
-            # paddle params shape need be [1, channel]
-            slope_data = _const_weight_or_none(val_slope)
-            slope_data = np.reshape(slope_data, [1] + shape_slope)
-            self.params[val_slope.name] = slope_data
-
-        self.paddle_graph.add_layer(
-            "paddle.nn.functional.prelu",
-            inputs={"x": val_x.name,
-                    "weight": val_slope.name},
-            outputs=[node.name])
+
+        if mode == "element":
+            self.paddle_graph.add_layer(
+                "paddle.static.nn.prelu",
+                inputs={"x": val_x.name,
+                        "param_attr": val_slope.name},
+                outputs=[node.name],
+                mode="element")
+        else:
+            if mode == 'channel':
+                if len(shape_slope) > 1:
+                    self.paddle_graph.add_layer(
+                        "paddle.reshape",
+                        inputs={"x": val_slope.name},
+                        outputs=[val_slope.name],
+                        shape=[shape_slope[0]])
+            self.paddle_graph.add_layer(
+                "paddle.nn.functional.prelu",
+                inputs={"x": val_x.name,
+                        "weight": val_slope.name},
+                outputs=[node.name])
 
     @print_mapping_info
     def Squeeze(self, node):
@@ -1521,6 +1622,16 @@ class OpSet9():
         }
         if has_bias:
             layer_inputs["bias"] = val_b.name
+        input_shape = val_x.out_shapes[0]
+        if reduce(lambda x, y: x * y, input_shape) in [1, -1] and 1 not in input_shape:
+            input_shape[1] = num_in_channels * num_groups
+            input_shape[0] = 0
+            input_shape[2] = 0
+            self.paddle_graph.add_layer(
+                "paddle.reshape",
+                inputs={"x": layer_inputs["x"]},
+                outputs=[layer_inputs["x"]],
+                shape=input_shape)
         self.paddle_graph.add_layer(
             paddle_op,
             inputs=layer_inputs,
@@ -1588,4 +1699,62 @@ class OpSet9():
             inputs={"x": val_x.name},
             outputs=[node.name],
             **layer_attrs)
+
+    @print_mapping_info
+    def Size(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
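+        # Size is the total element count: emit the shape, cast it to int64,
+        # then reduce it with paddle.prod.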
+        self.paddle_graph.add_layer(
+            "paddle.shape",
+            inputs={"input": val_x.name},
+            outputs=[node.name])
+        self.paddle_graph.add_layer(
+            'paddle.cast',
+            inputs={"x": node.name},
+            outputs=[node.name],
+            dtype=string('int64'))
+        self.paddle_graph.add_layer(
+            "paddle.prod",
+            inputs={"x": node.name},
+            outputs=[node.name])
+
+    @print_mapping_info
+    def Sign(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
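+        # paddle.sign works on float tensors, so cast other dtypes to float32
+        # first, then cast the result back to the node's original dtype.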
+        if node.dtype not in ["float16", "float32", "float64"]:
+            self.paddle_graph.add_layer(
+                "paddle.cast",
+                inputs={"x": val_x.name},
+                outputs=[val_x.name],
+                dtype=string("float32"))
+        self.paddle_graph.add_layer(
+            "paddle.sign",
+            inputs={"x": val_x.name},
+            outputs=[node.name])
+        if node.dtype not in ["float16", "float32", "float64"]:
+            self.paddle_graph.add_layer(
+                "paddle.cast",
+                inputs={"x": node.name},
+                outputs=[node.name],
+                dtype=string(node.dtype))
+
+    @print_mapping_info
+    def OneHot(self, node):
+        indices = self.graph.get_input_node(node, idx=0, copy=True)
+        depth = self.graph.get_input_node(node, idx=1, copy=True)
+        values = self.graph.get_input_node(node, idx=2, copy=True)
+        axis = node.get_attr('axis', -1)
+        self.paddle_graph.add_layer(
+            "custom_layer:one_hot",
+            inputs={"indices": indices.name,
+                    "depth": depth.name,
+                    "values": values.name},
+            outputs=[node.name],
+            axis=axis)
+
+    @print_mapping_info
+    def Reciprocal(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        self.paddle_graph.add_layer(
+            "paddle.reciprocal",
+            inputs={"x": val_x.name},
+            outputs=[node.name])
diff --git a/x2paddle/op_mapper/static/tf2paddle/tf_op_mapper.py b/x2paddle/op_mapper/static/tf2paddle/tf_op_mapper.py
index 20317792370bee14ba56691c531b0dc0d656c5ea..338b15e9ed03bbd973de175fddcc72aedb1b2745 100644
--- a/x2paddle/op_mapper/static/tf2paddle/tf_op_mapper.py
+++ b/x2paddle/op_mapper/static/tf2paddle/tf_op_mapper.py
@@ -625,32 +625,11 @@ class TFOpMapper(OpMapper):
             shape=out_shape.tolist())
 
     def Pad(self, node):
-        input = self.graph.get_node(node.layer.input[0])
-        paddings = self.graph.get_node(node.layer.input[1])
+        input = self.graph.get_input_node(node, 0)
+        paddings = self.graph.get_input_node(node, 1)
         assert paddings.layer_type == "Const", "Padding should be Const"
         paddings = paddings.value.flatten().tolist()
-        if len(input.out_shapes[0]) == 4:
-            if paddings[0] + paddings[1] + paddings[6] + paddings[7] == 0:
-                new_padding = paddings[2:6]
-                transpose_name = gen_name("pad", "transpose")
-                self.paddle_graph.add_layer(
-                    kernel="paddle.transpose",
-                    inputs={"x": input.name},
-                    outputs=[transpose_name],
-                    perm=[0, 3, 1, 2])
-                self.paddle_graph.add_layer(
-                    kernel="paddle.nn.functional.pad",
-                    inputs={"x": transpose_name},
-                    outputs=[node.name],
-                    pad=new_padding)
-                self.paddle_graph.add_layer(
-                    kernel="paddle.transpose",
-                    inputs={"x": node.name},
-                    outputs=[node.name],
-                    perm=[0, 2, 3, 1])
-                return
-
         self.paddle_graph.add_layer(
             kernel="paddle.nn.functional.pad",
             inputs={"x": input.name},
@@ -658,26 +637,11 @@ class TFOpMapper(OpMapper):
             pad=paddings)
 
     def MirrorPad(self, node):
-        input = self.graph.get_input_node(node, 0)
-        paddings = self.graph.get_input_node(node, 1)
-        assert paddings.layer_type == "Const", "Padding should be Const"
-        new_paddings = numpy.flip(paddings.value, 0).flatten().tolist()
-        transpose_name = gen_name("pad", "transpose")
-        self.paddle_graph.add_layer(
-            kernel="paddle.transpose",
-            inputs={"x": input.name},
-            outputs=[transpose_name],
-            perm=[0, 3, 1, 2])
-        self.paddle_graph.add_layer(
-            kernel="paddle.nn.functional.pad".format(dim),
-            inputs={"x": transpose_name},
-            outputs=[node.name],
-            pad=new_paddings)
-
-        self.paddle_graph.add_layer(
-            kernel="paddle.transpose",
-            inputs={"x": node.name},
-            outputs=[node.name],
-            perm=[0, 2, 3, 1])
+        self.Pad(node)
+
+
+    def PadV2(self, node):
+        self.Pad(node)
 
     def Squeeze(self, node):
         input = self.graph.get_input_node(node, 0)