diff --git a/x2paddle/core/program.py b/x2paddle/core/program.py
index 9b7cd84ee925aff751cdaf9b0bf144249d06d3ce..0cc78c5b8caa4dcb08345a05b24364c60f3ec214 100644
--- a/x2paddle/core/program.py
+++ b/x2paddle/core/program.py
@@ -354,7 +354,7 @@ class PaddleGraph(object):
                 remove_default_attrs(layer.kernel, layer.attrs)
             edges_in = self.edges_in.get(layer_id, [])
             edges_out = self.edges_out.get(layer_id, [])
-            if len(edges_in) == 0 and len(edges_out) == 0:
+            if len(edges_in) == 0 and len(edges_out) == 0 and layer.outputs[0] not in self.outputs:
                 continue
 
             line = ""
diff --git a/x2paddle/op_mapper/dygraph/onnx2paddle/onnx_custom_layer/__init__.py b/x2paddle/op_mapper/dygraph/onnx2paddle/onnx_custom_layer/__init__.py
index 2bb406520e78a6282d8eb747e634aff1ab6faecd..887bb45cdd6c86a62c7cce68d99b3e0cf3328bd1 100644
--- a/x2paddle/op_mapper/dygraph/onnx2paddle/onnx_custom_layer/__init__.py
+++ b/x2paddle/op_mapper/dygraph/onnx2paddle/onnx_custom_layer/__init__.py
@@ -16,4 +16,5 @@
 from .one_hot import OneHot
 from .pad_two_input import PadWithTwoInput
 from .pad_all_dim2 import PadAllDim2
-from .pad_all_dim4 import PadAllDim4
\ No newline at end of file
+from .pad_all_dim4 import PadAllDim4
+from .pad_all_dim4_one_input import PadAllDim4WithOneInput
\ No newline at end of file
diff --git a/x2paddle/op_mapper/dygraph/onnx2paddle/onnx_custom_layer/pad_all_dim4_one_input.py b/x2paddle/op_mapper/dygraph/onnx2paddle/onnx_custom_layer/pad_all_dim4_one_input.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ad3048081a1dbb6dfe57cce2a235a8a57aa6c2f
--- /dev/null
+++ b/x2paddle/op_mapper/dygraph/onnx2paddle/onnx_custom_layer/pad_all_dim4_one_input.py
@@ -0,0 +1,32 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+from x2paddle.core.util import *
+
+class PadAllDim4WithOneInput(object):
+    def __init__(self, pad, value, mode):
+        self.layer_attrs = {}
+        self.layer_attrs['mode'] = mode
+        self.layer_attrs['data_format'] = 'NCHW'
+        self.layer_attrs['value'] = value
+        self.pad1 = pad[0: 4]
+        self.pad2 = pad[4: 9]
+
+    def __call__(self, x):
+        x = paddle.nn.functional.pad(x=x, pad=self.pad1, **self.layer_attrs)
+        x = paddle.transpose(x, perm=[2, 3, 0, 1])
+        x = paddle.nn.functional.pad(x=x, pad=self.pad2, **self.layer_attrs)
+        out = paddle.transpose(x, perm=[2, 3, 0, 1])
+        return out
\ No newline at end of file
diff --git a/x2paddle/op_mapper/dygraph/onnx2paddle/opset9/opset.py b/x2paddle/op_mapper/dygraph/onnx2paddle/opset9/opset.py
index 4311f50ab559cbdbd22a0c83b932b4e8a6c48ff8..bfeb120b6a30d2b6d2861715c7b9276ad8d4ab4f 100644
--- a/x2paddle/op_mapper/dygraph/onnx2paddle/opset9/opset.py
+++ b/x2paddle/op_mapper/dygraph/onnx2paddle/opset9/opset.py
@@ -415,7 +415,7 @@ class OpSet9():
                     paddle_op = 'paddle.nn.Pad{}D'.format(len(output_shape) - 2)
                     paddings = np.array(pads).reshape(
                         (2, -1)).transpose().astype("int32")
-                    paddings = np.flip(paddings).flatten().tolist()
+                    paddings = np.flip(paddings, axis=0).flatten().tolist()
                     layer_attrs['padding'] = paddings
                 else:
                     if data_shape:
@@ -435,10 +435,16 @@ class OpSet9():
                 if output_shape:
                     assume_pad |= output_shape and 2 * len(output_shape) == len(pads)  # NCHW
                 if assume_pad:
-                    paddle_op = 'paddle.nn.functional.pad'
+                    paddle_op = 'paddle.nn.Pad2D'
                     paddings = np.array(pads).reshape(
-                        (2, -1)).transpose().astype("int32").flatten().tolist()
-                    layer_attrs['pad'] = paddings
+                        (2, -1)).transpose().astype("int32")
+                    paddings = np.flip(paddings, axis=0).flatten().tolist()
+                    if sum(paddings[:4]) == 0:
+                        paddings = paddings[4:]
+                        layer_attrs['padding'] = paddings
+                    else:
+                        layer_attrs["pad"] = paddings
+                        paddle_op = "custom_layer:PadAllDim4WithOneInput"
                 else:
                     raise Exception("The padding value {} is wrong!".format(pads))
             self.paddle_graph.add_layer(
diff --git a/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/__init__.py b/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/__init__.py
index 3bd624a30ff6e81a9d49b1704fcfcf5a70dccb96..d8161ae1f8a76bff61a1cd8a23de15b3a1bf9549 100644
--- a/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/__init__.py
+++ b/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/__init__.py
@@ -14,4 +14,7 @@
 # limitations under the License.
 
 from .one_hot import one_hot
-from .pad import custom_pad
\ No newline at end of file
+from .pad_two_input import pad_with_two_input
+from .pad_all_dim2 import pad_all_dim2
+from .pad_all_dim4 import pad_all_dim4
+from .pad_all_dim4_one_input import pad_all_dim4_one_input
\ No newline at end of file
diff --git a/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/one_hot.py b/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/one_hot.py
index 67499dce20968bf502a63c002e723a26d93a94b7..146f643f8c1f014eef79095107ff8de027127c9d 100644
--- a/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/one_hot.py
+++ b/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/one_hot.py
@@ -14,31 +14,18 @@
 
 import paddle
 
-def one_hot(self, indices, depth, values, axis):
-    indices_shape = paddle.shape(indices)
-    tmp = paddle.ones_like(indices_shape, dtype="int32")
-    rank = paddle.sum(tmp)
+def one_hot(indices, depth, values, axis):
+    indices_shape = indices.shape
+    rank = len(indices.shape)
+    real_axis = axis
+    if axis < 0:
+        real_axis = axis + rank + 1
     depth_range = paddle.arange(end=depth)
-    zero = paddle.zeros([1], dtype="int32")
-    one = paddle.ones([1], dtype="int32")
-    axis = axis * one
-    new_axis = axis + rank + 1
-    cond = paddle.less_than(axis, zero)
-    real_axis = paddle.where(cond, new_axis, axis)
-    ls = paddle.slice(indices_shape, axes=[0], starts=[0], ends=real_axis)
-    rs = paddle.slice(indices_shape, axes=[0], starts=real_axis, ends=rank)
-    tmp = paddle.ones_like(ls, dtype="int32")
-    ls_len = paddle.sum(tmp)
-    ls_list = paddle.ones(ls_len, dtype="int32")
-    tmp = paddle.ones_like(rs, dtype="int32")
-    rs_len = paddle.sum(tmp)
-    rs_list = paddle.ones(rs_len, dtype="int32")
-    depth_range_shape = paddle.shape(depth_range)
-    targets_shape = paddle.concat([ls_list, depth_range_shape, rs_list], axis=0)
-    targets = paddle.reshape(depth_range, targets_shape)
+    ls = tuple(indices_shape[0: real_axis])
+    rs = tuple(indices_shape[real_axis: rank])
+    targets = paddle.reshape(depth_range, (1,) * (real_axis-0) + tuple(depth_range.shape) + (1,) * (rank-real_axis))
     mod = paddle.mod(indices, depth)
-    v_shape = paddle.concat([ls, paddle.shape(one), rs], axis=0)
-    v = paddle.reshape(mod, v_shape)
+    v = paddle.reshape(mod, ls + (1,) + rs)
     out = targets == v
     out = paddle.cast(out, "float32")
     on_value = paddle.slice(values, axes=[0], starts=[1], ends=[2])
diff --git a/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/pad_all_dim2.py b/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/pad_all_dim2.py
new file mode 100644
index 0000000000000000000000000000000000000000..59af5987039ab962f30b6f25626f365ba76e6dfa
--- /dev/null
+++ b/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/pad_all_dim2.py
@@ -0,0 +1,30 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+
+def pad_all_dim2(x, pad, value, mode):
+    pad = paddle.reshape(pad, shape=[2, -1])
+    pad = paddle.transpose(pad, perm=[1, 0])
+    pad = paddle.reverse(pad, axis=[0])
+    pad = paddle.flatten(pad)
+    pad = paddle.cast(pad, dtype="int32")
+    x = paddle.unsqueeze(x, axis=[0, 1])
+    out = paddle.nn.functional.pad(x=x,
+                                   pad=pad,
+                                   mode=mode,
+                                   data_format='NCHW',
+                                   value=value)
+    out = paddle.squeeze(out, axis=[0, 1])
+    return out
\ No newline at end of file
diff --git a/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/pad_all_dim4.py b/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/pad_all_dim4.py
new file mode 100644
index 0000000000000000000000000000000000000000..df8c42c3379baa5f0b7f88b4085cb2880a4b396f
--- /dev/null
+++ b/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/pad_all_dim4.py
@@ -0,0 +1,36 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+
+def pad_all_dim4(x, pad, value, mode):
+    pad = paddle.reshape(pad, shape=[2, -1])
+    pad = paddle.transpose(pad, perm=[1, 0])
+    pad = paddle.reverse(pad, axis=[0])
+    pad = paddle.flatten(pad)
+    pad = paddle.cast(pad, dtype="int32")
+    pad1, pad2 = paddle.split(pad, num_or_sections=2, axis=0)
+    x = paddle.nn.functional.pad(x=x,
+                                 pad=pad1,
+                                 mode=mode,
+                                 data_format='NCHW',
+                                 value=value)
+    x = paddle.transpose(x, perm=[2, 3, 0, 1])
+    x = paddle.nn.functional.pad(x=x,
+                                 pad=pad2,
+                                 mode=mode,
+                                 data_format='NCHW',
+                                 value=value)
+    out = paddle.transpose(x, perm=[2, 3, 0, 1])
+    return out
\ No newline at end of file
diff --git a/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/pad_all_dim4_one_input.py b/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/pad_all_dim4_one_input.py
new file mode 100644
index 0000000000000000000000000000000000000000..796b9f366cb1666abe88f07462d60b045e396a35
--- /dev/null
+++ b/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/pad_all_dim4_one_input.py
@@ -0,0 +1,30 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+
+def pad_all_dim4_one_input(x, pad, value, mode):
+    x = paddle.nn.functional.pad(x=x,
+                                 pad=pad[0: 4],
+                                 mode=mode,
+                                 data_format='NCHW',
+                                 value=value)
+    x = paddle.transpose(x, perm=[2, 3, 0, 1])
+    x = paddle.nn.functional.pad(x=x,
+                                 pad=pad[4: 9],
+                                 mode=mode,
+                                 data_format='NCHW',
+                                 value=value)
+    out = paddle.transpose(x, perm=[2, 3, 0, 1])
+    return out
\ No newline at end of file
diff --git a/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/pad.py b/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/pad_two_input.py
similarity index 68%
rename from x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/pad.py
rename to x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/pad_two_input.py
index 3b678a1d59ad91f415f9cf785e7ea5cc56dced3c..dfcbe49f020907462fd0da8a433089bced11ca95 100644
--- a/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/pad.py
+++ b/x2paddle/op_mapper/static/onnx2paddle/onnx_custom_layer/pad_two_input.py
@@ -14,14 +14,15 @@
 
 import paddle
 
-def custom_pad(self, x, pad, value, mode):
-    layer_attrs = {}
-    layer_attrs['mode'] = string(mode)
-    layer_attrs['data_format'] = string('NCHW')
-    layer_attrs['value'] = value
+def pad_with_two_input(x, pad, value, mode, data_format):
     pad = paddle.reshape(pad, shape=[2, -1])
     pad = paddle.transpose(pad, perm=[1, 0])
     pad = paddle.reverse(pad, axis=[0])
     pad = paddle.flatten(pad)
-    out = paddle.nn.functional.pad(x=x, pad=pad, **self.layer_attrs)
+    pad = paddle.cast(pad, dtype="int32")
+    out = paddle.nn.functional.pad(x=x,
+                                   pad=pad,
+                                   value=value,
+                                   mode=mode,
+                                   data_format=data_format)
     return out
\ No newline at end of file
diff --git a/x2paddle/op_mapper/static/onnx2paddle/opset9/opset.py b/x2paddle/op_mapper/static/onnx2paddle/opset9/opset.py
index e82f704c6be1789fedad3da7d9458956da3bafa9..aab454088319d0019712fb2c90c5f41ed36366ab 100644
--- a/x2paddle/op_mapper/static/onnx2paddle/opset9/opset.py
+++ b/x2paddle/op_mapper/static/onnx2paddle/opset9/opset.py
@@ -106,6 +106,9 @@ class OpSet9():
         'ReduceMax': ['paddle.max',
                       dict(axes='axis', keepdims='keepdim'),
                       dict(keepdim=1)],
+        'ReduceProd': ['paddle.prod',
+                       dict(axes='axis', keepdims='keepdim'),
+                       dict(keepdim=1)],
         # active function
         'Relu': ['paddle.nn.functional.relu'],
         'LeakyRelu': ['paddle.nn.functional.leaky_relu',
@@ -380,73 +383,122 @@ class OpSet9():
         value = node.get_attr('value', 0.)
         data_shape = val_x.out_shapes[0]
         output_shape = node.out_shapes[0]
-        assume_pad2d = False
+        assume_pad = False
         layer_attrs = {}
         layer_attrs['mode'] = string(mode)
+        layer_attrs['value'] = value
+        if not op_independent:
+            output_name = node.name + '_paded'
+        else:
+            output_name = node.name
+        layer_outputs = [output_name]
         if is_pads_attr:
             paddings = []
-            if len(pads) == 4:
-                assume_pad2d |= mode != 'constant'
+            paddle_op = 'paddle.nn.functional.pad'
+            if len(pads) in [2, 4, 6]:
                 if data_shape:
-                    assume_pad2d |= data_shape and len(data_shape) == 4  # NCHW
+                    assume_pad |= data_shape and 2 * (len(data_shape) - 2) == len(pads)  # NCHW
                 if output_shape:
-                    assume_pad2d |= output_shape and len(output_shape) == 4  # NCHW
-            if assume_pad2d:
-                paddle_op = 'paddle.nn.functional.pad'
-                layer_attrs['data_format'] = string('NCHW')
-                layer_attrs['value'] = value
-            else:
-                paddle_op = 'paddle.fluid.layers.pad'
-                layer_attrs["pad_value"] = value
-            if len(pads) == 4:
-                paddings = np.array(pads).reshape(
-                    (-1, 2)).transpose().flatten().tolist()  # SSEE -> SESE
+                    assume_pad |= output_shape and 2 * (len(output_shape) - 2) == len(pads)  # NCHW
+                if assume_pad:
+                    if len(pads) == 2:
+                        data_format = "NCL"
+                    elif len(pads) == 4:
+                        data_format = "NCHW"
+                    else:
+                        data_format = "NCDHW"
+
+                    paddings = np.array(pads).reshape(
+                        (2, -1)).transpose().astype("int32")
+                    paddings = np.flip(paddings, axis=0).flatten().tolist()
+                    layer_attrs['pad'] = paddings
+                    layer_attrs['data_format'] = data_format
+                else:
+                    if data_shape:
+                        assume_pad |= data_shape and 2 * len(data_shape) == len(pads)  # NCHW
+                    if output_shape:
+                        assume_pad |= output_shape and 2 * len(output_shape) == len(pads)  # NCHW
+                    if assume_pad:
+                        paddings = np.array(pads).reshape(
+                            (2, -1)).transpose().astype("int32").flatten().tolist()
+                        layer_attrs['pad'] = paddings
+                    else:
+                        raise Exception("The padding value {} is wrong!".format(pads))
             elif len(pads) == 8:
-                paddings = np.array(pads).reshape(
-                    (-1, 4)).transpose().flatten().tolist()  # SSEE -> SESE
-                if sum(paddings[:4]) == 0:
-                    paddle_op = 'paddle.nn.functional.pad'
-                    paddings = paddings[4:]
-                    layer_attrs['value'] = value
-                    if 'pad_value' in layer_attrs:
-                        layer_attrs.pop('pad_value')
-            tmp_paddings = copy.deepcopy(paddings)
-            paddings[0] = tmp_paddings[2]
-            paddings[1] = tmp_paddings[3]
-            paddings[2] = tmp_paddings[0]
-            paddings[3] = tmp_paddings[1]
-            if paddle_op == 'paddle.nn.functional.pad':
-                layer_attrs['pad'] = paddings
-            else:
-                layer_attrs['paddings'] = paddings
-            if op_independent:
-                self.paddle_graph.add_layer(
-                    paddle_op,
-                    inputs={'x': val_x.name},
-                    outputs=[node.name],
-                    **layer_attrs)
-            else:
-                self.paddle_graph.add_layer(
-                    paddle_op,
-                    inputs={'x': val_x.name},
-                    outputs=[node.name + '_paded'],
-                    **layer_attrs)
+                if data_shape:
+                    assume_pad |= data_shape and 2 * len(data_shape) == len(pads)  # NCHW
+                if output_shape:
+                    assume_pad |= output_shape and 2 * len(output_shape) == len(pads)  # NCHW
+                if assume_pad:
+                    paddings = np.array(pads).reshape(
+                        (2, -1)).transpose().astype("int32")
+                    paddings = np.flip(paddings, axis=0).flatten().tolist()
+                    if sum(paddings[:4]) == 0:
+                        paddings = paddings[4:]
+                        layer_attrs['pad'] = paddings
+                    else:
+                        layer_attrs['pad'] = paddings
+                        paddle_op = "custom_layer:pad_all_dim4_one_input"
+                else:
+                    raise Exception("The padding value {} is wrong!".format(pads))
+            self.paddle_graph.add_layer(
+                paddle_op,
+                inputs={'x': val_x.name},
+                outputs=layer_outputs,
+                **layer_attrs)
+            if not op_independent:
                 return node.name + '_paded'
         else:
-            if pad_shape[0] == 4:
-                assume_pad2d |= mode != 'constant'
+            pads_len = val_pad.out_shapes[0][0]
+            if pads_len in [2, 4, 6]:
                 if data_shape:
-                    assume_pad2d |= data_shape and len(data_shape) == 4  # NCHW
+                    assume_pad |= data_shape and 2 * (len(data_shape) - 2) == pads_len  # NCHW
                 if output_shape:
-                    assume_pad2d |= output_shape and len(output_shape) == 4  # NCHW
-            if pad_shape[0] == 8 or not assume_pad2d:
-                raise Exception("When the pad shape is 8 and pad is tensor, the op is not supported yet!")
-            layer_attrs['value'] = value
-            self.paddle_graph.add_layer(
-                "custom_layer:custom_pad",
-                inputs={'x': val_x.name, 'pad': val_pad.name},
-                outputs=[node.name + '_paded'],
-                **layer_attrs)
+                    assume_pad |= output_shape and 2 * (len(output_shape) - 2) == pads_len  # NCHW
+                if assume_pad:
+                    if pads_len == 2:
+                        data_format = "NCL"
+                    elif pads_len == 4:
+                        data_format = "NCHW"
+                    else:
+                        data_format = "NCDHW"
+                    self.paddle_graph.add_layer(
+                        "custom_layer:pad_with_two_input",
+                        inputs={'x': val_x.name, 'pad': val_pad.name},
+                        outputs=layer_outputs,
+                        value=value,
+                        mode=string(mode),
+                        data_format=string(data_format))
+                else:
+                    if data_shape:
+                        assume_pad |= data_shape and 2 * len(data_shape) == pads_len  # NCHW
+                    if output_shape:
+                        assume_pad |= output_shape and 2 * len(output_shape) == pads_len  # NCHW
+                    if assume_pad:
+                        if pads_len == 4:
+                            self.paddle_graph.add_layer(
+                                "custom_layer:pad_all_dim2",
+                                inputs={'x': val_x.name, 'pad': val_pad.name},
+                                outputs=layer_outputs,
+                                value=value,
+                                mode=string(mode))
+                        else:
+                            raise Exception("The padding value is wrong!")
+            elif pads_len == 8:
+                if data_shape:
+                    assume_pad |= data_shape and 2 * len(data_shape) == pads_len  # NCHW
+                if output_shape:
+                    assume_pad |= output_shape and 2 * len(output_shape) == pads_len  # NCHW
+                if assume_pad:
+                    self.paddle_graph.add_layer(
+                        "custom_layer:pad_all_dim4",
+                        inputs={'x': val_x.name, 'pad': val_pad.name},
+                        outputs=layer_outputs,
+                        value=value,
+                        mode=string(mode))
+            else:
+                print(pads_len)
+                raise Exception("The padding value is wrong!")
             if not op_independent:
                 return node.name + '_paded'
 
@@ -650,15 +702,11 @@ class OpSet9():
                 inputs={"x": indices.name},
                 outputs=[indices_cast],
                 dtype=string('int64'))
-            op_name = name_generator("embedding", self.nn_name2id)
-            output_name = node.name
-            layer_outputs = [op_name, output_name]
             self.paddle_graph.add_layer(
-                'paddle.nn.Embedding',
-                inputs={"x": indices_cast},
-                outputs=layer_outputs,
-                param_attr=string(val_x.name),
-                size=val_x.out_shapes[0])
+                'paddle.nn.functional.embedding',
+                inputs={"x": indices_cast,
+                        "weight": val_x.name},
+                outputs=[node.name])
         else:
             from functools import reduce
             reshape_shape = reduce(lambda x, y: x * y, indices_shape)
@@ -830,14 +878,21 @@ class OpSet9():
             starts = self.graph.get_input_node(node, idx=1, copy=True)
             ends = self.graph.get_input_node(node, idx=2, copy=True)
             starts_value = _const_weight_or_none(starts)
+            if starts_value is not None:
+                starts_value = starts_value.tolist()
             ends_value = _const_weight_or_none(ends)
-
+            if ends_value is not None:
+                ends_value = ends_value.tolist()
+            if len(node.inputs) > 2:
+                s_len = len(val_x.out_shapes[0])
+                axes = list(range(s_len))
             if len(node.inputs) > 3:
-                axes = self.graph.get_input_node(node, idx=3, copy=True)
-                axes = _const_weight_or_none(axes, necessary=True)
+                axes_node = self.graph.get_input_node(node, idx=3, copy=True)
+                axes = _const_weight_or_none(axes_node, necessary=True).tolist()
             if len(node.inputs) > 4:
                 steps = self.graph.get_input_node(node, idx=4, copy=True)
-                steps = _const_weight_or_none(steps)
+                steps = _const_weight_or_none(steps).tolist()
+
             layer_attrs = {
                 "axes": axes,
                 "starts": starts.name,
@@ -873,6 +928,8 @@ class OpSet9():
                 layer_attrs['starts'] = starts_cast
             if ends.dtype != 'int32':
                 ends_cast = ends.name + '_cast'
+            else:
+                ends_cast = ends.name
             self.paddle_graph.add_layer(
                 'paddle.cast',
                 inputs={"x": ends.name},
@@ -888,6 +945,7 @@ class OpSet9():
                 if ends[idx] > 2**31 - 1:
                     ends[idx] = 2**31 - 1
             layer_attrs = {"axes": axes, "starts": starts, "ends": ends}
+
         if steps is not None:
             layer_attrs['strides'] = steps
         self.paddle_graph.add_layer(
@@ -1012,6 +1070,12 @@ class OpSet9():
                 inputs={'x': val_shape.name},
                 outputs=[val_shape.name],
                 shape=val_shape.out_shapes[0])
+        if val_shape.dtype != "int32":
+            self.paddle_graph.add_layer(
+                'paddle.cast',
+                inputs={'x': val_shape.name},
+                outputs=[val_shape.name],
+                dtype=string("int32"))
         self.paddle_graph.add_layer(
             'paddle.reshape',
             inputs={'x': val_x.name,
@@ -1247,7 +1311,10 @@ class OpSet9():
     @print_mapping_info
     def Transpose(self, node):
         val_x = self.graph.get_input_node(node, idx=0, copy=True)
-        perm = node.get_attr('perm')
+        s_len = len(val_x.out_shapes[0])
+        perm_default = list(range(s_len))
+        perm_default.reverse()
+        perm = node.get_attr('perm', perm_default)
         self.paddle_graph.add_layer(
             "paddle.transpose",
             inputs={"x": val_x.name},
@@ -1620,8 +1687,13 @@ class OpSet9():
         val_x = self.graph.get_input_node(node, idx=0, copy=True)
         self.paddle_graph.add_layer(
             "paddle.shape",
-            inputs={"x": val_x.name},
+            inputs={"input": val_x.name},
             outputs=[node.name])
+        self.paddle_graph.add_layer(
+            'paddle.cast',
+            inputs={"x": node.name},
+            outputs=[node.name],
+            dtype=string('int64'))
         self.paddle_graph.add_layer(
             "paddle.prod",
             inputs={"x": node.name},
@@ -1630,10 +1702,22 @@ class OpSet9():
     @print_mapping_info
     def Sign(self, node):
         val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        if node.dtype not in ["float16", "float32", "float64"]:
+            self.paddle_graph.add_layer(
+                "paddle.cast",
+                inputs={"x": val_x.name},
+                outputs=[val_x.name],
+                dtype=string("float32"))
         self.paddle_graph.add_layer(
             "paddle.sign",
             inputs={"x": val_x.name},
             outputs=[node.name])
+        if node.dtype not in ["float16", "float32", "float64"]:
+            self.paddle_graph.add_layer(
+                "paddle.cast",
+                inputs={"x": node.name},
+                outputs=[node.name],
+                dtype=string(node.dtype))
 
     @print_mapping_info
     def OneHot(self, node):
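
Note for reviewers: the new all-dim4 pad layers (`PadAllDim4WithOneInput` / `pad_all_dim4_one_input`) all rely on the same decomposition — pad H/W, transpose N/C into the trailing positions, pad again, then transpose back. A minimal standalone sketch of that behaviour, not part of the patch; the `pad` values are hypothetical and assumed to already be in the flipped [W_begin, W_end, H_begin, H_end, C_begin, C_end, N_begin, N_end] order the mapper produces:

    import paddle

    # Hypothetical flipped paddings for a 4-D NCHW tensor.
    pad = [1, 1, 2, 2, 0, 0, 3, 3]

    x = paddle.rand([2, 3, 8, 8])                # NCHW input
    # First pad covers the two trailing dims (H and W).
    x = paddle.nn.functional.pad(
        x, pad[0:4], mode='constant', value=0.0, data_format='NCHW')
    # Move N and C into the trailing positions so they can be padded too.
    x = paddle.transpose(x, perm=[2, 3, 0, 1])   # -> [H', W', N, C]
    x = paddle.nn.functional.pad(
        x, pad[4:8], mode='constant', value=0.0, data_format='NCHW')
    # Restore the original NCHW layout.
    out = paddle.transpose(x, perm=[2, 3, 0, 1])
    print(out.shape)                             # [8, 3, 12, 10]

This is why a pure `paddle.nn.Pad2D` (or `paddle.nn.functional.pad`) suffices when the first four flipped values sum to zero (only H/W are padded), while the custom layer is needed once N or C receive padding.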