From 2721c0a927d95b75120afb3f2bdb5389022c2a09 Mon Sep 17 00:00:00 2001
From: SunAhong1993
Date: Tue, 11 May 2021 10:43:04 +0800
Subject: [PATCH] release

---
 README.md                                 |  27 +-
 x2paddle/__init__.py                      |   2 +-
 x2paddle/convert.py                       |   3 +-
 .../dygraph/tf2paddle/tf_op_mapper.py     | 357 +++++++++++-------
 .../static/tf2paddle/tf_op_mapper.py      | 334 ++++++++++------
 5 files changed, 457 insertions(+), 266 deletions(-)

diff --git a/README.md b/README.md
index d0fd2d7..16ae74c 100644
--- a/README.md
+++ b/README.md
@@ -33,7 +33,7 @@ X2Paddle's architecture is designed with strong emphasis on supporting multiple deep learning frameworks
 - pytorch: torch >= 1.5.0 (script mode does not support 1.7.0 yet)
 
 ## Installation
-### Option 1: install from source (recommended)
+### Option 1: install from source
 ```
 git clone https://github.com/PaddlePaddle/X2Paddle.git
 cd X2Paddle
@@ -41,7 +41,7 @@ git checkout develop
 python setup.py install
 ```
 
-### Option 2: install with pip
+### Option 2: install with pip (recommended)
 We periodically publish updated x2paddle versions to the pip index
 ```
 pip install x2paddle --index https://pypi.python.org/simple/
@@ -95,10 +95,8 @@ X2Paddle provides tools for the following tasks; see [tools/README.md](tools/README
 4. [Adding built-in Caffe custom layers to X2Paddle](./docs/user_guides/add_caffe_custom_layer.md)
 5. [Introduction to the converted PaddlePaddle inference model](./docs/user_guides/pd_folder_introduction.py)
 6. [Paddle to ONNX conversion](https://github.com/PaddlePaddle/Paddle2ONNX)
-
-## Support-list documents
-1. [X2Paddle model zoo](./docs/introduction/x2paddle_model_zoo.md)
-2. [Ops supported by X2Paddle](./docs/introduction/op_list.md)
+7. [X2Paddle model zoo](./docs/introduction/x2paddle_model_zoo.md)
+8. [Ops supported by X2Paddle](./docs/introduction/op_list.md)
 
 ## Conversion tutorials
 
@@ -106,14 +104,21 @@ X2Paddle provides tools for the following tasks; see [tools/README.md](tools/README
 2. [PyTorch model conversion tutorial](./docs/demo/pytorch2paddle.ipynb)
 
 ## Changelog
-2020.12.09 
-1. Added the PyTorch2Paddle converter, which generates Paddle dygraph code and then converts it dynamic-to-static to obtain an inference_model. Option 1: trace mode, where the converted code is split into modules mirroring their PyTorch counterparts. Option 2: script mode, where the converted code appears line by line in execution order. 
+2020.12.09
+1. Added the PyTorch2Paddle converter, which generates Paddle dygraph code and then converts it dynamic-to-static to obtain an inference_model.
+   Option 1: trace mode; the converted code is split into modules, each mirroring the behavior of its PyTorch counterpart.
+   Option 2: script mode; the converted code appears line by line in execution order.
 2. Added Caffe/ONNX/TensorFlow to Paddle dygraph conversion.
-3. Added TensorFlow ops (14): Neg, Greater, FloorMod, LogicalAdd, Prd, Equal, Conv3D, Ceil, AddN, DivNoNan, Where, MirrorPad, Size, TopKv2
+3. Added TensorFlow op mappings (14): Neg, Greater, FloorMod, LogicalAnd, Prod, Equal, Conv3D, Ceil, AddN, DivNoNan, Where, MirrorPad, Size, TopKV2.
 4. Added an Optimizer module, mainly providing op fusion and op elimination; the converted code is more readable and inference runs faster.
+2021.04.30
+1. Newly supported models: [SwinTransformer](https://github.com/microsoft/Swin-Transformer/), [BASNet](https://github.com/xuebinqin/BASNet), [DBFace](https://github.com/dlunion/DBFace), [EasyOCR](https://github.com/JaidedAI/EasyOCR), [CifarNet](https://github.com/tensorflow/models/blob/master/research/slim/nets/cifarnet.py), among others.
+2. The tool can now be used on Windows.
+3. Added TensorFlow op mappings (4): SplitV, ReverseV2, BatchToSpaceND, SpaceToBatchND.
+4. Added PyTorch op mappings (11): aten::index, aten::roll, aten::adaptive_avg_pool1d, aten::reflection_pad2d, aten::reflection_pad1d, aten::instance_norm, aten::gru, aten::norm, aten::clamp_min, aten::prelu, aten::split_with_sizes.
+5. Added ONNX op mappings (1): DepthToSpace.
+6. Added Caffe op mappings (1): MemoryData.
 
 ## Contributing
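A note on the `x2paddle/convert.py` hunk below: `--save_dir` stops being `required=True` at the argparse level and is instead asserted inside `main()` after the `--version` short-circuit, so `x2paddle --version` no longer fails for lack of a save directory. A minimal standalone sketch of that pattern (illustrative code, not the actual x2paddle source):

```python
import argparse

def arg_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--version", "-v", action="store_true", help="print version and exit")
    # Deliberately not required=True: argparse would otherwise reject
    # `--version` invocations that omit --save_dir before main() ever runs.
    parser.add_argument(
        "--save_dir", "-s", type=str, default=None,
        help="path to save translated model")
    return parser

def main():
    args = arg_parser().parse_args()
    if args.version:
        print("x2paddle <version>")  # placeholder output
        return
    # The requirement is enforced only on the conversion path.
    assert args.save_dir is not None, "--save_dir is not defined"

if __name__ == "__main__":
    main()
```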
diff --git a/x2paddle/__init__.py b/x2paddle/__init__.py
index b1d0700..b124fb6 100644
--- a/x2paddle/__init__.py
+++ b/x2paddle/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "1.0.2"
+__version__ = "1.1.0"
 
 from .core.program import PaddleGraph
 
diff --git a/x2paddle/convert.py b/x2paddle/convert.py
index 613ba65..8d78cc4 100644
--- a/x2paddle/convert.py
+++ b/x2paddle/convert.py
@@ -41,7 +41,6 @@ def arg_parser():
     parser.add_argument(
         "--save_dir",
         "-s",
-        required=True,
         type=_text_type,
         default=None,
         help="path to save translated model")
@@ -221,6 +220,8 @@ def main():
                 x2paddle.__version__))
         return
 
+    assert args.save_dir is not None, "--save_dir is not defined"
+
     try:
         import platform
         v0, v1, v2 = platform.python_version().split('.')
diff --git a/x2paddle/op_mapper/dygraph/tf2paddle/tf_op_mapper.py b/x2paddle/op_mapper/dygraph/tf2paddle/tf_op_mapper.py
index d3a3381..a4f446f 100644
--- a/x2paddle/op_mapper/dygraph/tf2paddle/tf_op_mapper.py
+++ b/x2paddle/op_mapper/dygraph/tf2paddle/tf_op_mapper.py
@@ -13,7 +13,7 @@
 #   limitations under the License.
 
 from x2paddle.decoder.tf_decoder import TFGraph, TFGraphNode
-from x2paddle.core.program import PaddleGraph 
+from x2paddle.core.program import PaddleGraph
 from x2paddle.core.op_mapper import OpMapper
 from x2paddle.core.util import *
 import traceback
@@ -58,8 +58,7 @@ class TFOpMapper(OpMapper):
         'swish_f32': ['paddle.nn.Swish'],
         'Tanh': ['paddle.nn.Tanh'],
         'Softplus': ['paddle.nn.Softplus'],
-        'LeakyRelu': ['paddle.nn.LeakyReLU',
-                      dict(alpha='negative_slope')],
+        'LeakyRelu': ['paddle.nn.LeakyReLU', dict(alpha='negative_slope')],
         'Softmax': ['paddle.nn.Softmax'],
         'Floor': ['paddle.floor'],
         'Erf': ['paddle.erf'],
@@ -96,7 +95,8 @@ class TFOpMapper(OpMapper):
         self.nn_name2id = dict()
         self.input_index = 0
         self.inputs_info = dict()
-        self.paddle_graph = PaddleGraph(parent_layer=None, graph_type="dygraph", source_type="tf")
+        self.paddle_graph = PaddleGraph(
+            parent_layer=None, graph_type="dygraph", source_type="tf")
         self.paddle_graph.outputs = self.graph.output_nodes
 
         not_placeholder = list()
@@ -109,7 +109,7 @@ class TFOpMapper(OpMapper):
                 not_placeholder.append(name)
         for name in not_placeholder:
             idx = self.graph.input_nodes.index(name)
-            del self.graph.input_nodes[idx] 
+            del self.graph.input_nodes[idx]
 
         print("Total nodes: {}".format(
             sum([
@@ -134,7 +134,7 @@ class TFOpMapper(OpMapper):
         self.paddle_graph.set_name(self.graph.graph_name)
         self.paddle_graph.set_parameters(self.params)
         self.paddle_graph.set_inputs_info(self.inputs_info)
-        
+
     def op_checker(self):
         unsupported_ops = set()
         for node_name in self.graph.topo_sort:
@@ -149,11 +149,11 @@ class TFOpMapper(OpMapper):
             return True
         else:
             if len(unsupported_ops) > 0:
-                print("\n========= {} OPs are not supported yet ===========".format(
-                    len(unsupported_ops)))
+                print("\n========= {} OPs are not supported yet ===========".
+ format(len(unsupported_ops))) for op in unsupported_ops: print("========== {} ============".format(op)) - return False + return False def directly_map(self, node): inputs = node.layer.input @@ -196,8 +196,11 @@ class TFOpMapper(OpMapper): inputs={"x": x.name, "y": y.name}, outputs=[node.name]) - self.paddle_graph.layers[layer_id].input_shapes = {"x": x_shape, "y": y_shape} - + self.paddle_graph.layers[layer_id].input_shapes = { + "x": x_shape, + "y": y_shape + } + def bool_map(self, node): op_type = self.bool_ops[node.layer_type] self.elementwise_map(node, op_type) @@ -208,7 +211,7 @@ class TFOpMapper(OpMapper): assert len(shape) != 0, "Unknown shape of input nodes[{}].".format( node.layer_name) dtype = node.dtype - + self.paddle_graph.add_layer( kernel="paddle.to_tensor", inputs={}, @@ -226,15 +229,15 @@ class TFOpMapper(OpMapper): if value == float('inf'): value = "float('inf')" self.paddle_graph.add_layer( - "paddle.full", - inputs={}, + "paddle.full", + inputs={}, outputs=[node.name], dtype=string(dtype), shape=[1], fill_value=value) return self.params[node.name] = node.value - + if 0 not in shape: self.paddle_graph.add_layer( "self.create_parameter", @@ -244,28 +247,27 @@ class TFOpMapper(OpMapper): attr=string(node.name), dtype=string(dtype), default_initializer="paddle.nn.initializer.Constant(value=0.0)") - + def Transpose(self, node): input = self.graph.get_input_node(node, 0) perm = self.graph.get_input_node(node, 1) if perm.layer_type == "Const": perm = perm.value.tolist() else: - perm = self.decoder.infer_tensor(perm, use_diff_inputs=False).tolist() - + perm = self.decoder.infer_tensor( + perm, use_diff_inputs=False).tolist() + self.paddle_graph.add_layer( "paddle.transpose", inputs={"x": input.name}, outputs=[node.name], perm=perm) - + def Where(self, node): if len(node.layer.input) == 1: cond = self.graph.get_input_node(node, 0) self.paddle_graph.add_layer( - "paddle.nonzero", - inputs={"x": cond.name}, - outputs=[node.name]) + "paddle.nonzero", inputs={"x": cond.name}, outputs=[node.name]) else: cond = self.graph.get_input_node(node, 0) x = self.graph.get_input_node(node, 1) @@ -276,10 +278,10 @@ class TFOpMapper(OpMapper): "x": x.name, "y": y.name}, outputs=[node.name]) - + def Neg(self, node): input = self.graph.get_input_node(node, 0) - + self.paddle_graph.add_layer( "paddle.scale", inputs={"x": input.name}, @@ -300,10 +302,7 @@ class TFOpMapper(OpMapper): layer_attrs["fill_value"] = input_value.value self.paddle_graph.add_layer( - "paddle.full", - inputs=inputs, - outputs=[node.name], - **layer_attrs) + "paddle.full", inputs=inputs, outputs=[node.name], **layer_attrs) def DepthToSpace(self, node): input = self.graph.get_input_node(node, 0) @@ -419,7 +418,8 @@ class TFOpMapper(OpMapper): if kernel.layer_type == 'Const': kernel_value = kernel.value else: - kernel_value = self.decoder.infer_tensor(kernel, use_diff_inputs=False) + kernel_value = self.decoder.infer_tensor( + kernel, use_diff_inputs=False) kernel_weight_name = op_name + ".weight" self.params[kernel_weight_name] = numpy.transpose(kernel_value, (3, 2, 0, 1)) @@ -444,7 +444,6 @@ class TFOpMapper(OpMapper): outputs=[input_name], shape=[0, k_size[2], 0, 0]) - self.paddle_graph.add_layer( kernel="paddle.nn.Conv2D", inputs={"input": input_name}, @@ -464,7 +463,7 @@ class TFOpMapper(OpMapper): inputs={"x": node.name}, outputs=[node.name], perm=[0, 2, 3, 1]) - + def Conv3D(self, node): op_name = name_generator("conv", self.nn_name2id) output_name = node.name @@ -485,7 +484,8 @@ class TFOpMapper(OpMapper): if 
kernel.layer_type == 'Const': kernel_value = kernel.value else: - kernel_value = self.decoder.infer_tensor(kernel, use_diff_inputs=False) + kernel_value = self.decoder.infer_tensor( + kernel, use_diff_inputs=False) kernel_weight_name = op_name + ".weight" self.params[kernel_weight_name] = numpy.transpose(kernel_value, (4, 3, 0, 1, 2)) @@ -556,7 +556,7 @@ class TFOpMapper(OpMapper): assert moving_mean.layer_type == "Const" assert moving_var.layer_type == "Const" - input_name = input.name + input_name = input.name if data_format == "NHWC": transpose_name = gen_name("batch_norm", "transpose") self.paddle_graph.add_layer( @@ -567,12 +567,16 @@ class TFOpMapper(OpMapper): input_name = transpose_name n, h, w, c = input.out_shapes[0] else: - n, c, h, w = input.out_shapes[0] + n, c, h, w = input.out_shapes[0] - self.params["{}_{}".format(node.name, gamma.name)] = self.params[gamma.name] - self.params["{}_{}".format(node.name, beta.name)] = self.params[beta.name] - self.params["{}_{}".format(node.name, moving_mean.name)] = self.params[moving_mean.name] - self.params["{}_{}".format(node.name, moving_var.name)] = self.params[moving_var.name] + self.params["{}_{}".format(node.name, gamma.name)] = self.params[ + gamma.name] + self.params["{}_{}".format(node.name, beta.name)] = self.params[ + beta.name] + self.params["{}_{}".format(node.name, moving_mean.name)] = self.params[ + moving_mean.name] + self.params["{}_{}".format(node.name, moving_var.name)] = self.params[ + moving_var.name] self.paddle_graph.add_layer( kernel="paddle.nn.BatchNorm", inputs={"input": input_name}, @@ -581,8 +585,10 @@ class TFOpMapper(OpMapper): epsilon=node.get_attr("epsilon"), param_attr=string("{}_{}".format(node.name, gamma.name)), bias_attr=string("{}_{}".format(node.name, beta.name)), - moving_mean_name=string("{}_{}".format(node.name, moving_mean.name)), - moving_variance_name=string("{}_{}".format(node.name, moving_var.name)), + moving_mean_name=string("{}_{}".format(node.name, + moving_mean.name)), + moving_variance_name=string("{}_{}".format(node.name, + moving_var.name)), is_test=True) if data_format == "NHWC": @@ -591,7 +597,7 @@ class TFOpMapper(OpMapper): inputs={"x": node.name}, outputs=[node.name], perm=[0, 2, 3, 1]) - + def FusedBatchNormV3(self, node): self.FusedBatchNorm(node) @@ -655,11 +661,10 @@ class TFOpMapper(OpMapper): outputs=[node.name], pad=paddings, value=constant_values) - + def MirrorPad(self, node): self.Pad(node) - - + def PadV2(self, node): self.Pad(node) @@ -679,7 +684,7 @@ class TFOpMapper(OpMapper): kernel="paddle.shape", inputs={"input": input_name}, outputs=[node.name]) - + def Size(self, node): input = self.graph.get_input_node(node, 0) input_name = input.name @@ -688,15 +693,12 @@ class TFOpMapper(OpMapper): inputs={"input": input_name}, outputs=[node.name]) self.paddle_graph.add_layer( - kernel="paddle.prod", - inputs={"x": node.name}, - outputs=[node.name]) - + kernel="paddle.prod", inputs={"x": node.name}, outputs=[node.name]) + def Ceil(self, node): input = self.graph.get_input_node(node, 0) self.paddle_graph.add_layer( - kernel="paddle.ceil", - inputs={"x": input.name}, + kernel="paddle.ceil", inputs={"x": input.name}, outputs=[node.name]) def ArgMax(self, node): @@ -709,7 +711,7 @@ class TFOpMapper(OpMapper): inputs={"x": input.name}, outputs=[node.name], axis=axis) - + def TopKV2(self, node): input = self.graph.get_input_node(node, 0) k = self.graph.get_input_node(node, 1) @@ -765,7 +767,6 @@ class TFOpMapper(OpMapper): self.params[kernel_weight_name] = 
numpy.transpose(kernel.value, (2, 3, 0, 1)) - input_name = input.name if data_format == "NHWC": in_shape = [in_shape[i] for i in [0, 3, 1, 2]] @@ -823,7 +824,7 @@ class TFOpMapper(OpMapper): op_name = name_generator("pool", self.nn_name2id) output_name = node.name layer_outputs = [op_name, output_name] - + # TODO(syf): The op has diff. self.paddle_graph.add_layer( kernel="paddle.nn.AvgPool2D", @@ -833,15 +834,6 @@ class TFOpMapper(OpMapper): stride=strides[2:4], padding=string(pad_mode)) -# self.paddle_graph.add_layer( -# kernel="fluid.layers.pool2d", -# inputs={"input": input_name}, -# outputs=[node.name], -# pool_size=k_size[2:4], -# pool_type=string("avg"), -# pool_stride=strides[2:4], -# pool_padding=string(pad_mode)) - if data_format == "NHWC": self.paddle_graph.add_layer( kernel="paddle.transpose", @@ -884,7 +876,9 @@ class TFOpMapper(OpMapper): axis = 1 else: raise Exception("Unexpected situation happend in Unpack OP") - layer_outputs = ["{}_p{}".format(node.layer_name, i) for i in range(num)] + layer_outputs = [ + "{}_p{}".format(node.layer_name, i) for i in range(num) + ] if len(layer_outputs) == 1: layer_outputs[0] = "[{}]".format(node.layer_name) self.paddle_graph.add_layer( @@ -910,7 +904,7 @@ class TFOpMapper(OpMapper): inputs={"x": input_names}, outputs=[node.name], axis=axis) - + def Concat(self, node): inputs_list = list() for i in range(1, len(node.inputs)): @@ -920,14 +914,14 @@ class TFOpMapper(OpMapper): axis = axis.value if axis < 0: axis += len(inputs_list[0].out_shapes[0]) - + input_names = [i.name for i in inputs_list] self.paddle_graph.add_layer( kernel="paddle.concat", inputs={"x": input_names}, outputs=[node.name], axis=axis) - + def AddN(self, node): inputs_list = list() for i in range(len(node.inputs) - 1): @@ -1005,7 +999,7 @@ class TFOpMapper(OpMapper): new_end.append(999999) else: new_end.append(end[i]) - + if input.dtype == "bool": self.paddle_graph.add_layer( "paddle.cast", @@ -1020,7 +1014,7 @@ class TFOpMapper(OpMapper): axes=[i for i in range(len(new_begin))], starts=new_begin, ends=new_end) - + if input.dtype == "bool": self.paddle_graph.add_layer( "paddle.cast", @@ -1043,7 +1037,7 @@ class TFOpMapper(OpMapper): inputs={"x": node.name}, outputs=[node.name], axis=shrink_axes) - + def Prod(self, node): input = self.graph.get_input_node(node, 0) reduction_indices = self.graph.get_input_node(node, 1) @@ -1073,7 +1067,7 @@ class TFOpMapper(OpMapper): ], num_or_sections=num_split, axis=dim) - + def SplitV(self, node): input = self.graph.get_input_node(node, 0) size_splits = self.graph.get_input_node(node, 1) @@ -1082,12 +1076,13 @@ class TFOpMapper(OpMapper): dim = self.graph.get_input_node(node, 2) assert dim.layer_type == "Const", "dim of SplitV OP should be Const" dim = dim.value - + self.paddle_graph.add_layer( kernel="paddle.split", inputs={"x": input.name}, outputs=[ - "{}_p{}".format(node.layer_name, i) for i in range(len(size_splits)) + "{}_p{}".format(node.layer_name, i) + for i in range(len(size_splits)) ], num_or_sections=size_splits, axis=dim) @@ -1103,7 +1098,8 @@ class TFOpMapper(OpMapper): begin = begin.value.tolist() attrs['offsets'] = begin else: - begin = self.decoder.infer_tensor(begin, use_diff_inputs=False).tolist() + begin = self.decoder.infer_tensor( + begin, use_diff_inputs=False).tolist() attrs['offsets'] = begin if size.layer_type == "Const": size = size.value.tolist() @@ -1118,19 +1114,18 @@ class TFOpMapper(OpMapper): shape=shape) inputs['shape'] = reshape_name self.paddle_graph.add_layer( - kernel="paddle.crop", - inputs=inputs, 
- outputs=[node.name], - **attrs) + kernel="paddle.crop", inputs=inputs, outputs=[node.name], **attrs) def ResizeNearestNeighbor(self, node): input = self.graph.get_input_node(node, 0) resize_shape = self.graph.get_input_node(node, 1) data_format = "NHWC" inputs = {"x": input.name} - attrs = {"align_corners": node.get_attr("align_corners"), - "mode": string("nearest"), - "align_mode": 1} + attrs = { + "align_corners": node.get_attr("align_corners"), + "mode": string("nearest"), + "align_mode": 1 + } if resize_shape.layer_type == "Const": resize_shape = resize_shape.value.tolist() @@ -1166,15 +1161,17 @@ class TFOpMapper(OpMapper): inputs={"x": node.name}, outputs=[node.name], perm=[0, 2, 3, 1]) - + def ResizeBilinear(self, node): input = self.graph.get_input_node(node, 0) resize_shape = self.graph.get_input_node(node, 1) data_format = "NHWC" inputs = {"x": input.name} - attrs = {"align_corners": node.get_attr("align_corners"), - "mode": string("bilinear"), - "align_mode": 1} + attrs = { + "align_corners": node.get_attr("align_corners"), + "mode": string("bilinear"), + "align_mode": 1 + } if resize_shape.layer_type == "Const": resize_shape = resize_shape.value.tolist() @@ -1279,15 +1276,17 @@ class TFOpMapper(OpMapper): if out_shape.layer_type == "Const": out_shape = out_shape.value.tolist() else: - out_shape = self.decoder.infer_tensor(out_shape, - out_shape=node.out_shapes[0]) + out_shape = self.decoder.infer_tensor( + out_shape, out_shape=node.out_shapes[0]) in_shape = input.out_shapes[0] if in_shape.count(-1) > 2: - in_shape = self.decoder.infer_tensor(input, use_diff_inputs=False).shape + in_shape = self.decoder.infer_tensor( + input, use_diff_inputs=False).shape k_size = kernel.out_shapes[0] if k_size.count(-1) > 2: - k_size = self.decoder.infer_tensor(kernel, use_diff_inputs=False).shape + k_size = self.decoder.infer_tensor( + kernel, use_diff_inputs=False).shape pad_mode = node.get_attr("padding").decode() strides = node.get_attr("strides") @@ -1310,30 +1309,20 @@ class TFOpMapper(OpMapper): perm=[0, 3, 1, 2]) input_name = transpose_name - # TODO(syf): The output_size is not set. 
-# self.paddle_graph.add_layer( -# kernel="paddle.nn.Conv2DTranspose", -# inputs={"input": input_name}, -# outputs=layer_outputs, -# weight_attr=string(kernel_name), -# bias_attr=False, -# in_channels=k_size[3], -# out_channels=k_size[2], -# kernel_size=k_size[0:2], -# stride=strides[2:4], -# dilation=dilations[2:4], -# padding=string(pad_mode)) self.paddle_graph.add_layer( "self.create_parameter", inputs={}, outputs=["{}_{}".format(node.name, kernel_name).replace(".", "_")], shape=self.params[kernel_name].shape, attr=string(kernel_name)) - + self.paddle_graph.add_layer( kernel="paddle.nn.functional.conv2d_transpose", - inputs={"x": input_name, - "weight": "{}_{}".format(node.name, kernel_name).replace(".", "_")}, + inputs={ + "x": input_name, + "weight": + "{}_{}".format(node.name, kernel_name).replace(".", "_") + }, outputs=[node.name], bias=None, stride=strides[2:4], @@ -1361,10 +1350,7 @@ class TFOpMapper(OpMapper): inputs["repeat_times"] = repeat_times.name self.paddle_graph.add_layer( - kernel="paddle.tile", - inputs=inputs, - outputs=[node.name], - **attr) + kernel="paddle.tile", inputs=inputs, outputs=[node.name], **attr) def Range(self, node): start = self.graph.get_input_node(node, 0) @@ -1379,7 +1365,7 @@ class TFOpMapper(OpMapper): if start.layer_type == "Const": attr["start"] = start.value else: - + inputs["start"] = start.name if limit.dtype.startswith('float'): dtype = limit.dtype @@ -1397,10 +1383,7 @@ class TFOpMapper(OpMapper): attr["dtype"] = string(node.dtype) self.paddle_graph.add_layer( - kernel="paddle.arange", - inputs=inputs, - outputs=[node.name], - **attr) + kernel="paddle.arange", inputs=inputs, outputs=[node.name], **attr) def SquaredDifference(self, node): x = self.graph.get_input_node(node, 0) @@ -1411,14 +1394,20 @@ class TFOpMapper(OpMapper): # TODO(syf) layer_id = self.paddle_graph.add_layer( "paddle.subtract", inputs=inputs, outputs=[node.name]) - self.paddle_graph.layers[layer_id].input_shapes = {"x": x_shape, "y": y_shape} + self.paddle_graph.layers[layer_id].input_shapes = { + "x": x_shape, + "y": y_shape + } inputs = {"x": node.name, "y": node.name} x_shape = node.out_shapes[0] y_shape = node.out_shapes[0] layer_id = self.paddle_graph.add_layer( "paddle.multiply", inputs=inputs, outputs=[node.name]) - self.paddle_graph.layers[layer_id].input_shapes = {"x": x_shape, "y": y_shape} + self.paddle_graph.layers[layer_id].input_shapes = { + "x": x_shape, + "y": y_shape + } def OneHot(self, node): input = self.graph.get_input_node(node, 0) @@ -1472,10 +1461,7 @@ class TFOpMapper(OpMapper): outputs=[input_name], dtype=string("bool")) self.paddle_graph.add_layer( - "paddle.all", - inputs={"x": input_name}, - outputs=[node.name], - **attr) + "paddle.all", inputs={"x": input_name}, outputs=[node.name], **attr) node.layer.attr['dtype'].type = 10 @@ -1496,10 +1482,7 @@ class TFOpMapper(OpMapper): shape=[-1]) inputs = {'x': embeddings.name, 'index': index_name} self.paddle_graph.add_layer( - "paddle.gather", - inputs=inputs, - outputs=[node.name], - axis=axis) + "paddle.gather", inputs=inputs, outputs=[node.name], axis=axis) if len(index.out_shapes[0]) != 1: out_shape = node.out_shapes[0] self.paddle_graph.add_layer( @@ -1507,15 +1490,13 @@ class TFOpMapper(OpMapper): inputs={"x": node.name}, outputs=[node.name], shape=out_shape) - + def GatherNd(self, node): x = self.graph.get_input_node(node, 0) index = self.graph.get_input_node(node, 1) inputs = {'x': x.name, 'index': index.name} self.paddle_graph.add_layer( - "paddle.gather_nd", - inputs=inputs, - 
outputs=[node.name]) + "paddle.gather_nd", inputs=inputs, outputs=[node.name]) def ExpandDims(self, node): x = self.graph.get_input_node(node, 0, copy=True) @@ -1530,11 +1511,8 @@ class TFOpMapper(OpMapper): else: inputs['axis'] = y.name self.paddle_graph.add_layer( - "paddle.unsqueeze", - inputs=inputs, - outputs=[node.name], - **attr) - + "paddle.unsqueeze", inputs=inputs, outputs=[node.name], **attr) + def ReverseV2(self, node): x = self.graph.get_input_node(node, 0) axis = self.graph.get_input_node(node, 1) @@ -1548,7 +1526,114 @@ class TFOpMapper(OpMapper): else: inputs['axis'] = axis.name self.paddle_graph.add_layer( - "paddle.flip", - inputs=inputs, + "paddle.flip", inputs=inputs, outputs=[node.name], **attr) + + def BatchToSpaceND(self, node): + ''' + reshape->transpose->reshape->crop + ''' + x = self.graph.get_input_node(node, 0) + block_shape = self.graph.get_input_node(node, 1) + crops = self.graph.get_input_node(node, 2) + if block_shape.layer_type == "Const": + block_shape = block_shape.value.tolist() + if crops.layer_type == "Const": + crops = crops.value.tolist() + data_format = x.get_attr("data_format").decode() + if data_format == "NHWC": + n, h, w, c = x.out_shapes[0] + else: + n, c, h, w = x.out_shapes[0] + input_name = x.name + #reshape + shape = block_shape + [-1, h, w, c] + reshape_name = gen_name("batch_to_space", "reshape") + self.paddle_graph.add_layer( + kernel="paddle.reshape", + inputs={"x": input_name}, + outputs=[reshape_name], + shape=shape) + #transpose + perm = [len(block_shape)] + list(j for i in range(len(block_shape)) for j in (i + len(block_shape) + 1, i)) +\ + list(i + 2*len(block_shape) + 1 for i in range(len(x.out_shapes[0]) - len(block_shape) - 1)) + transpose_name = gen_name("batch_to_space", "transpose") + self.paddle_graph.add_layer( + kernel="paddle.transpose", + inputs={"x": reshape_name}, + outputs=[transpose_name], + perm=perm) + #reshape + shape = [-1] + list(i * j + for i, j in zip(block_shape, x.out_shapes[0][ + 1:])) + x.out_shapes[0][1 + len(block_shape):] + reshape_name = gen_name("batch_to_space", "reshape") + self.paddle_graph.add_layer( + kernel="paddle.reshape", + inputs={"x": transpose_name}, + outputs=[reshape_name], + shape=shape) + #crop + attrs = {} + crop_shape = shape + crop_offsets = [0] * len(shape) + for i in range(len(crops)): + crop_shape[i + 1] = crop_shape[i + 1] - crops[i][0] - crops[i][1] + crop_offsets[i + 1] = crops[i][0] + attrs['shape'] = crop_shape + attrs['offsets'] = crop_offsets + self.paddle_graph.add_layer( + kernel="paddle.crop", + inputs={"x": reshape_name}, + outputs=[node.name], + **attrs) + + def SpaceToBatchND(self, node): + ''' + zero-pad->reshape->transpose->reshape + ''' + x = self.graph.get_input_node(node, 0) + block_shape = self.graph.get_input_node(node, 1) + paddings = self.graph.get_input_node(node, 2) + if block_shape.layer_type == "Const": + block_shape = block_shape.value.tolist() + if paddings.layer_type == "Const": + paddings = paddings.value.flatten().tolist() + input_name = x.name + #zero-pad + constant_values = 0 + pad_name = gen_name("space_to_batch", "pad") + paddings = [0, 0] + paddings + [0, 0] + self.paddle_graph.add_layer( + kernel="paddle.nn.functional.pad", + inputs={"x": input_name}, + outputs=[pad_name], + pad=paddings, + value=constant_values) + #reshape + n, h, w, c = x.out_shapes[0] + h = h + paddings[2] + paddings[3] + w = w + paddings[4] + paddings[5] + shape = [ + n, h // block_shape[0], block_shape[0], w // block_shape[1], + block_shape[1], c + ] + reshape_name = 
gen_name("space_to_batch", "reshape") + self.paddle_graph.add_layer( + kernel="paddle.reshape", + inputs={"x": pad_name}, + outputs=[reshape_name], + shape=shape) + #transpose + transpose_name = gen_name("space_to_batch", "transpose") + self.paddle_graph.add_layer( + kernel="paddle.transpose", + inputs={"x": reshape_name}, + outputs=[transpose_name], + perm=[2, 4, 0, 1, 3, 5]) + #reshape + shape = [-1, h // block_shape[0], w // block_shape[1], c] + self.paddle_graph.add_layer( + kernel="paddle.reshape", + inputs={"x": transpose_name}, outputs=[node.name], - **attr) + shape=shape) diff --git a/x2paddle/op_mapper/static/tf2paddle/tf_op_mapper.py b/x2paddle/op_mapper/static/tf2paddle/tf_op_mapper.py index 6ed02f5..80b5804 100644 --- a/x2paddle/op_mapper/static/tf2paddle/tf_op_mapper.py +++ b/x2paddle/op_mapper/static/tf2paddle/tf_op_mapper.py @@ -13,7 +13,7 @@ # limitations under the License. from x2paddle.decoder.tf_decoder import TFGraph, TFGraphNode -from x2paddle.core.program import PaddleGraph +from x2paddle.core.program import PaddleGraph from x2paddle.core.op_mapper import OpMapper from x2paddle.core.util import * from x2paddle import program @@ -60,8 +60,8 @@ class TFOpMapper(OpMapper): 'swish_f32': ['paddle.nn.functional.swish'], 'Tanh': ['paddle.tanh'], 'Softplus': ['paddle.nn.functional.softplus'], - 'LeakyRelu': ['paddle.nn.functional.leaky_relu', - dict(alpha='negative_slope')], + 'LeakyRelu': + ['paddle.nn.functional.leaky_relu', dict(alpha='negative_slope')], 'Floor': ['paddle.floor'], 'Erf': ['paddle.erf'], 'Square': ['paddle.square'] @@ -95,7 +95,8 @@ class TFOpMapper(OpMapper): if not self.op_checker(): raise Exception("Model is not supported yet.") self.params = dict() - self.paddle_graph = PaddleGraph(parent_layer=None, graph_type="static", source_type="tf") + self.paddle_graph = PaddleGraph( + parent_layer=None, graph_type="static", source_type="tf") self.params_output2id = dict() not_placeholder = list() @@ -135,7 +136,7 @@ class TFOpMapper(OpMapper): print("\nNodes converted.") self.paddle_graph.set_name(self.graph.graph_name) self.paddle_graph.set_parameters(self.params) - + def op_checker(self): unsupported_ops = set() for node_name in self.graph.topo_sort: @@ -150,8 +151,8 @@ class TFOpMapper(OpMapper): return True else: if len(unsupported_ops) > 0: - print("\n========= {} OPs are not supported yet ===========".format( - len(unsupported_ops))) + print("\n========= {} OPs are not supported yet ===========". 
+ format(len(unsupported_ops))) for op in unsupported_ops: print("========== {} ============".format(op)) return False @@ -186,8 +187,11 @@ class TFOpMapper(OpMapper): inputs={"x": x.name, "y": y.name}, outputs=[node.name]) - self.paddle_graph.layers[layer_id].input_shapes = {"x": x_shape, "y": y_shape} - + self.paddle_graph.layers[layer_id].input_shapes = { + "x": x_shape, + "y": y_shape + } + def bool_map(self, node): op_type = self.bool_ops[node.layer_type] self.elementwise_map(node, op_type) @@ -241,7 +245,8 @@ class TFOpMapper(OpMapper): if perm.layer_type == "Const": perm = perm.value.tolist() else: - perm = self.decoder.infer_tensor(perm, use_diff_inputs=False).tolist() + perm = self.decoder.infer_tensor( + perm, use_diff_inputs=False).tolist() self.paddle_graph.add_layer( kernel="paddle.transpose", @@ -263,10 +268,7 @@ class TFOpMapper(OpMapper): attr["fill_value"] = input_value.value self.paddle_graph.add_layer( - "paddle.full", - inputs=inputs, - outputs=[node.name], - **attr) + "paddle.full", inputs=inputs, outputs=[node.name], **attr) if dims.layer_type != "Const": self.paddle_graph.add_layer( "paddle.reshape", @@ -328,14 +330,12 @@ class TFOpMapper(OpMapper): inputs={"x": node.name}, outputs=[node.name], perm=[0, 2, 3, 1]) - + def Where(self, node): if len(node.layer.input) == 1: cond = self.graph.get_input_node(node, 0) self.paddle_graph.add_layer( - "paddle.nonzero", - inputs={"x": cond.name}, - outputs=[node.name]) + "paddle.nonzero", inputs={"x": cond.name}, outputs=[node.name]) else: cond = self.graph.get_input_node(node, 0) x = self.graph.get_input_node(node, 1) @@ -346,10 +346,10 @@ class TFOpMapper(OpMapper): "x": x.name, "y": y.name}, outputs=[node.name]) - + def Neg(self, node): input = self.graph.get_input_node(node, 0) - + self.paddle_graph.add_layer( "paddle.scale", inputs={"x": input.name}, @@ -409,7 +409,8 @@ class TFOpMapper(OpMapper): kernel_value = kernel.value kernel_weight_name = kernel.name.replace('/', '_') else: - kernel_value = self.decoder.infer_tensor(kernel, use_diff_inputs=False) + kernel_value = self.decoder.infer_tensor( + kernel, use_diff_inputs=False) if kernel.layer_type == 'Split': kernel_weight_name = "{}_{}_kernel".format(node.name, kernel.name) @@ -424,7 +425,7 @@ class TFOpMapper(OpMapper): shape=self.params[kernel_weight_name].shape, dtype=string(str(self.params[kernel_weight_name].dtype)), name=string(kernel_weight_name)) - + input_name = input.name if data_format == "NHWC": strides = [strides[i] for i in [0, 3, 1, 2]] @@ -447,7 +448,8 @@ class TFOpMapper(OpMapper): self.paddle_graph.add_layer( kernel="paddle.nn.functional.conv2d", - inputs={"x": input_name, "weight": kernel_weight_name}, + inputs={"x": input_name, + "weight": kernel_weight_name}, outputs=[node.name], bias=None, stride=strides[2:4], @@ -460,7 +462,7 @@ class TFOpMapper(OpMapper): inputs={"x": node.name}, outputs=[node.name], perm=[0, 2, 3, 1]) - + def Conv3D(self, node): input = self.graph.get_input_node(node, 0) kernel = self.graph.get_input_node(node, 1) @@ -479,7 +481,8 @@ class TFOpMapper(OpMapper): kernel_value = kernel.value kernel_weight_name = kernel.name.replace('/', '_') else: - kernel_value = self.decoder.infer_tensor(kernel, use_diff_inputs=False) + kernel_value = self.decoder.infer_tensor( + kernel, use_diff_inputs=False) if kernel.layer_type == 'Split': kernel_weight_name = "{}_{}_kernel".format(node.name, kernel.name) @@ -494,7 +497,7 @@ class TFOpMapper(OpMapper): shape=self.params[kernel_weight_name].shape, 
dtype=string(str(self.params[kernel_weight_name].dtype)), name=string(kernel_weight_name)) - + input_name = input.name if data_format == "NDHWC": strides = [strides[i] for i in [0, 4, 1, 2, 3]] @@ -513,11 +516,12 @@ class TFOpMapper(OpMapper): kernel="paddle.reshape", inputs={"x": input_name}, outputs=[input_name], - shape=[0, k_size[2], 0, 0, 0]) - + shape=[0, k_size[2], 0, 0, 0]) + self.paddle_graph.add_layer( kernel="paddle.nn.functional.conv3d", - inputs={"x": input_name, "weight": kernel_weight_name}, + inputs={"x": input_name, + "weight": kernel_weight_name}, outputs=[node.name], bias=None, stride=strides[2:5], @@ -565,11 +569,13 @@ class TFOpMapper(OpMapper): self.paddle_graph.add_layer( kernel="paddle.nn.functional.batch_norm", - inputs={"x": input_name, - "running_mean": moving_mean.name, - "running_var": moving_var.name, - "weight": gamma.name, - "bias": beta.name}, + inputs={ + "x": input_name, + "running_mean": moving_mean.name, + "running_var": moving_var.name, + "weight": gamma.name, + "bias": beta.name + }, outputs=[node.name], epsilon=node.get_attr("epsilon")) @@ -579,7 +585,7 @@ class TFOpMapper(OpMapper): inputs={"x": node.name}, outputs=[node.name], perm=[0, 2, 3, 1]) - + def FusedBatchNormV3(self, node): self.FusedBatchNorm(node) @@ -643,11 +649,10 @@ class TFOpMapper(OpMapper): outputs=[node.name], pad=paddings, value=constant_values) - + def MirrorPad(self, node): self.Pad(node) - - + def PadV2(self, node): self.Pad(node) @@ -676,15 +681,12 @@ class TFOpMapper(OpMapper): inputs={"input": input_name}, outputs=[node.name]) self.paddle_graph.add_layer( - kernel="paddle.prod", - inputs={"x": node.name}, - outputs=[node.name]) - + kernel="paddle.prod", inputs={"x": node.name}, outputs=[node.name]) + def Ceil(self, node): input = self.graph.get_input_node(node, 0) self.paddle_graph.add_layer( - kernel="paddle.ceil", - inputs={"x": input.name}, + kernel="paddle.ceil", inputs={"x": input.name}, outputs=[node.name]) def ArgMax(self, node): @@ -697,7 +699,7 @@ class TFOpMapper(OpMapper): inputs={"x": input.name}, outputs=[node.name], axis=axis) - + def TopKV2(self, node): input = self.graph.get_input_node(node, 0) k = self.graph.get_input_node(node, 1) @@ -748,8 +750,8 @@ class TFOpMapper(OpMapper): if len(kernel.outputs) == 1: self.params[kernel.name] = numpy.transpose(self.params[kernel.name], - (2, 3, 0, 1)) - layer = self.paddle_graph.layers[self.params_output2id[kernel.name]] + (2, 3, 0, 1)) + layer = self.paddle_graph.layers[self.params_output2id[kernel.name]] layer.attrs["shape"] = self.params[kernel.name].shape else: self.paddle_graph.add_layer( @@ -808,7 +810,7 @@ class TFOpMapper(OpMapper): strides = [strides[i] for i in [0, 3, 1, 2]] k_size = [k_size[i] for i in [0, 3, 1, 2]] input_name = transpose_name - + # TODO(syf): The op has diff. 
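+        # Editor's assumption, not part of the original patch: the "diff"
+        # is most likely a SAME-padding counting mismatch. TensorFlow's
+        # AvgPool excludes the implicit zero padding from the averaging
+        # denominator, so whether paddle.nn.AvgPool2D matches depends on its
+        # `exclusive` setting; border windows are where outputs can diverge.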
self.paddle_graph.add_layer( @@ -861,7 +863,9 @@ class TFOpMapper(OpMapper): axis = 1 else: raise Exception("Unexpected situation happend in Unpack OP") - layer_outputs = ["{}_p{}".format(node.layer_name, i) for i in range(num)] + layer_outputs = [ + "{}_p{}".format(node.layer_name, i) for i in range(num) + ] if len(layer_outputs) == 1: layer_outputs[0] = "[{}]".format(node.layer_name) self.paddle_graph.add_layer( @@ -887,7 +891,7 @@ class TFOpMapper(OpMapper): inputs={"x": input_names}, outputs=[node.name], axis=axis) - + def Concat(self, node): inputs_list = list() for i in range(1, len(node.inputs)): @@ -897,14 +901,14 @@ class TFOpMapper(OpMapper): axis = axis.value if axis < 0: axis += len(inputs_list[0].out_shapes[0]) - + input_names = [i.name for i in inputs_list] self.paddle_graph.add_layer( kernel="paddle.concat", inputs={"x": input_names}, outputs=[node.name], axis=axis) - + def AddN(self, node): inputs_list = list() for i in range(len(node.inputs) - 1): @@ -982,7 +986,7 @@ class TFOpMapper(OpMapper): new_end.append(999999) else: new_end.append(end[i]) - + if input.dtype == "bool": self.paddle_graph.add_layer( "paddle.cast", @@ -997,7 +1001,7 @@ class TFOpMapper(OpMapper): axes=[i for i in range(len(new_begin))], starts=new_begin, ends=new_end) - + if input.dtype == "bool": self.paddle_graph.add_layer( "paddle.cast", @@ -1020,7 +1024,7 @@ class TFOpMapper(OpMapper): inputs={"x": node.name}, outputs=[node.name], axis=shrink_axes) - + def Prod(self, node): input = self.graph.get_input_node(node, 0) reduction_indices = self.graph.get_input_node(node, 1) @@ -1050,7 +1054,7 @@ class TFOpMapper(OpMapper): ], num_or_sections=num_split, axis=dim) - + def SplitV(self, node): input = self.graph.get_input_node(node, 0) size_splits = self.graph.get_input_node(node, 1) @@ -1059,12 +1063,13 @@ class TFOpMapper(OpMapper): dim = self.graph.get_input_node(node, 2) assert dim.layer_type == "Const", "dim of SplitV OP should be Const" dim = dim.value - + self.paddle_graph.add_layer( kernel="paddle.split", inputs={"x": input.name}, outputs=[ - "{}_p{}".format(node.layer_name, i) for i in range(len(size_splits)) + "{}_p{}".format(node.layer_name, i) + for i in range(len(size_splits)) ], num_or_sections=size_splits, axis=dim) @@ -1080,15 +1085,8 @@ class TFOpMapper(OpMapper): begin = begin.value.tolist() attrs['offsets'] = begin else: - # shape = begin.out_shapes[0] - # reshape_name = gen_name("slice", "reshape") - # self.paddle_graph.add_layer( - # kernel="fluid.layers.reshape", - # inputs={"x": begin.name}, - # outputs=[reshape_name], - # shape=shape) - # inputs['offsets'] = reshape_name - begin = self.decoder.infer_tensor(begin, use_diff_inputs=False).tolist() + begin = self.decoder.infer_tensor( + begin, use_diff_inputs=False).tolist() attrs['offsets'] = begin if size.layer_type == "Const": size = size.value.tolist() @@ -1103,19 +1101,18 @@ class TFOpMapper(OpMapper): shape=shape) inputs['shape'] = reshape_name self.paddle_graph.add_layer( - kernel="paddle.crop", - inputs=inputs, - outputs=[node.name], - **attrs) + kernel="paddle.crop", inputs=inputs, outputs=[node.name], **attrs) def ResizeNearestNeighbor(self, node): input = self.graph.get_input_node(node, 0) resize_shape = self.graph.get_input_node(node, 1) data_format = "NHWC" inputs = {"x": input.name} - attrs = {"align_corners": node.get_attr("align_corners"), - "mode": string("nearest"), - "align_mode": 1} + attrs = { + "align_corners": node.get_attr("align_corners"), + "mode": string("nearest"), + "align_mode": 1 + } if 
resize_shape.layer_type == "Const": resize_shape = resize_shape.value.tolist() @@ -1157,9 +1154,11 @@ class TFOpMapper(OpMapper): resize_shape = self.graph.get_input_node(node, 1) data_format = "NHWC" inputs = {"x": input.name} - attrs = {"align_corners": node.get_attr("align_corners"), - "mode": string("bilinear"), - "align_mode": 1} + attrs = { + "align_corners": node.get_attr("align_corners"), + "mode": string("bilinear"), + "align_mode": 1 + } if resize_shape.layer_type == "Const": resize_shape = resize_shape.value.tolist() @@ -1261,15 +1260,17 @@ class TFOpMapper(OpMapper): if out_shape.layer_type == "Const": out_shape = out_shape.value.tolist() else: - out_shape = self.decoder.infer_tensor(out_shape, - out_shape=node.out_shapes[0]) + out_shape = self.decoder.infer_tensor( + out_shape, out_shape=node.out_shapes[0]) in_shape = input.out_shapes[0] if in_shape.count(-1) > 2: - in_shape = self.decoder.infer_tensor(input, use_diff_inputs=False).shape + in_shape = self.decoder.infer_tensor( + input, use_diff_inputs=False).shape k_size = kernel.out_shapes[0] if k_size.count(-1) > 2: - k_size = self.decoder.infer_tensor(kernel, use_diff_inputs=False).shape + k_size = self.decoder.infer_tensor( + kernel, use_diff_inputs=False).shape pad_mode = node.get_attr("padding").decode() strides = node.get_attr("strides") @@ -1299,11 +1300,14 @@ class TFOpMapper(OpMapper): dtype=string(str(self.params[kernel_name].dtype)), shape=self.params[kernel_name].shape, name=string(kernel_name)) - + self.paddle_graph.add_layer( kernel="paddle.nn.functional.conv2d_transpose", - inputs={"x": input_name, - "weight": "{}_{}".format(node.name, kernel_name).replace(".", "_")}, + inputs={ + "x": input_name, + "weight": + "{}_{}".format(node.name, kernel_name).replace(".", "_") + }, outputs=[node.name], bias=None, stride=strides[2:4], @@ -1328,14 +1332,12 @@ class TFOpMapper(OpMapper): attr["repeat_times"] = repeat_times else: inputs["repeat_times"] = repeat_times.name - + self.paddle_graph.add_layer( - kernel="paddle.tile", - inputs=inputs, - outputs=[node.name], - **attr) - - if not isinstance(repeat_times, list) and repeat_times.layer_type != "Const": + kernel="paddle.tile", inputs=inputs, outputs=[node.name], **attr) + + if not isinstance(repeat_times, + list) and repeat_times.layer_type != "Const": self.paddle_graph.add_layer( kernel="paddle.reshape", inputs={"x": node.name}, @@ -1372,10 +1374,7 @@ class TFOpMapper(OpMapper): attr["dtype"] = string(node.dtype) self.paddle_graph.add_layer( - kernel="paddle.arange", - inputs=inputs, - outputs=[node.name], - **attr) + kernel="paddle.arange", inputs=inputs, outputs=[node.name], **attr) if start.layer_type != "Const" or \ limit.layer_type != "Const" or \ delta.layer_type != "Const": @@ -1394,14 +1393,20 @@ class TFOpMapper(OpMapper): # TODO(syf) layer_id = self.paddle_graph.add_layer( "paddle.subtract", inputs=inputs, outputs=[node.name]) - self.paddle_graph.layers[layer_id].input_shapes = {"x": x_shape, "y": y_shape} + self.paddle_graph.layers[layer_id].input_shapes = { + "x": x_shape, + "y": y_shape + } inputs = {"x": node.name, "y": node.name} x_shape = node.out_shapes[0] y_shape = node.out_shapes[0] layer_id = self.paddle_graph.add_layer( "paddle.multiply", inputs=inputs, outputs=[node.name]) - self.paddle_graph.layers[layer_id].input_shapes = {"x": x_shape, "y": y_shape} + self.paddle_graph.layers[layer_id].input_shapes = { + "x": x_shape, + "y": y_shape + } def OneHot(self, node): input = self.graph.get_input_node(node, 0) @@ -1455,10 +1460,7 @@ class 
TFOpMapper(OpMapper): outputs=[input_name], dtype=string("bool")) self.paddle_graph.add_layer( - "paddle.all", - inputs={"x": input_name}, - outputs=[node.name], - **attr) + "paddle.all", inputs={"x": input_name}, outputs=[node.name], **attr) node.layer.attr['dtype'].type = 10 @@ -1479,10 +1481,7 @@ class TFOpMapper(OpMapper): shape=[-1]) inputs = {'x': embeddings.name, 'index': index_name} self.paddle_graph.add_layer( - "paddle.gather", - inputs=inputs, - outputs=[node.name], - axis=axis) + "paddle.gather", inputs=inputs, outputs=[node.name], axis=axis) if len(index.out_shapes[0]) != 1: out_shape = node.out_shapes[0] self.paddle_graph.add_layer( @@ -1490,15 +1489,13 @@ class TFOpMapper(OpMapper): inputs={"x": node.name}, outputs=[node.name], shape=out_shape) - + def GatherNd(self, node): x = self.graph.get_input_node(node, 0) index = self.graph.get_input_node(node, 1) inputs = {'x': x.name, 'index': index.name} self.paddle_graph.add_layer( - "paddle.gather_nd", - inputs=inputs, - outputs=[node.name]) + "paddle.gather_nd", inputs=inputs, outputs=[node.name]) def ExpandDims(self, node): x = self.graph.get_input_node(node, 0, copy=True) @@ -1513,11 +1510,8 @@ class TFOpMapper(OpMapper): else: inputs['axis'] = y.name self.paddle_graph.add_layer( - "paddle.unsqueeze", - inputs=inputs, - outputs=[node.name], - **attr) - + "paddle.unsqueeze", inputs=inputs, outputs=[node.name], **attr) + def ReverseV2(self, node): x = self.graph.get_input_node(node, 0) axis = self.graph.get_input_node(node, 1) @@ -1531,8 +1525,114 @@ class TFOpMapper(OpMapper): else: inputs['axis'] = axis.name self.paddle_graph.add_layer( - "paddle.flip", - inputs=inputs, + "paddle.flip", inputs=inputs, outputs=[node.name], **attr) + + def BatchToSpaceND(self, node): + ''' + reshape->transpose->reshape->crop + ''' + x = self.graph.get_input_node(node, 0) + block_shape = self.graph.get_input_node(node, 1) + crops = self.graph.get_input_node(node, 2) + if block_shape.layer_type == "Const": + block_shape = block_shape.value.tolist() + if crops.layer_type == "Const": + crops = crops.value.tolist() + data_format = x.get_attr("data_format").decode() + if data_format == "NHWC": + n, h, w, c = x.out_shapes[0] + else: + n, c, h, w = x.out_shapes[0] + input_name = x.name + #reshape + shape = block_shape + [-1, h, w, c] + reshape_name = gen_name("batch_to_space", "reshape") + self.paddle_graph.add_layer( + kernel="paddle.reshape", + inputs={"x": input_name}, + outputs=[reshape_name], + shape=shape) + #transpose + perm = [len(block_shape)] + list(j for i in range(len(block_shape)) for j in (i + len(block_shape) + 1, i)) +\ + list(i + 2*len(block_shape) + 1 for i in range(len(x.out_shapes[0]) - len(block_shape) - 1)) + transpose_name = gen_name("batch_to_space", "transpose") + self.paddle_graph.add_layer( + kernel="paddle.transpose", + inputs={"x": reshape_name}, + outputs=[transpose_name], + perm=perm) + #reshape + shape = [-1] + list(i * j + for i, j in zip(block_shape, x.out_shapes[0][ + 1:])) + x.out_shapes[0][1 + len(block_shape):] + reshape_name = gen_name("batch_to_space", "reshape") + self.paddle_graph.add_layer( + kernel="paddle.reshape", + inputs={"x": transpose_name}, + outputs=[reshape_name], + shape=shape) + #crop + attrs = {} + crop_shape = shape + crop_offsets = [0] * len(shape) + for i in range(len(crops)): + crop_shape[i + 1] = crop_shape[i + 1] - crops[i][0] - crops[i][1] + crop_offsets[i + 1] = crops[i][0] + attrs['shape'] = crop_shape + attrs['offsets'] = crop_offsets + self.paddle_graph.add_layer( + kernel="paddle.crop", 
+ inputs={"x": reshape_name}, outputs=[node.name], - **attr) + **attrs) + def SpaceToBatchND(self, node): + ''' + zero-pad->reshape->transpose->reshape + ''' + x = self.graph.get_input_node(node, 0) + block_shape = self.graph.get_input_node(node, 1) + paddings = self.graph.get_input_node(node, 2) + if block_shape.layer_type == "Const": + block_shape = block_shape.value.tolist() + if paddings.layer_type == "Const": + paddings = paddings.value.flatten().tolist() + input_name = x.name + #zero-pad + constant_values = 0 + pad_name = gen_name("space_to_batch", "pad") + paddings = [0, 0] + paddings + [0, 0] + self.paddle_graph.add_layer( + kernel="paddle.nn.functional.pad", + inputs={"x": input_name}, + outputs=[pad_name], + pad=paddings, + value=constant_values) + #reshape + n, h, w, c = x.out_shapes[0] + h = h + paddings[2] + paddings[3] + w = w + paddings[4] + paddings[5] + shape = [ + n, h // block_shape[0], block_shape[0], w // block_shape[1], + block_shape[1], c + ] + reshape_name = gen_name("space_to_batch", "reshape") + self.paddle_graph.add_layer( + kernel="paddle.reshape", + inputs={"x": pad_name}, + outputs=[reshape_name], + shape=shape) + #transpose + transpose_name = gen_name("space_to_batch", "transpose") + self.paddle_graph.add_layer( + kernel="paddle.transpose", + inputs={"x": reshape_name}, + outputs=[transpose_name], + perm=[2, 4, 0, 1, 3, 5]) + #reshape + shape = [-1, h // block_shape[0], w // block_shape[1], c] + self.paddle_graph.add_layer( + kernel="paddle.reshape", + inputs={"x": transpose_name}, + outputs=[node.name], + shape=shape) -- GitLab
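Appendix (editor's sketch, not part of the patch): both mappers above decompose `SpaceToBatchND` into zero-pad -> reshape -> transpose -> reshape and `BatchToSpaceND` into reshape -> transpose -> reshape -> crop. For a 2-D block, the permutation that `BatchToSpaceND` builds reduces to `(2, 3, 0, 4, 1, 5)`, which is exactly the inverse of the `(2, 4, 0, 1, 3, 5)` used by `SpaceToBatchND`. The numpy round-trip below checks that algebra for the zero-padding/zero-crop case; the function names are illustrative only:

```python
import numpy as np

def space_to_batch_nd(x, block):
    # x: (n, h, w, c), already zero-padded so h and w divide by the block.
    n, h, w, c = x.shape
    bh, bw = block
    y = x.reshape(n, h // bh, bh, w // bw, bw, c)
    y = y.transpose(2, 4, 0, 1, 3, 5)   # -> (bh, bw, n, h/bh, w/bw, c)
    return y.reshape(bh * bw * n, h // bh, w // bw, c)

def batch_to_space_nd(x, block):
    # x: (bh*bw*n, h, w, c); with zero crops the final crop step is a no-op.
    b, h, w, c = x.shape
    bh, bw = block
    y = x.reshape(bh, bw, -1, h, w, c)
    y = y.transpose(2, 3, 0, 4, 1, 5)   # -> (n, h, bh, w, bw, c)
    return y.reshape(-1, h * bh, w * bw, c)

x = np.arange(2 * 4 * 6 * 3, dtype=np.float32).reshape(2, 4, 6, 3)
roundtrip = batch_to_space_nd(space_to_batch_nd(x, (2, 3)), (2, 3))
assert roundtrip.shape == x.shape and np.array_equal(roundtrip, x)
```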