From b3e5fdc7afec18e57077d13746a9073fcb2dcf46 Mon Sep 17 00:00:00 2001
From: wjj19950828
Date: Mon, 29 Nov 2021 16:32:11 +0800
Subject: [PATCH] Handling the case of trailing underscores

---
 x2paddle/op_mapper/pytorch2paddle/aten.py          | 388 ++----
 .../pytorch2paddle/pytorch_op_mapper.py            |   8 +
 2 files changed, 32 insertions(+), 364 deletions(-)

diff --git a/x2paddle/op_mapper/pytorch2paddle/aten.py b/x2paddle/op_mapper/pytorch2paddle/aten.py
index 7667d12..60586d6 100755
--- a/x2paddle/op_mapper/pytorch2paddle/aten.py
+++ b/x2paddle/op_mapper/pytorch2paddle/aten.py
@@ -263,41 +263,6 @@ def aten_addmm(mapper, graph, node):
 
 
 def aten_add(mapper, graph, node):
-    """ Construct a PaddleLayer for addition; this node implements out = x + y.
-    TorchScript example:
-        %296 : int = aten::add(%i.12, %288)
-    Parameter meanings:
-        %296 (-): the result of the addition.
-        %i.12 (-): input value x.
-        %288 (-): input value y.
-    """
-    scope_name = mapper.normalize_scope_name(node)
-    output_name = mapper._get_outputs_name(node)[0]
-    layer_outputs = [output_name]
-    layer_inputs = {}
-    inputs_name, inputs_node = mapper._get_inputs_name(node)
-    # Get the list of outputs of the current node
-    current_outputs = [output_name]
-    # Process input 0, i.e. %i.12
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
-                        scope_name)
-    layer_inputs["x"] = inputs_name[0]
-    # Process input 1, i.e. %288
-    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs,
-                        scope_name)
-    layer_inputs["y"] = inputs_name[1]
-    # Get the list of inputs of the current node
-    current_inputs = list(layer_inputs.values())
-
-    graph.add_layer(
-        "prim.add",
-        inputs=layer_inputs,
-        outputs=layer_outputs,
-        scope_name=scope_name)
-    return current_inputs, current_outputs
-
-
-def aten_add_(mapper, graph, node):
     """ Construct a PaddleLayer for addition; this node implements out = x + alpha * y.
     TorchScript example:
         %137 : Tensor = aten::add(%136, %130, %130)
@@ -325,21 +290,29 @@ def aten_add_(mapper, graph, node):
     layer_inputs["y"] = inputs_name[1]
     # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    # Process input 2, i.e. %151
-    if inputs_name[2] in mapper.attrs:
-        layer_attrs["alpha"] = mapper.attrs[inputs_name[2]]
-    else:
-        mapper._check_input(graph, inputs_node[2], inputs_name[2],
-                            current_outputs, scope_name)
-        layer_inputs["alpha"] = inputs_name[2]
-        current_inputs.append(inputs_name[2])
+    if len(inputs_name) > 2:
+        # Process input 2, i.e. %151
+        if inputs_name[2] in mapper.attrs:
+            layer_attrs["alpha"] = mapper.attrs[inputs_name[2]]
+        else:
+            mapper._check_input(graph, inputs_node[2], inputs_name[2],
+                                current_outputs, scope_name)
+            layer_inputs["alpha"] = inputs_name[2]
+            current_inputs.append(inputs_name[2])
 
-    graph.add_layer(
-        "prim.add_",
-        inputs=layer_inputs,
-        outputs=layer_outputs,
-        scope_name=scope_name,
-        **layer_attrs)
+        graph.add_layer(
+            "prim.add_",
+            inputs=layer_inputs,
+            outputs=layer_outputs,
+            scope_name=scope_name,
+            **layer_attrs)
+    else:
+        graph.add_layer(
+            "prim.add",
+            inputs=layer_inputs,
+            outputs=layer_outputs,
+            scope_name=scope_name,
+            **layer_attrs)
     return current_inputs, current_outputs
 
 
@@ -1634,41 +1607,6 @@ def aten_dim(mapper, graph, node):
     return current_inputs, current_outputs
 
 
-def aten_div_(mapper, graph, node):
-    """ Construct a PaddleLayer for division.
-    TorchScript example:
-        %bx_bw0.3 : Tensor = aten::div_(%bx_bw.3, %2678)
-    Parameter meanings:
-        %bx_bw0.3 (-): the result of the division.
-        %bx_bw.3 (-): the dividend.
-        %2678 (int): the divisor.
-    """
-    scope_name = mapper.normalize_scope_name(node)
-    output_name = mapper._get_outputs_name(node)[0]
-    layer_outputs = [output_name]
-    layer_inputs = {}
-    inputs_name, inputs_node = mapper._get_inputs_name(node)
-    # Get the list of outputs of the current node
-    current_outputs = [output_name]
-    # Process input 0, i.e. %124
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
-                        scope_name)
-    layer_inputs["x"] = inputs_name[0]
-    # Process input 1, i.e. %123
-    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs,
-                        scope_name)
-    layer_inputs["y"] = inputs_name[1]
-    # Get the list of inputs of the current node
-    current_inputs = list(layer_inputs.values())
-
-    graph.add_layer(
-        "prim.div",
-        inputs=layer_inputs,
-        outputs=layer_outputs,
-        scope_name=scope_name)
-    return current_inputs, current_outputs
-
-
 def aten_div(mapper, graph, node):
     """ Construct a PaddleLayer for division.
     TorchScript example:
@@ -1737,39 +1675,6 @@ def aten_dropout(mapper, graph, node):
     return current_inputs, current_outputs
 
 
-def aten_dropout_(mapper, graph, node):
-    """ Construct a PaddleLayer for Dropout.
-    TorchScript example:
-        %119 : Tensor = aten::dropout_(%result.3, %117, %118)
-    Parameter meanings:
-        %119 (Tensor): the Tensor after Dropout.
-        %result.3 (Tensor): the input Tensor.
-        %118 (bool): whether it is the training phase.
-    """
-    scope_name = mapper.normalize_scope_name(node)
-    op_name = name_generator("dropout", mapper.nn_name2id)
-    output_name = mapper._get_outputs_name(node)[0]
-    layer_outputs = [op_name, output_name]
-    layer_inputs = {}
-    inputs_name, inputs_node = mapper._get_inputs_name(node)
-    # Get the list of outputs of the current node
-    current_outputs = [output_name]
-    # Process input 0, i.e. %119
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
-                        scope_name)
-    layer_inputs["input"] = inputs_name[0]
-    # Get the lists of inputs and outputs of the current node
-    current_inputs = list(layer_inputs.values())
-
-    graph.add_layer(
-        "paddle.nn.Dropout",
-        inputs=layer_inputs,
-        outputs=layer_outputs,
-        scope_name=scope_name,
-        p=0.0)
-    return current_inputs, current_outputs
-
-
 def aten_embedding(mapper, graph, node):
     """ Construct a PaddleLayer for embedding.
     TorchScript example:
@@ -2607,10 +2512,10 @@ def aten_gru(mapper, graph, node):
     return current_inputs, current_outputs
 
 
-def aten_hardtanh_(mapper, graph, node):
+def aten_hardtanh(mapper, graph, node):
     """ Construct a PaddleLayer for the hardtanh activation.
     TorchScript example:
-        %result.9 : Tensor = aten::hardtanh_(%input.20, %67, %66)
+        %result.9 : Tensor = aten::hardtanh(%input.20, %67, %66)
     Parameter meanings:
         %result.9 (Tensor): output, the Tensor after hardtanh activation.
         %input.20 (Tensor): the Tensor to apply hardtanh to.
@@ -2990,42 +2895,6 @@ def aten_le(mapper, graph, node):
     return current_inputs, current_outputs
 
 
-def aten_leaky_relu_(mapper, graph, node):
-    """ Construct a PaddleLayer for the leaky relu activation.
-    TorchScript example:
-        %input.117 : Tensor = aten::leaky_relu_(%input.114, %1570)
-    Parameter meanings:
-        %input.117 (Tensor): output, the result after leaky relu.
-        %input.114 (Tensor): the Tensor to apply leaky relu to.
-        %1570 (float): the slope for input elements less than 0.
-    """
-    scope_name = mapper.normalize_scope_name(node)
-    op_name = name_generator("leakly_relu", mapper.nn_name2id)
-    output_name = mapper._get_outputs_name(node)[0]
-    layer_outputs = [op_name, output_name]
-    layer_inputs = {}
-    layer_attrs = {}
-    inputs_name, inputs_node = mapper._get_inputs_name(node)
-    # Get the list of outputs of the current node
-    current_outputs = [output_name]
-    # Process input 0, i.e. %result.5
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
-                        scope_name)
-    layer_inputs["x"] = inputs_name[0]
-    # Get the lists of inputs and outputs of the current node
-    current_inputs = list(layer_inputs.values())
-    # Process input 1, i.e. %1570
-    layer_attrs["negative_slope"] = mapper.attrs[inputs_name[1]]
-
-    graph.add_layer(
-        "paddle.nn.LeakyReLU",
-        inputs=layer_inputs,
-        outputs=layer_outputs,
-        scope_name=scope_name,
-        **layer_attrs)
-    return current_inputs, current_outputs
-
-
 def aten_leaky_relu(mapper, graph, node):
     """ Construct a PaddleLayer for the leaky relu activation.
     TorchScript example:
@@ -3293,115 +3162,6 @@ def aten_lt(mapper, graph, node):
     return current_inputs, current_outputs
 
 
-def aten_masked_fill_(mapper, graph, node):
-    """ Construct a PaddleLayer for masked fill.
-    TorchScript example:
-        %input.4 : Tensor = aten::masked_fill_(%scores.2, %mask.2, %46)
-    Parameter meanings:
-        %input.4 (Tensor): output, the result after filling.
-        %scores.2 (Tensor): the Tensor to be filled.
-        %mask.2 (Tensor): a bool Tensor indicating which positions to fill.
-        %46 (-): the fill value.
-    """
-    scope_name = mapper.normalize_scope_name(node)
-    output_name = mapper._get_outputs_name(node)[0]
-    layer_outputs = [output_name]
-    inputs_name, inputs_node = mapper._get_inputs_name(node)
-    # Get the list of inputs of the current node
-    current_inputs = []
-    # Get the list of outputs of the current node
-    current_outputs = [output_name]
-    # Process input 0, i.e. %input.4
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
-                        scope_name)
-    current_inputs.append(inputs_name[0])
-    graph.add_layer(
-        "prim.type",
-        inputs={"input": inputs_name[0]},
-        outputs=[inputs_name[0] + "_type"],
-        scope_name=scope_name)
-    # Process input 1, i.e. %scores.2
-    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs,
-                        scope_name)
-    current_inputs.append(inputs_name[1])
-    graph.add_layer(
-        "paddle.logical_not",
-        inputs={"x": inputs_name[1]},
-        outputs=[inputs_name[1] + "_not"],
-        scope_name=scope_name)
-    graph.add_layer(
-        "paddle.cast",
-        inputs={"x": inputs_name[1]},
-        outputs=[inputs_name[1] + "_mask"],
-        scope_name=scope_name,
-        dtype=inputs_name[0] + "_type")
-    graph.add_layer(
-        "paddle.cast",
-        inputs={"x": inputs_name[1] + "_not"},
-        outputs=[inputs_name[1] + "_not_mask"],
-        scope_name=scope_name,
-        dtype=inputs_name[0] + "_type")
-    graph.add_layer(
-        "paddle.multiply",
-        inputs={"x": inputs_name[0],
-                "y": inputs_name[1] + "_not_mask"},
-        outputs=[inputs_name[0] + "_not_mask"],
-        scope_name=scope_name)
-    # Process input 2, i.e. %46
-    mapper._check_input(graph, inputs_node[2], inputs_name[2], current_outputs,
-                        scope_name)
-    graph.add_layer(
-        "prim.eq",
-        inputs={"x": inputs_name[2]},
-        outputs=[inputs_name[2] + "_cond1"],
-        scope_name=scope_name,
-        y="-float('inf')")
-    graph.add_layer(
-        "prim.eq",
-        inputs={"x": inputs_name[2]},
-        outputs=[inputs_name[2] + "_cond2"],
-        scope_name=scope_name,
-        y="float('inf')")
-    graph.add_layer(
-        "prim.or",
-        inputs={
-            "x": inputs_name[2] + "_cond1",
-            "y": inputs_name[2] + "_cond2"
-        },
-        outputs=[inputs_name[2] + "_cond"],
-        scope_name=scope_name)
-    graph.add_layer(
-        "prim.if", {'input': inputs_name[2] + "_cond"},
-        outputs=[inputs_name[2] + "_if"],
-        scope_name=scope_name)
-    if_layer = graph.layers[list(graph.layers.keys())[-1]]
-    block = PaddleGraph(source_type="pytorch", parent_layer=if_layer)
-    block.add_layer(
-        "prim.equal",
-        inputs={"input": inputs_name[1] + "_mask"},
-        outputs=[inputs_name[2] + "_1"],
-        scope_name=scope_name)
-    if_layer.add_block(block)
-    block = PaddleGraph(source_type="pytorch", parent_layer=if_layer)
-    block.add_layer(
-        "prim.mul",
-        inputs={"x": inputs_name[1] + "_mask",
-                "y": inputs_name[2]},
-        outputs=[inputs_name[2] + "_1"],
-        scope_name=scope_name)
-    if_layer.add_block(block)
-    if_layer.inputs["input-0"] = inputs_name[1] + "_mask"
-    if_layer.inputs["input-1"] = inputs_name[2]
-    if_layer.outputs.append(inputs_name[2] + "_1")
-    graph.add_layer(
-        "paddle.add",
-        inputs={"x": inputs_name[2] + "_1",
-                "y": inputs_name[0] + "_not_mask"},
-        outputs=layer_outputs,
-        scope_name=scope_name)
-    return current_inputs, current_outputs
-
-
 def aten_masked_fill(mapper, graph, node):
     """ Construct a PaddleLayer for masked fill.
     TorchScript example:
@@ -3799,42 +3559,6 @@ def aten_mul(mapper, graph, node):
     return current_inputs, current_outputs
 
 
-def aten_mul_(mapper, graph, node):
-    """ Construct a PaddleLayer for multiplication.
-    TorchScript example:
-        %size_prods.39 : int = aten::mul_(%size_prods.38, %114)
-    Parameter meanings:
-        %size_prods.39 (Tensor): output, the result of the multiplication.
-        %size_prods.38 (-): value 1.
-        %114 (-): value 2.
-    """
-    scope_name = mapper.normalize_scope_name(node)
-    output_name = mapper._get_outputs_name(node)[0]
-    layer_outputs = [output_name]
-    layer_inputs = {}
-    inputs_name, inputs_node = mapper._get_inputs_name(node)
-    # Get the list of outputs of the current node
-    current_outputs = [output_name]
-    # Process input 0, i.e. %size_prods.38
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
-                        scope_name)
-    layer_inputs["x"] = inputs_name[0]
-    # Process input 1, i.e. %114
-    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs,
-                        scope_name)
-    layer_inputs["y"] = inputs_name[1]
-    # Get the list of inputs of the current node
-    current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs
-
-    graph.add_layer(
-        "prim.mul",
-        inputs=layer_inputs,
-        outputs=layer_outputs,
-        scope_name=scope_name)
-    return current_inputs, current_outputs
-
-
 def aten_ne(mapper, graph, node):
     """ Construct a PaddleLayer that checks whether two values are not equal.
     TorchScript example:
@@ -4304,38 +4028,6 @@ def aten_relu(mapper, graph, node):
     return current_inputs, current_outputs
 
 
-def aten_relu_(mapper, graph, node):
-    """ Construct a PaddleLayer for the ReLU activation.
-    TorchScript example:
-        %result.3 : Tensor = aten::relu_(%input.5)
-    Parameter meanings:
-        %result.3 (Tensor): output, the result after ReLU.
-        %result.5 (Tensor): the Tensor to apply ReLU to.
-    Note: the inplace parameter is not implemented in paddle
-    """
-    scope_name = mapper.normalize_scope_name(node)
-    op_name = name_generator("relu", mapper.nn_name2id)
-    output_name = mapper._get_outputs_name(node)[0]
-    layer_outputs = [op_name, output_name]
-    layer_inputs = {}
-    inputs_name, inputs_node = mapper._get_inputs_name(node)
-    # Get the list of outputs of the current node
-    current_outputs = [output_name]
-    # Process input 0, i.e. %result.5
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
-                        scope_name)
-    layer_inputs["x"] = inputs_name[0]
-    # Get the list of inputs of the current node
-    current_inputs = list(layer_inputs.values())
-
-    graph.add_layer(
-        "paddle.nn.ReLU",
-        inputs=layer_inputs,
-        outputs=layer_outputs,
-        scope_name=scope_name)
-    return current_inputs, current_outputs
-
-
 def aten_relu6(mapper, graph, node):
     """ Construct a PaddleLayer for the ReLU6 activation.
     TorchScript example:
@@ -4748,38 +4440,6 @@ def aten_silu(mapper, graph, node):
     return current_inputs, current_outputs
 
 
-def aten_silu_(mapper, graph, node):
-    """ Construct a PaddleLayer for the Silu activation.
-    TorchScript example:
-        %result.3 : Tensor = aten::silu_(%input.5)
-    Parameter meanings:
-        %result.3 (Tensor): output, the result after Silu.
-        %input.5 (Tensor): the Tensor to apply Silu to.
-    Note: the inplace parameter is not implemented in paddle
-    """
-    scope_name = mapper.normalize_scope_name(node)
-    op_name = name_generator("silu", mapper.nn_name2id)
-    output_name = mapper._get_outputs_name(node)[0]
-    layer_outputs = [op_name, output_name]
-    layer_inputs = {}
-    inputs_name, inputs_node = mapper._get_inputs_name(node)
-    # Get the list of outputs of the current node
-    current_outputs = [output_name]
-    # Process input 0, i.e. %input.5
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
-                        scope_name)
-    layer_inputs["x"] = inputs_name[0]
-    # Get the list of inputs of the current node
-    current_inputs = list(layer_inputs.values())
-
-    graph.add_layer(
-        "paddle.nn.Silu",
-        inputs=layer_inputs,
-        outputs=layer_outputs,
-        scope_name=scope_name)
-    return current_inputs, current_outputs
-
-
 def aten_sin(mapper, graph, node):
     """ Construct a PaddleLayer that computes sin.
     TorchScript example:
diff --git a/x2paddle/op_mapper/pytorch2paddle/pytorch_op_mapper.py b/x2paddle/op_mapper/pytorch2paddle/pytorch_op_mapper.py
index 5783598..562512a 100644
--- a/x2paddle/op_mapper/pytorch2paddle/pytorch_op_mapper.py
+++ b/x2paddle/op_mapper/pytorch2paddle/pytorch_op_mapper.py
@@ -55,6 +55,10 @@ class PyTorchOpMapper():
         unsupported_ops = []
         for op in op_list:
             func_name = op.replace('::', '_')
+            # Strip the trailing "_" of inplace ops, e.g. aten_relu_ -> aten_relu,
+            # but leave dunder ops such as aten::__isnot__ untouched
+            if func_name[-1] == "_" and func_name[-2] != "_":
+                func_name = func_name[:-1]
             if not (hasattr(prim, func_name) or hasattr(aten, func_name)):
                 unsupported_ops.append(op)
         if len(unsupported_ops) == 0:
@@ -104,6 +108,10 @@ class PyTorchOpMapper():
         for node in script_graph.nodes():
             kind = node.kind()
             func_name = kind.replace('::', '_')
+            # Strip the trailing "_" of inplace ops, e.g. aten_relu_ -> aten_relu,
+            # but leave dunder ops such as aten::__isnot__ untouched
+            if func_name[-1] == "_" and func_name[-2] != "_":
+                func_name = func_name[:-1]
             if hasattr(prim, func_name):
                 func = getattr(prim, func_name)
                 inputs, outputs = func(self, graph, node)
-- 
GitLab
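
The normalization rule the two hunks above introduce can be illustrated with a
small standalone sketch; the helper normalize_func_name below is hypothetical
(the patch inlines this logic in PyTorchOpMapper rather than defining a helper):

    # Hypothetical sketch of the dispatch rule added by this patch.
    # Inplace aten ops (trailing "_") fall back to the non-inplace handler,
    # while dunder ops keep their trailing underscores.
    def normalize_func_name(kind: str) -> str:
        func_name = kind.replace('::', '_')
        # Strip exactly one trailing "_" unless the name ends in "__"
        if func_name[-1] == "_" and func_name[-2] != "_":
            func_name = func_name[:-1]
        return func_name

    assert normalize_func_name("aten::relu_") == "aten_relu"
    assert normalize_func_name("aten::add_") == "aten_add"
    assert normalize_func_name("aten::relu") == "aten_relu"
    assert normalize_func_name("aten::__isnot__") == "aten___isnot__"

This is why the dedicated aten_*_ handlers (aten_add_, aten_div_, aten_relu_,
etc.) can be deleted: every inplace op now reuses the handler of its
non-inplace counterpart, with aten_add extended to accept the optional alpha
argument that previously only aten_add_ handled.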