From 5bcd803c6e9acb5903c52078b7c428106ef60d1c Mon Sep 17 00:00:00 2001
From: SunAhong1993
Date: Thu, 13 Aug 2020 16:18:57 +0800
Subject: [PATCH] add models

---
 x2paddle/core/convert_prim.py             |  34 +-
 x2paddle/core/program.py                  |   1 +
 x2paddle/op_mapper/pytorch2paddle/aten.py | 487 ++++++++++++++----
 x2paddle/op_mapper/pytorch2paddle/prim.py |  96 +++-
 .../pytorch2paddle/pytorch_op_mapper.py   |  50 +-
 5 files changed, 543 insertions(+), 125 deletions(-)

diff --git a/x2paddle/core/convert_prim.py b/x2paddle/core/convert_prim.py
index 6765aa6..2a6a219 100644
--- a/x2paddle/core/convert_prim.py
+++ b/x2paddle/core/convert_prim.py
@@ -73,7 +73,7 @@ def convert_prim(layer, indent=1, init_func=[], forward_func=[]):
     elif layer.kernel == "prim.min":
         line = "{} = min({})".format(layer.outputs[0],
                                      list(layer.inputs.values())[0])
-    elif layer.kernel == "prim.add":
+    elif layer.kernel == "prim.add_":
         line = "{} = {} + {} * {}".format(layer.outputs[0],
                                           list(layer.inputs.values())[0],
                                           layer.attrs["alpha"],
@@ -124,11 +124,33 @@ def convert_prim(layer, indent=1, init_func=[], forward_func=[]):
         if list(layer.inputs.values())[1] is None:
             item1 = str(layer.attrs[list(layer.inputs.keys())[1]])
         line = "{} = {} < {}".format(layer.outputs[0], item0, item1)
+    elif layer.kernel == "prim.ne":
+        item0 = list(layer.inputs.values())[0]
+        item1 = list(layer.inputs.values())[1]
+        line = "{} = {} != {}".format(layer.outputs[0], item0, item1)
     elif layer.kernel == "prim.slice":
-        attrs_str = ""
-        for k, v in layer.attrs.items():
-            attrs_str += "{}:".format(v)
-        attrs_str = attrs_str[:-1]
+        inputs_str = ""
+        for v in list(layer.inputs.values())[1:]:
+            inputs_str += "{}:".format(v)
+        inputs_str = inputs_str[:-1]
         line = "{} = {}[{}]".format(layer.outputs[0],
-                                    list(layer.inputs.values())[0], attrs_str)
+                                    list(layer.inputs.values())[0], inputs_str)
+    elif layer.kernel == "prim.add":
+        line = "{} = {} + {}".format(layer.outputs[0],
+                                     list(layer.inputs.values())[0],
+                                     list(layer.inputs.values())[1])
+    elif layer.kernel == "prim.sub":
+        line = "{} = {} - {}".format(layer.outputs[0],
+                                     list(layer.inputs.values())[0],
+                                     list(layer.inputs.values())[1])
+    elif layer.kernel == "prim.mul":
+        line = "{} = {} * {}".format(layer.outputs[0],
+                                     list(layer.inputs.values())[0],
+                                     list(layer.inputs.values())[1])
+    elif layer.kernel == "prim.neg":
+        line = "{} = -{}".format(layer.outputs[0],
+                                 list(layer.inputs.values())[0])
+    else:
+        print(layer.kernel)
+        line = ""
     forward_func.extend(gen_codes([line], indent=indent))
diff --git a/x2paddle/core/program.py b/x2paddle/core/program.py
index 382bb45..7ccc873 100644
--- a/x2paddle/core/program.py
+++ b/x2paddle/core/program.py
@@ -297,6 +297,7 @@ class PaddleGraph(object):
             for output_name in layer.outputs:
                 if not output_name.startswith("x"):
                     continue
+                print(layer.kernel)
                 self.outputs.append(output_name)
         self.outputs = list(set(self.outputs))
diff --git a/x2paddle/op_mapper/pytorch2paddle/aten.py b/x2paddle/op_mapper/pytorch2paddle/aten.py
index 529b1eb..738ca70 100644
--- a/x2paddle/op_mapper/pytorch2paddle/aten.py
+++ b/x2paddle/op_mapper/pytorch2paddle/aten.py
@@ -30,16 +30,19 @@ def aten_adaptive_avg_pool2d(mapper, graph, node):
     layer_inputs = {}
     layer_attrs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %x.3
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["input"] = inputs_name[0]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs
     # Process input 1, i.e. %_output_size.1
     if inputs_name[1] in mapper.attrs:
         layer_attrs["pool_size"] = mapper.attrs[inputs_name[1]]
     else:
+        mapper._check_input(graph, inputs_node[1], inputs_name[1],
+                            current_outputs)
         layer_attrs["pool_size"] = inputs_name[1]
         current_inputs.append(inputs_name[1])
     layer_attrs["pool_type"] = string("avg")
@@ -70,29 +73,34 @@ def aten_addmm(mapper, graph, node):
     layer_inputs = {}
     layer_attrs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %150
     mapper._check_input(
-        graph, inputs_node[0], inputs_name[0], layer_outputs, add_dim=True)
+        graph, inputs_node[0], inputs_name[0], current_outputs, add_dim=True)
     layer_inputs["input"] = inputs_name[0]
     # Process input 1, i.e. %input.3
-    mapper._check_input(graph, inputs_node[1], inputs_name[1], layer_outputs)
+    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
     layer_inputs["x"] = inputs_name[1]
     # Process input 2, i.e. %156
-    mapper._check_input(graph, inputs_node[2], inputs_name[2], layer_outputs)
+    mapper._check_input(graph, inputs_node[2], inputs_name[2], current_outputs)
     layer_inputs["y"] = inputs_name[2]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs
     # Process input 3, i.e. %152
     if inputs_name[3] in mapper.attrs:
         layer_attrs["beta"] = mapper.attrs[inputs_name[3]]
     else:
+        mapper._check_input(graph, inputs_node[3], inputs_name[3],
+                            current_outputs)
         layer_attrs["beta"] = inputs_name[3]
         current_inputs.append(inputs_name[3])
     # Process input 4, i.e. %151
     if inputs_name[4] in mapper.attrs:
         layer_attrs["alpha"] = mapper.attrs[inputs_name[4]]
     else:
+        mapper._check_input(graph, inputs_node[4], inputs_name[4],
+                            current_outputs)
         layer_attrs["alpha"] = inputs_name[4]
         current_inputs.append(inputs_name[4])
 
@@ -104,11 +112,41 @@ def aten_addmm(mapper, graph, node):
     return current_inputs, current_outputs
 
 
+def aten_add(mapper, graph, node):
+    """ Construct a PaddleLayer for numeric addition; this node computes out = x + y.
+
+    TorchScript example:
+        %296 : int = aten::add(%i.12, %288)
+    Argument meanings:
+        %296 (-): result of the addition.
+        %i.12 (-): input value x.
+        %288 (-): input value y.
+    """
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
+    # Process input 0, i.e. %i.12
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
+    layer_inputs["x"] = inputs_name[0]
+    # Process input 1, i.e. %288
+    mapper._check_input(
+        graph, inputs_node[1], inputs_name[1], current_outputs, add_dim=True)
+    layer_inputs["y"] = inputs_name[1]
+    # Get the list of inputs of the current node
+    current_inputs = list(layer_inputs.values())
+
+    graph.add_layer("prim.add", inputs=layer_inputs, outputs=layer_outputs)
+    return current_inputs, current_outputs
+
+
 def aten_add_(mapper, graph, node):
-    """ Construct an add PaddleLayer; this node computes out = x + alpha * y.
+    """ Construct a PaddleLayer for in-place numeric addition; this node computes out = x + alpha * y.
 
     TorchScript example:
         %output.5 : Tensor = aten::add_(%output.2, %150, %151)
     Argument meanings:
         %output.5 (Tensor): result Tensor of the add.
         %output.2 (Tensor): input Tensor x.
     """
     output_name = mapper._get_outputs_name(node)[0]
     layer_outputs = [output_name]
     layer_inputs = {}
     layer_attrs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %output.2
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["x"] = inputs_name[0]
     # Process input 1, i.e. %150
     mapper._check_input(
-        graph, inputs_node[1], inputs_name[1], layer_outputs, add_dim=True)
+        graph, inputs_node[1], inputs_name[1], current_outputs, add_dim=True)
     layer_inputs["y"] = inputs_name[1]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs
     # Process input 2, i.e. %151
     if inputs_name[2] in mapper.attrs:
         layer_attrs["alpha"] = mapper.attrs[inputs_name[2]]
     else:
+        mapper._check_input(graph, inputs_node[2], inputs_name[2],
+                            current_outputs)
         layer_attrs["alpha"] = inputs_name[2]
         current_inputs.append(inputs_name[2])
 
     graph.add_layer(
-        "prim.add", inputs=layer_inputs, outputs=layer_outputs, **layer_attrs)
+        "prim.add_", inputs=layer_inputs, outputs=layer_outputs, **layer_attrs)
     return current_inputs, current_outputs
 
 
@@ -156,20 +197,87 @@ def aten_append(mapper, graph, node):
     layer_outputs = [output_name]
     layer_inputs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. _output_size.1
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["list"] = inputs_name[0]
     # Process input 1, i.e. v.1
-    mapper._check_input(graph, inputs_node[1], inputs_name[1], layer_outputs)
+    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
     layer_inputs["element"] = inputs_name[1]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs
 
     graph.add_layer("prim.append", inputs=layer_inputs, outputs=layer_outputs)
     return current_inputs, current_outputs
 
 
+def aten_batch_norm(mapper, graph, node):
+    """ Construct a BatchNorm PaddleLayer.
+
+    TorchScript example:
+        %input.81 : Tensor = aten::batch_norm(%input.80, %778, %779, %776, %777, %780,
+                                              %exponential_average_factor.23, %766, %781)
+    Argument meanings:
+        %input.81 (Tensor): output, the batch-normalized result.
+        %input.80 (Tensor): feature map to be normalized.
+        %778 (Tensor): weights.
+        %779 (Tensor): bias.
+        %776 (Tensor): running mean.
+        %777 (Tensor): running variance.
+        %780 (bool): whether in training mode.
+        %exponential_average_factor.23 (float): factor used to compute the running mean and variance.
+        %766 (float): value added to the denominator for numerical stability.
+        %781 (bool): whether to enable cudnn.
+    """
+    if "batchnorm" in mapper.dygraph_name_id:
+        mapper.dygraph_name_id["batchnorm"] += 1
+    else:
+        mapper.dygraph_name_id["batchnorm"] = 0
+    batchnorm_name = "batchnorm" + str(mapper.dygraph_name_id["batchnorm"])
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [batchnorm_name, output_name]
+    layer_inputs = {}
+    layer_attrs = {}
+    layer_attrs["is_test"] = True
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
+    # Process input 0, i.e. %input.80
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
+    layer_inputs["input"] = inputs_name[0]
+    # Get the list of inputs of the current node
+    current_inputs = list(layer_inputs.values())
+    # Process input 1, i.e. %778
+    weights = mapper.pytorch_params[inputs_name[1]]
+    mapper.paddle_params[batchnorm_name + ".weight"] = weights
+    layer_attrs['num_channels'] = weights.shape[0]
+    # Process input 2, i.e. %779
+    if inputs_name[2] in mapper.pytorch_params:
+        bias = mapper.pytorch_params[inputs_name[2]]
+        if bias is not None:
+            mapper.paddle_params[batchnorm_name + ".bias"] = bias
+    else:
+        mapper.paddle_params[batchnorm_name + ".bias"] = False
+    # Process input 3, i.e. %776
+    mean = mapper.pytorch_params[inputs_name[3]]
+    mapper.paddle_params[batchnorm_name + "._mean"] = mean
+    # Process input 4, i.e. %777
+    var = mapper.pytorch_params[inputs_name[4]]
+    mapper.paddle_params[batchnorm_name + "._variance"] = var
+    # Process input 6, i.e. %exponential_average_factor.23
+    layer_attrs["momentum"] = mapper.attrs[inputs_name[6]]
+    # Process input 7, i.e. %766
+    layer_attrs["epsilon"] = mapper.attrs[inputs_name[7]]
+
+    graph.add_layer(
+        "fluid.dygraph.BatchNorm",
+        inputs=layer_inputs,
+        outputs=layer_outputs,
+        **layer_attrs)
+    return current_inputs, current_outputs
+
+
 def aten_conv2d(mapper, graph, node):
     """ Construct a conv2d PaddleLayer.
 
@@ -195,12 +303,13 @@ def aten_conv2d(mapper, graph, node):
     layer_inputs = {}
     layer_attrs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %input.8
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["input"] = inputs_name[0]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs[1:]
     # Process input 1, i.e. %25
     weights = mapper.pytorch_params[inputs_name[1]]
     mapper.paddle_params[conv2d_name + ".weight"] = weights
     # Process input 2, i.e. %27
     if inputs_name[2] in mapper.pytorch_params:
         bias = mapper.pytorch_params[inputs_name[2]]
-        mapper.paddle_params[conv2d_name + ".bias"] = bias
+        if bias is not None:
+            mapper.paddle_params[conv2d_name + ".bias"] = bias
+        else:
+            layer_attrs["bias_attr"] = False
     else:
-        mapper.paddle_params[conv2d_name + ".bias"] = False
+        layer_attrs["bias_attr"] = False
     # Process input 3, i.e. %28
     layer_attrs["stride"] = mapper.attrs[inputs_name[3]]
     # Process input 4, i.e. %29
@@ -244,12 +356,13 @@ def aten_dim(mapper, graph, node):
     layer_outputs = [output_name]
     layer_inputs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %input.8
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["input"] = inputs_name[0]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs
 
     graph.add_layer("prim.shape", inputs=layer_inputs, outputs=layer_outputs)
     graph.add_layer(
@@ -276,12 +389,13 @@ def aten_dropout(mapper, graph, node):
     layer_outputs = [dropout_name, output_name]
     layer_inputs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %119
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["input"] = inputs_name[0]
     # Get the lists of inputs and outputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs[1:]
 
     graph.add_layer(
         "fluid.dygraph.Dropout",
@@ -305,15 +419,16 @@ def aten_eq(mapper, graph, node):
     layer_outputs = [output_name]
     layer_inputs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %124
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
-    layer_inputs["eq0"] = inputs_name[0]
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
+    layer_inputs["x"] = inputs_name[0]
     # Process input 1, i.e. %123
-    mapper._check_input(graph, inputs_node[1], inputs_name[1], layer_outputs)
-    layer_inputs["eq1"] = inputs_name[1]
+    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
+    layer_inputs["y"] = inputs_name[1]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs
 
     graph.add_layer("prim.eq", inputs=layer_inputs, outputs=layer_outputs)
     return current_inputs, current_outputs
@@ -336,6 +451,8 @@ def aten_flatten(mapper, graph, node):
     layer_outputs = [output_name]
     layer_inputs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 1, i.e. %4
     graph.add_layer(
         "prim.assert",
@@ -353,11 +470,10 @@ def aten_flatten(mapper, graph, node):
         key=mapper.attrs[inputs_name[2]],
         value=-1)
     # Process input 0, i.e. %x
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["x"] = inputs_name[0]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs
 
     graph.add_layer(
         "fluid.layers.flatten",
@@ -381,20 +497,68 @@ def aten___getitem__(mapper, graph, node):
     layer_outputs = [output_name]
     layer_inputs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %72
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["list"] = inputs_name[0]
     # Process input 1, i.e. %88
-    mapper._check_input(graph, inputs_node[1], inputs_name[1], layer_outputs)
+    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
     layer_inputs["index"] = inputs_name[1]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs
 
     graph.add_layer("prim.getitem", inputs=layer_inputs, outputs=layer_outputs)
     return current_inputs, current_outputs
 
 
+def aten_hardtanh_(mapper, graph, node):
+    """ Construct a hardtanh activation PaddleLayer.
+
+    TorchScript example:
+        %result.9 : Tensor = aten::hardtanh_(%input.20, %67, %66)
+    Argument meanings:
+        %result.9 (Tensor): output Tensor after the hardtanh activation.
+        %input.20 (Tensor): Tensor to be activated.
+        %67 (float): lower threshold of hardtanh.
+        %66 (float): upper threshold of hardtanh.
+    """
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
+    # Process input 1, i.e. %67
+    graph.add_layer(
+        "prim.assert",
+        inputs={},
+        outputs=[inputs_name[1]],
+        type='eq',
+        key=mapper.attrs[inputs_name[1]],
+        value=0.0)
+    # Process input 2, i.e. %66
+    graph.add_layer(
+        "prim.assert",
+        inputs={},
+        outputs=[inputs_name[2]],
+        type='eq',
+        key=mapper.attrs[inputs_name[2]],
+        value=6.0)
+    # Process input 0, i.e. %input.20
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
+    layer_inputs["x"] = inputs_name[0]
+    # Get the list of inputs of the current node
+    current_inputs = list(layer_inputs.values())
+
+    graph.add_layer(
+        'fluid.layers.relu6',
+        inputs=layer_inputs,
+        outputs=layer_outputs,
+        threshold=6.0)
+    return current_inputs, current_outputs
+
+
 def aten_le(mapper, graph, node):
     """ Construct a PaddleLayer that compares two values (less than or equal).
 
@@ -409,15 +573,16 @@ def aten_le(mapper, graph, node):
     TorchScript example:
         %80 : bool = aten::le(%78, %79)
     Argument meanings:
         %80 (bool): comparison result.
        %78 (-): input 1 to compare.
         %79 (-): input 2 to compare.
     """
     output_name = mapper._get_outputs_name(node)[0]
     layer_outputs = [output_name]
     layer_inputs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %78
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["input0"] = inputs_name[0]
     # Process input 1, i.e. %79
-    mapper._check_input(graph, inputs_node[1], inputs_name[1], layer_outputs)
+    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
     layer_inputs["input1"] = inputs_name[1]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs
 
     graph.add_layer("prim.le", inputs=layer_inputs, outputs=layer_outputs)
     return current_inputs, current_outputs
@@ -436,12 +601,13 @@ def aten_len(mapper, graph, node):
     layer_outputs = [output_name]
     layer_inputs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %72
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["input"] = inputs_name[0]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs
 
     graph.add_layer("prim.len", inputs=layer_inputs, outputs=layer_outputs)
     return current_inputs, current_outputs
@@ -471,12 +637,13 @@ def aten_max_pool2d(mapper, graph, node):
     layer_inputs = {}
     layer_attrs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %result.11
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["input"] = inputs_name[0]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs[1:]
     # Process input 1, i.e. %20
     layer_attrs["pool_size"] = mapper.attrs[inputs_name[1]]
     # Process input 2, i.e. %23
@@ -517,21 +684,106 @@ def aten_matmul(mapper, graph, node):
     layer_outputs = [output_name]
     layer_inputs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %101
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["x"] = inputs_name[0]
     # Process input 1, i.e. %102
-    mapper._check_input(graph, inputs_node[1], inputs_name[1], layer_outputs)
+    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
     layer_inputs["y"] = inputs_name[1]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs
 
     graph.add_layer(
         "fluid.layers.matmul", inputs=layer_inputs, outputs=layer_outputs)
     return current_inputs, current_outputs
 
 
+def aten_mul(mapper, graph, node):
+    """ Construct a PaddleLayer for numeric multiplication.
+
+    TorchScript example:
+        %size_prods.39 : int = aten::mul(%size_prods.38, %114)
+    Argument meanings:
+        %size_prods.39 (Tensor): output, the product.
+        %size_prods.38 (-): operand 1.
+        %114 (-): operand 2.
+    """
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
+    # Process input 0, i.e. %size_prods.38
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
+    layer_inputs["x"] = inputs_name[0]
+    # Process input 1, i.e. %114
+    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
+    layer_inputs["y"] = inputs_name[1]
+    # Get the list of inputs of the current node
+    current_inputs = list(layer_inputs.values())
+
+    graph.add_layer("prim.mul", inputs=layer_inputs, outputs=layer_outputs)
+    return current_inputs, current_outputs
+
+
+def aten_ne(mapper, graph, node):
+    """ Construct a PaddleLayer that checks whether two values are not equal.
+
+    TorchScript example:
+        %134 : bool = aten::ne(%133, %132)
+    Argument meanings:
+        %134 (bool): comparison result.
+        %133 (-): input 1 to compare.
+        %132 (-): input 2 to compare.
+    """
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
+    # Process input 0, i.e. %133
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
+    layer_inputs["x"] = inputs_name[0]
+    # Process input 1, i.e. %132
+    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
+    layer_inputs["y"] = inputs_name[1]
+    # Get the list of inputs of the current node
+    current_inputs = list(layer_inputs.values())
+
+    graph.add_layer("prim.ne", inputs=layer_inputs, outputs=layer_outputs)
+    return current_inputs, current_outputs
+
+
+def aten_neg(mapper, graph, node):
+    """ Construct a PaddleLayer that negates a value.
+
+    TorchScript example:
+        %909 : int = aten::neg(%908)
+    Argument meanings:
+        %909 (int): negated result.
+        %908 (int): input to negate.
+    """
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
+    # Process input 0, i.e. %908
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
+    layer_inputs["input"] = inputs_name[0]
+    # Get the list of inputs of the current node
+    current_inputs = list(layer_inputs.values())
+
+    graph.add_layer("prim.neg", inputs=layer_inputs, outputs=layer_outputs)
+    return current_inputs, current_outputs
+
+
 def aten_relu_(mapper, graph, node):
     """ Construct a ReLU activation PaddleLayer.
 
@@ -547,12 +799,13 @@ def aten_relu_(mapper, graph, node):
     layer_outputs = [output_name]
     layer_inputs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %result.5
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["x"] = inputs_name[0]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs
 
     graph.add_layer(
         "fluid.layers.relu", inputs=layer_inputs, outputs=layer_outputs)
     return current_inputs, current_outputs
 
 
@@ -574,12 +827,13 @@ def aten_relu6(mapper, graph, node):
     layer_outputs = [output_name]
     layer_inputs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %result.5
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["x"] = inputs_name[0]
     # Get the lists of inputs and outputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs
 
     graph.add_layer(
         "fluid.layers.relu6",
@@ -589,6 +843,37 @@ def aten_relu6(mapper, graph, node):
     return current_inputs, current_outputs
 
 
+def aten_reshape(mapper, graph, node):
+    """ Construct a reshape PaddleLayer.
+
+    TorchScript example:
+        %x.6 : Tensor = aten::reshape(%4700, %4703)
+    Argument meanings:
+        %x.6 (Tensor): output, the reshaped Tensor.
+        %4700 (Tensor): Tensor to be reshaped.
+        %4703 (list): list describing the target shape.
+    """
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
+    # Process input 0, i.e. %4700
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
+    layer_inputs["x"] = inputs_name[0]
+    # Process input 1, i.e. %4703
+    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
+    layer_inputs["shape"] = inputs_name[1]
+    # Get the list of inputs of the current node
+    current_inputs = list(layer_inputs.values())
+
+    graph.add_layer(
+        "fluid.layers.reshape", inputs=layer_inputs, outputs=layer_outputs)
+    return current_inputs, current_outputs
+
+
 def aten_size(mapper, graph, node):
     """ Construct a PaddleLayer that gets the shape of a Tensor.
 
@@ -602,12 +887,13 @@ def aten_size(mapper, graph, node):
     TorchScript example:
         %73 : int[] = aten::size(%x.12)
     Argument meanings:
         %73 (list): output, the shape.
         %x.12 (Tensor): Tensor whose shape is needed.
     """
     output_name = mapper._get_outputs_name(node)[0]
     layer_outputs = [output_name]
     layer_inputs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %x.12
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["input"] = inputs_name[0]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs
 
     graph.add_layer("prim.shape", inputs=layer_inputs, outputs=layer_outputs)
     return current_inputs, current_outputs
 
 
@@ -628,35 +914,55 @@ def aten_slice(mapper, graph, node):
     TorchScript example:
         %83 : int[] = aten::slice(%73, %82, %75, %77)
     Argument meanings:
         %83 (list): output, the sliced list.
         %73 (list): list to be sliced.
         %82 (int): start index of the slice.
         %75 (int): end index of the slice.
         %77 (int): step of the slice.
     """
     output_name = mapper._get_outputs_name(node)[0]
     layer_outputs = [output_name]
     layer_inputs = {}
-    layer_attrs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %73
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["input"] = inputs_name[0]
-    # Get the lists of inputs and outputs of the current node
-    current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs
     # Process input 1, i.e. %82
-    if inputs_name[1] in mapper.attrs:
-        layer_attrs["start"] = mapper.attrs[inputs_name[1]]
-    else:
-        layer_attrs["start"] = inputs_name[1]
-        current_inputs.append(inputs_name[1])
+    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
+    layer_inputs["start"] = inputs_name[1]
     # Process input 2, i.e. %75
-    if inputs_name[2] in mapper.attrs:
-        layer_attrs["end"] = mapper.attrs[inputs_name[2]]
-    else:
-        layer_attrs["end"] = inputs_name[2]
-        current_inputs.append(inputs_name[2])
+    mapper._check_input(graph, inputs_node[2], inputs_name[2], current_outputs)
+    layer_inputs["end"] = inputs_name[2]
     # Process input 3, i.e. %77
-    if inputs_name[3] in mapper.attrs:
-        layer_attrs["step"] = mapper.attrs[inputs_name[3]]
-    else:
-        layer_attrs["step"] = inputs_name[3]
-        current_inputs.append(inputs_name[3])
+    mapper._check_input(graph, inputs_node[3], inputs_name[3], current_outputs)
+    layer_inputs["step"] = inputs_name[3]
+    # Get the list of inputs of the current node
+    current_inputs = list(layer_inputs.values())
 
-    graph.add_layer(
-        "prim.slice", inputs=layer_inputs, outputs=layer_outputs, **layer_attrs)
+    graph.add_layer("prim.slice", inputs=layer_inputs, outputs=current_outputs)
     return current_inputs, current_outputs
 
 
+def aten_sub(mapper, graph, node):
+    """ Construct a PaddleLayer for numeric subtraction.
+
+    TorchScript example:
+        %840 : int = aten::sub(%839, %836)
+    Argument meanings:
+        %840 (-): result of the subtraction.
+        %839 (-): input value x.
+        %836 (-): input value y.
+    """
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
+    # Process input 0, i.e. %839
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
+    layer_inputs["x"] = inputs_name[0]
+    # Process input 1, i.e. %836
+    mapper._check_input(
+        graph, inputs_node[1], inputs_name[1], current_outputs, add_dim=True)
+    layer_inputs["y"] = inputs_name[1]
+    # Get the list of inputs of the current node
+    current_inputs = list(layer_inputs.values())
+
graph.add_layer("prim.sub", inputs=layer_inputs, outputs=layer_outputs) return current_inputs, current_outputs @@ -664,7 +970,7 @@ def aten_t(mapper, graph, node): """ 构造矩阵转置的PaddleLayer。 TorchScript示例: - %109 : Tensor = aten::t(%102) + %840 : int = aten::sub(%839, %836) 参数含义: %109 (Tensor): 输出,转置后的矩阵。 %102 (Tensor): 需要转置的Tensor。 @@ -673,12 +979,13 @@ def aten_t(mapper, graph, node): layer_outputs = [output_name] layer_inputs = {} inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] # 处理输入0,即%x.12 - mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs) + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) layer_inputs["x"] = inputs_name[0] - # 获取当前节点输入、输出的list + # 获取当前节点输入的list current_inputs = list(layer_inputs.values()) - current_outputs = layer_outputs graph.add_layer( "fluid.layers.transpose", diff --git a/x2paddle/op_mapper/pytorch2paddle/prim.py b/x2paddle/op_mapper/pytorch2paddle/prim.py index ea9516f..600beef 100644 --- a/x2paddle/op_mapper/pytorch2paddle/prim.py +++ b/x2paddle/op_mapper/pytorch2paddle/prim.py @@ -53,14 +53,18 @@ def prim_GetAttr(mapper, graph, node): node = input_node except Exception: break - part_script = mapper.script - for field_name in field_name_list: - if hasattr(part_script, field_name): - param = getattr(part_script, field_name) - if isinstance(param, torch.Tensor): - param = param.detach().numpy() - mapper.pytorch_params[output_name] = param - part_script = param + if ".".join(field_name_list) in mapper.pytorch_params: + mapper.pytorch_params[output_name] = mapper.pytorch_params[".".join( + field_name_list)] + else: + part_script = mapper.script + for field_name in field_name_list: + if hasattr(part_script, field_name): + param = getattr(part_script, field_name) + if isinstance(param, torch.Tensor): + param = param.detach().numpy() + mapper.pytorch_params[output_name] = param + part_script = param return [], [output_name] @@ -78,12 +82,13 @@ def prim_ListConstruct(mapper, graph, node): layer_outputs = [output_name] layer_inputs = {} inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] # 处理每个输入 for i, input_name in enumerate(inputs_name): layer_inputs["input{}".format(i)] = input_name - # 获取当前节点输入、输出的list + # 获取当前节点输入的list current_inputs = list(layer_inputs.values()) - current_outputs = layer_outputs graph.add_layer("prim.list", inputs=layer_inputs, outputs=layer_outputs) return current_inputs, current_outputs @@ -101,12 +106,13 @@ def prim_RaiseException(mapper, graph, node): layer_outputs = [output_name] layer_inputs = {} inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] # 处理输入0,即%76 - mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs) + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) layer_inputs["input"] = inputs_name[0] - # 获取当前节点输入、输出的list + # 获取当前节点输入的list current_inputs = list(layer_inputs.values()) - current_outputs = layer_outputs graph.add_layer( "prim.exception", inputs=layer_inputs, outputs=layer_outputs) @@ -134,7 +140,10 @@ def prim_Loop(mapper, graph, node): block = list(node.blocks())[0] loop_outputs = node_outputs for i, block_input_ivalue in enumerate(block.inputs()): - block_input_node_name = 'x' + str(mapper.output_index) + if i == 0: + block_input_node_name = '_x' + str(mapper.output_index) + else: + block_input_node_name = 'x' + str(mapper.output_index) unique_id = 
         unique_id = block_input_ivalue.unique()
         if unique_id not in mapper.outputs_info:
             mapper.outputs_info[unique_id] = block_input_node_name
@@ -226,12 +235,65 @@ def prim_min(mapper, graph, node):
     layer_outputs = [output_name]
     layer_inputs = {}
     inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
     # Process input 0, i.e. %86
-    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
     layer_inputs["input"] = inputs_name[0]
-    # Get the lists of inputs and outputs of the current node
+    # Get the list of inputs of the current node
     current_inputs = list(layer_inputs.values())
-    current_outputs = layer_outputs
 
     graph.add_layer("prim.min", inputs=layer_inputs, outputs=layer_outputs)
     return current_inputs, current_outputs
 
 
+def prim_SetAttr(mapper, graph, node):
+    """ Record attribute information.
+
+    TorchScript example:
+         = prim::SetAttr[name="num_batches_tracked"](%260, %277)
+    Argument meanings:
+        %260 (-): prefix of the attribute name.
+        %277 (-): value to be set.
+    """
+    output_name = mapper._get_outputs_name(node)[0]
+    field_name_list = []
+    tmp_node = node
+    while True:
+        input_node = list(tmp_node.inputs())[0].node()
+        try:
+            field_name_list.insert(0, input_node.s('name'))
+            tmp_node = input_node
+        except Exception:
+            break
+    field_name_list.append(node.s('name'))
+
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    param = {"Tensor": inputs_name[1]}
+    mapper.pytorch_params[".".join(field_name_list)] = param
+    return [], [output_name]
+
+
+def prim_shape(mapper, graph, node):
+    """ Construct a PaddleLayer that gets the shape of a Tensor.
+
+    TorchScript example:
+        %4701 : int[] = prim::shape(%result.1)
+    Argument meanings:
+        %4701 (list): output, the shape information.
+        %result.1 (Tensor): Tensor whose shape is needed.
+    """
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
+    # Process input 0, i.e. %result.1
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
+    layer_inputs["input"] = inputs_name[0]
+    # Get the list of inputs of the current node
+    current_inputs = list(layer_inputs.values())
+
+    graph.add_layer("prim.shape", inputs=layer_inputs, outputs=layer_outputs)
+    return current_inputs, current_outputs
diff --git a/x2paddle/op_mapper/pytorch2paddle/pytorch_op_mapper.py b/x2paddle/op_mapper/pytorch2paddle/pytorch_op_mapper.py
index 7dc3e8c..dd5ff7b 100644
--- a/x2paddle/op_mapper/pytorch2paddle/pytorch_op_mapper.py
+++ b/x2paddle/op_mapper/pytorch2paddle/pytorch_op_mapper.py
@@ -32,8 +32,28 @@ class PyTorchOpMapper(OpMapper):
         self.output_index = 0
         self.dygraph_name_id = {}  # id in dygraph __init__ output names; key is the kernel type, value is the id
         # Conversion
+        self.check_op(decoder.graph)
         self.graph, _ = self.traverse(decoder.graph)
 
+    def check_op(self, script_graph):
+        def _update_op_list(graph):
+            for node in graph.nodes():
+                op_list.append(node.kind())
+                for block in node.blocks():
+                    _update_op_list(block)
+
+        op_list = list()
+        _update_op_list(script_graph)
+        op_list = list(set(op_list))
+        unsupported_op_list = []
+        for op in op_list:
+            func_name = op.replace('::', '_')
+            if not (hasattr(prim, func_name) or hasattr(aten, func_name)):
+                unsupported_op_list.append(op)
+        if len(unsupported_op_list) > 0:
+            raise Exception(
+                "The following ops in the model are not supported yet: {}".
+                format(unsupported_op_list))
+
     def traverse(self, script_graph, parent_layer=None):
         # Used to collect the inputs of the graph
         def _update_graph_inputs(inputs, outputs):
@@ -65,9 +85,7 @@ class PyTorchOpMapper(OpMapper):
                 func = getattr(aten, func_name)
                 inputs, outputs = func(self, graph, node)
                 _update_graph_inputs(inputs, outputs)
-            else:
-                raise Exception("The kind {} in model is not supported yet.".
-                                format(node.kind()))
+
         # Convert the output nodes
         if hasattr(script_graph, 'returnNode'):
             for i, ivalue in enumerate(script_graph.returnNode().inputs()):
@@ -97,9 +115,9 @@ class PyTorchOpMapper(OpMapper):
                 self.outputs_info[script_unique_id] = output_name
                 self.output_index += 1
             outputs_name.append(output_name)
-        # Case where an if node has no output
+        # Case where an if or loop node has no output
         if len(list(node.outputs())) == 0:
-            output_name = 'x' + str(self.output_index)
+            output_name = '_x' + str(self.output_index)
             self.output_index += 1
             outputs_name.append(output_name)
         return outputs_name
@@ -122,11 +140,19 @@ class PyTorchOpMapper(OpMapper):
                     outputs=[output_name],
                     value="params[{}]".format(string(output_name)))
             else:
-                graph.add_layer(
-                    "prim.constant",
-                    inputs={},
-                    outputs=[output_name],
-                    value=string(param) if isinstance(param, str) else param)
+                if isinstance(param, dict) and "Tensor" in param:
+                    graph.add_layer(
+                        "prim.constant",
+                        inputs={},
+                        outputs=[output_name],
+                        value=param["Tensor"])
+                else:
+                    graph.add_layer(
+                        "prim.constant",
+                        inputs={},
+                        outputs=[output_name],
+                        value=string(param)
+                        if isinstance(param, str) else param)
             node_outputs.append(output_name)
 
     def _get_inputs_name(self, node):
@@ -135,9 +161,9 @@ class PyTorchOpMapper(OpMapper):
         for script_input_ivalue in node.inputs():
             script_input_node = script_input_ivalue.node()
             script_input_unique_id = script_input_ivalue.unique()
-            input_node_name = self.outputs_info[script_input_unique_id]
+            input_name = self.outputs_info[script_input_unique_id]
             inputs_node.append(script_input_node)
-            inputs_name.append(input_node_name)
+            inputs_name.append(input_name)
         return inputs_name, inputs_node
 
     def data(self, graph, node, uid):
-- 
GitLab
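For reference, the new prim.* branches added to convert_prim.py above are plain string templates that the code generator expands into Python statements in the emitted forward function. Below is a minimal, self-contained sketch of that expansion; the Layer namedtuple is a hypothetical stand-in for x2paddle's PaddleLayer (not the real class), used only to drive the same format strings. Note that prim.ne expands to a "!=" comparison, unlike prim.lt, which uses "<".

    from collections import namedtuple

    # Hypothetical stand-in for x2paddle's PaddleLayer; illustration only.
    Layer = namedtuple("Layer", ["kernel", "inputs", "outputs", "attrs"])

    def expand(layer):
        """Return the Python statement the templates emit for a prim.* layer."""
        ins = list(layer.inputs.values())
        out = layer.outputs[0]
        if layer.kernel == "prim.add":
            return "{} = {} + {}".format(out, ins[0], ins[1])
        if layer.kernel == "prim.add_":
            # In-place add carries an alpha attribute: out = x + alpha * y
            return "{} = {} + {} * {}".format(out, ins[0],
                                              layer.attrs["alpha"], ins[1])
        if layer.kernel == "prim.sub":
            return "{} = {} - {}".format(out, ins[0], ins[1])
        if layer.kernel == "prim.mul":
            return "{} = {} * {}".format(out, ins[0], ins[1])
        if layer.kernel == "prim.neg":
            return "{} = -{}".format(out, ins[0])
        if layer.kernel == "prim.ne":
            return "{} = {} != {}".format(out, ins[0], ins[1])
        if layer.kernel == "prim.slice":
            # Inputs after the first become the start:end:step subscript
            return "{} = {}[{}]".format(out, ins[0],
                                        ":".join(str(v) for v in ins[1:]))
        raise ValueError("unhandled kernel: " + layer.kernel)

    # Example: aten::ne(%133, %132) lowered to prim.ne expands to
    # "x134 = x133 != x132".
    print(expand(Layer("prim.ne", {"x": "x133", "y": "x132"}, ["x134"], {})))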