提交 4c85cdff 编写于 作者: S SunAhong1993

add comment and add readability

上级 795b3c1b
...@@ -59,14 +59,14 @@ class PaddleLayer(object): ...@@ -59,14 +59,14 @@ class PaddleLayer(object):
class PaddleGraph(object): class PaddleGraph(object):
def __init__(self, father_layer=None, graph_type="dygraph"): def __init__(self, parent_layer=None, graph_type="dygraph"):
self.layers = OrderedDict() self.layers = OrderedDict()
self.edges_out = dict() self.edges_out = dict()
self.edges_in = dict() self.edges_in = dict()
self.inputs = list() self.inputs = list()
self.outputs = list() self.outputs = list()
self.parameters = dict() self.parameters = dict()
self.father_layer = father_layer self.parent_layer = parent_layer
self.graph_type = graph_type self.graph_type = graph_type
def set_name(self, name): def set_name(self, name):
...@@ -89,9 +89,9 @@ class PaddleGraph(object): ...@@ -89,9 +89,9 @@ class PaddleGraph(object):
def add_layer(self, kernel, inputs, outputs, **kwargs): def add_layer(self, kernel, inputs, outputs, **kwargs):
layer_id = str(len(self.layers)) layer_id = str(len(self.layers))
if self.father_layer is not None: if self.parent_layer is not None:
layer_id = "{}.{}.{}".format(self.father_layer.id, layer_id = "{}.{}.{}".format(self.parent_layer.id,
len(self.father_layer.blocks), len(self.parent_layer.blocks),
layer_id) layer_id)
layer = PaddleLayer(layer_id, kernel, inputs, outputs, **kwargs) layer = PaddleLayer(layer_id, kernel, inputs, outputs, **kwargs)
self.layers[layer_id] = layer self.layers[layer_id] = layer
...@@ -135,7 +135,7 @@ class PaddleGraph(object): ...@@ -135,7 +135,7 @@ class PaddleGraph(object):
self.get_dygraph_outputs() self.get_dygraph_outputs()
def get_global_layers(self): def get_global_layers(self):
# 该全局layers的信息是按住奥拓扑排序组成的 # 该全局layers的信息是按拓扑排序组成的
def update(layers): def update(layers):
global_layers = dict() global_layers = dict()
for layer_id, layer in layers.items(): for layer_id, layer in layers.items():
...@@ -295,8 +295,7 @@ class PaddleGraph(object): ...@@ -295,8 +295,7 @@ class PaddleGraph(object):
continue continue
if self.edges_out.get(layer_id, 0) == 0: if self.edges_out.get(layer_id, 0) == 0:
for output_name in layer.outputs: for output_name in layer.outputs:
if output_name.endswith( if not output_name.startswith("x"):
"_assert") or not output_name.startswith("x"):
continue continue
self.outputs.append(output_name) self.outputs.append(output_name)
self.outputs = list(set(self.outputs)) self.outputs = list(set(self.outputs))
...@@ -358,7 +357,7 @@ class PaddleGraph(object): ...@@ -358,7 +357,7 @@ class PaddleGraph(object):
for layer_id, layer in self.layers.items(): for layer_id, layer in self.layers.items():
if self.edges_in.get(layer_id, 0) == 0 and self.edges_out.get( if self.edges_in.get(layer_id, 0) == 0 and self.edges_out.get(
layer_id, 0) == 0: layer_id, 0) == 0 and layer.kernel != "prim.assert":
continue continue
if "dygraph" in layer.kernel: if "dygraph" in layer.kernel:
line = "{}".format( line = "{}".format(
......
...@@ -12,8 +12,6 @@ ...@@ -12,8 +12,6 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import os
import re
import torch import torch
......
...@@ -18,7 +18,7 @@ from x2paddle.core.util import * ...@@ -18,7 +18,7 @@ from x2paddle.core.util import *
def aten_adaptive_avg_pool2d(mapper, graph, node): def aten_adaptive_avg_pool2d(mapper, graph, node):
""" 构造average adaptive pool2d的PaddleLayer。 """ 构造average adaptive pool2d的PaddleLayer。
PyTorch Script 示例: TorchScript示例:
%x.5 : Tensor = aten::adaptive_avg_pool2d(%x.3, %_output_size.1) %x.5 : Tensor = aten::adaptive_avg_pool2d(%x.3, %_output_size.1)
参数含义: 参数含义:
%x.5 (Tensor): 池化后结果Tensor。 %x.5 (Tensor): 池化后结果Tensor。
...@@ -26,34 +26,36 @@ def aten_adaptive_avg_pool2d(mapper, graph, node): ...@@ -26,34 +26,36 @@ def aten_adaptive_avg_pool2d(mapper, graph, node):
%_output_size.1 (list): 自适应池化后的Tensor的宽、高大小。 %_output_size.1 (list): 自适应池化后的Tensor的宽、高大小。
""" """
output_name = mapper._get_outputs_name(node)[0] output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name] layer_outputs = [output_name]
adapoo2d_inputs = [] layer_inputs = {}
input_node = list(node.inputs())[0].node() layer_attrs = {}
script_input_unique_id = list(node.inputs())[0].unique() inputs_name, inputs_node = mapper._get_inputs_name(node)
input_node_name = mapper.outputs_info[script_input_unique_id] # 处理输入0,即%x.3
mapper._check_input(graph, input_node, input_node_name, node_outputs) mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
adapoo2d_inputs.append(input_node_name) layer_inputs["input"] = inputs_name[0]
attr_node = list(node.inputs())[1].node() # 获取当前节点输入、输出的list
attr_unique_id = list(node.inputs())[1].unique() current_inputs = list(layer_inputs.values())
attr_node_name = mapper.outputs_info[attr_unique_id] current_outputs = layer_outputs
attrs = {} # 处理输入1,即%_output_size.1
attrs["pool_size"] = mapper.attrs[ if inputs_name[1] in mapper.attrs:
attr_node_name] if attr_node_name in mapper.attrs else attr_node_name layer_attrs["pool_size"] = mapper.attrs[inputs_name[1]]
if attr_node_name not in mapper.attrs: else:
adapoo2d_inputs.append(attr_node_name) layer_attrs["pool_size"] = inputs_name[1]
attrs["pool_type"] = string("avg") current_inputs.append(inputs_name[1])
layer_attrs["pool_type"] = string("avg")
graph.add_layer( graph.add_layer(
"fluid.layers.adaptive_pool2d", "fluid.layers.adaptive_pool2d",
inputs={"input": input_node_name}, inputs=layer_inputs,
outputs=[output_name], outputs=layer_outputs,
**attrs) **layer_attrs)
return [input_node_name], node_outputs return current_inputs, current_outputs
def aten_addmm(mapper, graph, node): def aten_addmm(mapper, graph, node):
""" 构造addmm的PaddleLayer,该节点实现out = alpha ∗ x ∗ y + beta ∗ input。 """ 构造addmm的PaddleLayer,该节点实现out = alpha ∗ x ∗ y + beta ∗ input。
PyTorch Script 示例: TorchScript示例:
%ret.2 : Tensor = aten::addmm(%150, %input.3, %156, %151, %152) %ret.2 : Tensor = aten::addmm(%150, %input.3, %156, %151, %152)
参数含义: 参数含义:
%ret.2 (Tensor): addmm结果Tensor。 %ret.2 (Tensor): addmm结果Tensor。
...@@ -64,52 +66,48 @@ def aten_addmm(mapper, graph, node): ...@@ -64,52 +66,48 @@ def aten_addmm(mapper, graph, node):
%152 (int/float): 输入beta。 %152 (int/float): 输入beta。
""" """
output_name = mapper._get_outputs_name(node)[0] output_name = mapper._get_outputs_name(node)[0]
inputs = {} layer_outputs = [output_name]
attrs = {} layer_inputs = {}
addmm_inputs = [] layer_attrs = {}
node_outputs = [output_name] inputs_name, inputs_node = mapper._get_inputs_name(node)
input_node = list(node.inputs())[0].node() # 处理输入0,即%150
script_input_unique_id = list(node.inputs())[0].unique()
input_node_name = mapper.outputs_info[script_input_unique_id]
mapper._check_input( mapper._check_input(
graph, input_node, input_node_name, node_outputs, add_dim=True) graph, inputs_node[0], inputs_name[0], layer_outputs, add_dim=True)
inputs['input'] = input_node_name layer_inputs["input"] = inputs_name[0]
addmm_inputs.append(input_node_name) # 处理输入1,即%input.3
x_node = list(node.inputs())[1].node() mapper._check_input(graph, inputs_node[1], inputs_name[1], layer_outputs)
x_unique_id = list(node.inputs())[1].unique() layer_inputs["x"] = inputs_name[1]
x_node_name = mapper.outputs_info[x_unique_id] # 处理输入2,即%156
mapper._check_input(graph, x_node, x_node_name, node_outputs) mapper._check_input(graph, inputs_node[2], inputs_name[2], layer_outputs)
inputs['x'] = x_node_name layer_inputs["y"] = inputs_name[2]
addmm_inputs.append(x_node_name) # 获取当前节点输入、输出的list
y_node = list(node.inputs())[2].node() current_inputs = list(layer_inputs.values())
y_unique_id = list(node.inputs())[2].unique() current_outputs = layer_outputs
y_node_name = mapper.outputs_info[y_unique_id] # 处理输入3,即%152
mapper._check_input(graph, y_node, y_node_name, node_outputs) if inputs_name[3] in mapper.attrs:
inputs['y'] = y_node_name layer_attrs["beta"] = mapper.attrs[inputs_name[3]]
addmm_inputs.append(y_node_name) else:
beta_node = list(node.inputs())[3].node() layer_attrs["beta"] = inputs_name[3]
beta_unique_id = list(node.inputs())[3].unique() current_inputs.append(inputs_name[3])
beta_node_name = mapper.outputs_info[beta_unique_id] # 处理输入4,即%151
attrs['beta'] = mapper.attrs[ if inputs_name[4] in mapper.attrs:
beta_node_name] if beta_node_name in mapper.attrs else beta_node_name layer_attrs["alpha"] = mapper.attrs[inputs_name[4]]
if beta_node_name not in mapper.attrs: else:
addmm_inputs.append(beta_node_name) layer_attrs["alpha"] = inputs_name[4]
alpha_node = list(node.inputs())[4].node() current_inputs.append(inputs_name[4])
alpha_unique_id = list(node.inputs())[4].unique()
alpha_node_name = mapper.outputs_info[alpha_unique_id]
attrs['alpha'] = mapper.attrs[
alpha_node_name] if alpha_node_name in mapper.attrs else alpha_node_name
if alpha_node_name not in mapper.attrs:
addmm_inputs.append(alpha_node_name)
graph.add_layer( graph.add_layer(
"fluid.layers.addmm", inputs=inputs, outputs=[output_name], **attrs) "fluid.layers.addmm",
return addmm_inputs, node_outputs inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
def aten_add_(mapper, graph, node): def aten_add_(mapper, graph, node):
""" 构造add的PaddleLayer,该节点实现out = x + alpha * y。 """ 构造add的PaddleLayer,该节点实现out = x + alpha * y。
PyTorch Script 示例: TorchScript示例:
%output.5 : Tensor = aten::add_(%output.2, %150, %151) %output.5 : Tensor = aten::add_(%output.2, %150, %151)
参数含义: 参数含义:
%output.5 (Tensor): add结果Tensor。 %output.5 (Tensor): add结果Tensor。
...@@ -118,403 +116,573 @@ def aten_add_(mapper, graph, node): ...@@ -118,403 +116,573 @@ def aten_add_(mapper, graph, node):
%151 (int/float): 输入alpha。 %151 (int/float): 输入alpha。
""" """
output_name = mapper._get_outputs_name(node)[0] output_name = mapper._get_outputs_name(node)[0]
inputs = {} layer_outputs = [output_name]
attrs = {} layer_inputs = {}
add_inputs = [] layer_attrs = {}
node_outputs = [output_name] inputs_name, inputs_node = mapper._get_inputs_name(node)
x_node = list(node.inputs())[0].node() # 处理输入0,即%output.2
x_unique_id = list(node.inputs())[0].unique() mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
x_node_name = mapper.outputs_info[x_unique_id] layer_inputs["x"] = inputs_name[0]
mapper._check_input(graph, x_node, x_node_name, node_outputs) # 处理输入1,即%150
inputs['x'] = x_node_name mapper._check_input(
add_inputs.append(x_node_name) graph, inputs_node[1], inputs_name[1], layer_outputs, add_dim=True)
y_node = list(node.inputs())[1].node() layer_inputs["y"] = inputs_name[1]
y_unique_id = list(node.inputs())[1].unique() # 获取当前节点输入、输出的list
y_node_name = mapper.outputs_info[y_unique_id] current_inputs = list(layer_inputs.values())
mapper._check_input(graph, y_node, y_node_name, node_outputs, add_dim=True) current_outputs = layer_outputs
inputs['y'] = y_node_name # 处理输入2,即%151
add_inputs.append(y_node_name) if inputs_name[2] in mapper.attrs:
alpha_node = list(node.inputs())[2].node() layer_attrs["alpha"] = mapper.attrs[inputs_name[2]]
alpha_unique_id = list(node.inputs())[2].unique() else:
alpha_node_name = mapper.outputs_info[alpha_unique_id] layer_attrs["alpha"] = inputs_name[2]
attrs['alpha'] = mapper.attrs[ current_inputs.append(inputs_name[2])
alpha_node_name] if alpha_node_name in mapper.attrs else alpha_node_name
if alpha_node_name not in mapper.attrs: graph.add_layer(
add_inputs.append(alpha_node_name) "prim.add", inputs=layer_inputs, outputs=layer_outputs, **layer_attrs)
graph.add_layer("prim.add", inputs=inputs, outputs=[output_name], **attrs) return current_inputs, current_outputs
return add_inputs, node_outputs
def aten_append(mapper, graph, node): def aten_append(mapper, graph, node):
""" 构造对list进行append的PaddleLayer。
TorchScript示例:
%90 : int[] = aten::append(%_output_size.1, %v.1)
参数含义:
%90 (list): 输出,append后的list。
%_output_size.1 (list): 需要进行append的list。
%v.1 (-): append的元素。
"""
output_name = mapper._get_outputs_name(node)[0] output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name] layer_outputs = [output_name]
inputs = {} layer_inputs = {}
for i, input_ivalue in enumerate(node.inputs()): inputs_name, inputs_node = mapper._get_inputs_name(node)
input_node = input_ivalue.node() # 处理输入0,即_output_size.1
input_unique_id = input_ivalue.unique() mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
input_node_name = mapper.outputs_info[input_unique_id] layer_inputs["list"] = inputs_name[0]
mapper._check_input(graph, input_node, input_node_name, node_outputs) # 处理输入1,即v.1
if i == 0: mapper._check_input(graph, inputs_node[1], inputs_name[1], layer_outputs)
inputs['list'] = input_node_name layer_inputs["element"] = inputs_name[1]
else: # 获取当前节点输入、输出的list
inputs['element'] = input_node_name current_inputs = list(layer_inputs.values())
graph.add_layer("prim.append", inputs=inputs, outputs=[output_name]) current_outputs = layer_outputs
return list(inputs.values()), node_outputs
graph.add_layer("prim.append", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_conv2d(mapper, graph, node): def aten_conv2d(mapper, graph, node):
output_name = mapper._get_outputs_name(node)[0] """ 构造conv2d的PaddleLayer。
inputs = {}
attrs = {} TorchScript示例:
conv2d_inputs = [] %input.10 : Tensor = aten::conv2d(%input.8, %25, %27, %28, %29, %30, %26)
node_outputs = [output_name] 参数含义:
%input.10 (Tensor): 输出,卷积后的结果。
%input.8 (Tensor): 需要进行卷积的特征层。
%25 (Tensor): weights。
%27 (Tensor): bias。
%28 (int): 步长大小。
%29 (int): 填充大小。
%30 (int): 膨胀系数大小。
%26 (int): 卷积的组数。
"""
if "conv" in mapper.dygraph_name_id: if "conv" in mapper.dygraph_name_id:
mapper.dygraph_name_id["conv"] += 1 mapper.dygraph_name_id["conv"] += 1
else: else:
mapper.dygraph_name_id["conv"] = 0 mapper.dygraph_name_id["conv"] = 0
conv2d_name = "conv" + str(mapper.dygraph_name_id["conv"]) conv2d_name = "conv" + str(mapper.dygraph_name_id["conv"])
# 输入input output_name = mapper._get_outputs_name(node)[0]
input_node = list(node.inputs())[0].node() layer_outputs = [conv2d_name, output_name]
input_unique_id = list(node.inputs())[0].unique() layer_inputs = {}
input_node_name = mapper.outputs_info[input_unique_id] layer_attrs = {}
inputs['input'] = input_node_name inputs_name, inputs_node = mapper._get_inputs_name(node)
conv2d_inputs.append(input_node_name) # 处理输入0,即%input.8
# 输入weight mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
weight_node = list(node.inputs())[1].node() layer_inputs["input"] = inputs_name[0]
weight_unique_id = list(node.inputs())[1].unique() # 获取当前节点输入、输出的list
weight_node_name = mapper.outputs_info[weight_unique_id] current_inputs = list(layer_inputs.values())
weights = mapper.pytorch_params[weight_node_name] current_outputs = layer_outputs[1:]
mapper.paddle_params[conv2d_name + '.weight'] = weights # 处理输入1,即%25
attrs['num_filters'] = weights.shape[0] weights = mapper.pytorch_params[inputs_name[1]]
attrs['filter_size'] = weights.shape[2:] mapper.paddle_params[conv2d_name + ".weight"] = weights
# 输入bias layer_attrs["num_filters"] = weights.shape[0]
bias_node = list(node.inputs())[2].node() layer_attrs["filter_size"] = weights.shape[2:]
bias_unique_id = list(node.inputs())[2].unique() # 处理输入2,即%27
bias_node_name = mapper.outputs_info[bias_unique_id] if inputs_name[2] in mapper.pytorch_params:
if bias_node_name in mapper.pytorch_params: bias = mapper.pytorch_params[inputs_name[2]]
bias = mapper.pytorch_params[bias_node_name] mapper.paddle_params[conv2d_name + ".bias"] = bias
mapper.paddle_params[conv2d_name + '.bias'] = bias
else: else:
mapper.paddle_params[conv2d_name + '.bias'] = False mapper.paddle_params[conv2d_name + ".bias"] = False
# 输入stride # 处理输入3,即%28
stride_node = list(node.inputs())[3].node() layer_attrs["stride"] = mapper.attrs[inputs_name[3]]
stride_unique_id = list(node.inputs())[3].unique() # 处理输入4,即%29
stride_node_name = mapper.outputs_info[stride_unique_id] layer_attrs["padding"] = mapper.attrs[inputs_name[4]]
attrs['stride'] = mapper.attrs[stride_node_name] # 处理输入5,即%30
# 输入padding layer_attrs["dilation"] = mapper.attrs[inputs_name[5]]
padding_node = list(node.inputs())[4].node() # 处理输入6,即%26
padding_unique_id = list(node.inputs())[4].unique() layer_attrs["groups"] = mapper.attrs[inputs_name[6]]
padding_node_name = mapper.outputs_info[padding_unique_id] layer_attrs['num_channels'] = weights.shape[1] * mapper.attrs[inputs_name[
attrs['padding'] = mapper.attrs[padding_node_name] 6]]
# 输入dilation
dilation_node = list(node.inputs())[5].node()
dilation_unique_id = list(node.inputs())[5].unique()
dilation_node_name = mapper.outputs_info[dilation_unique_id]
attrs['dilation'] = mapper.attrs[dilation_node_name]
# 输入group
groups_node = list(node.inputs())[6].node()
groups_unique_id = list(node.inputs())[6].unique()
groups_node_name = mapper.outputs_info[groups_unique_id]
attrs['groups'] = mapper.attrs[groups_node_name]
attrs['num_channels'] = weights.shape[1] * mapper.attrs[groups_node_name]
graph.add_layer( graph.add_layer(
"fluid.dygraph.Conv2D", "fluid.dygraph.Conv2D",
inputs=inputs, inputs=layer_inputs,
outputs=[conv2d_name, output_name], outputs=layer_outputs,
**attrs) **layer_attrs)
return conv2d_inputs, node_outputs return current_inputs, current_outputs
def aten_dim(mapper, graph, node): def aten_dim(mapper, graph, node):
""" 构造获取维度的PaddleLayer。
TorchScript示例:
%106 : int = aten::dim(%101)
参数含义:
%106 (int): 输出,Tensor的维度。
%101 (Tensor): 输入的Tensor。
"""
output_name = mapper._get_outputs_name(node)[0] output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name] layer_outputs = [output_name]
input_node = list(node.inputs())[0].node() layer_inputs = {}
input_unique_id = list(node.inputs())[0].unique() inputs_name, inputs_node = mapper._get_inputs_name(node)
input_node_name = mapper.outputs_info[input_unique_id] # 处理输入0,即%input.8
mapper._check_input(graph, input_node, input_node_name, node_outputs) mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
graph.add_layer( layer_inputs["input"] = inputs_name[0]
"prim.shape", inputs={'input': input_node_name}, outputs=[output_name]) # 获取当前节点输入、输出的list
current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs
graph.add_layer("prim.shape", inputs=layer_inputs, outputs=layer_outputs)
graph.add_layer( graph.add_layer(
"prim.len", inputs={'input': output_name}, outputs=[output_name]) "prim.len", inputs={"input": output_name}, outputs=layer_outputs)
return [input_node_name], node_outputs return current_inputs, current_outputs
def aten_dropout(mapper, graph, node): def aten_dropout(mapper, graph, node):
output_name = mapper._get_outputs_name(node)[0] """ 构造Dropout的PaddleLayer。
node_outputs = [output_name]
TorchScript示例:
%119 : Tensor = aten::dropout(%result.3, %117, %118)
参数含义:
%119 (Tensor): Dropout后的Tensor。
%result.3 (Tensor): 输入Tensor。
%118 (bool): 是否是训练阶段。
"""
if "dropout" in mapper.dygraph_name_id: if "dropout" in mapper.dygraph_name_id:
mapper.dygraph_name_id["dropout"] += 1 mapper.dygraph_name_id["dropout"] += 1
else: else:
mapper.dygraph_name_id["dropout"] = 0 mapper.dygraph_name_id["dropout"] = 0
dropout_name = "dropout" + str(mapper.dygraph_name_id["dropout"]) dropout_name = "dropout" + str(mapper.dygraph_name_id["dropout"])
input_node = list(node.inputs())[0].node() output_name = mapper._get_outputs_name(node)[0]
input_unique_id = list(node.inputs())[0].unique() layer_outputs = [dropout_name, output_name]
input_node_name = mapper.outputs_info[input_unique_id] layer_inputs = {}
mapper._check_input(graph, input_node, input_node_name, node_outputs) inputs_name, inputs_node = mapper._get_inputs_name(node)
# 处理输入0,即%119
mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
layer_inputs["input"] = inputs_name[0]
# 获取当前节点输入、输出的list
current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs[1:]
graph.add_layer( graph.add_layer(
"fluid.dygraph.Dropout", "fluid.dygraph.Dropout",
inputs={"input": input_node_name}, inputs=layer_inputs,
outputs=[dropout_name, output_name], outputs=layer_outputs,
p=0.0) p=0.0)
return [input_node_name], node_outputs return current_inputs, current_outputs
def aten_eq(mapper, graph, node): def aten_eq(mapper, graph, node):
""" 构造判断数值是否相等的PaddleLayer。
TorchScript示例:
%125 : bool = aten::eq(%124, %123)
参数含义:
%125 (bool): 对比后结果。
%124 (-): 需对比的输入1。
%123 (-): 需对比的输入2。
"""
output_name = mapper._get_outputs_name(node)[0] output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name] layer_outputs = [output_name]
inputs = {} layer_inputs = {}
eq_inputs = [] inputs_name, inputs_node = mapper._get_inputs_name(node)
for i, input_ivalue in enumerate(node.inputs()): # 处理输入0,即%124
input_node = input_ivalue.node() mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
input_unique_id = input_ivalue.unique() layer_inputs["eq0"] = inputs_name[0]
input_node_name = mapper.outputs_info[input_unique_id] # 处理输入1,即%123
mapper._check_input(graph, input_node, input_node_name, node_outputs) mapper._check_input(graph, inputs_node[1], inputs_name[1], layer_outputs)
inputs['eq{}'.format(i)] = input_node_name layer_inputs["eq1"] = inputs_name[1]
eq_inputs.append(input_node_name) # 获取当前节点输入、输出的list
graph.add_layer("prim.eq", inputs=inputs, outputs=[output_name]) current_inputs = list(layer_inputs.values())
return list(inputs.values()), node_outputs current_outputs = layer_outputs
graph.add_layer("prim.eq", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_flatten(mapper, graph, node): def aten_flatten(mapper, graph, node):
# 目前只支持第一维的flatten """ 构造flatten的PaddleLayer。
TorchScript示例:
%x.8 : Tensor = aten::flatten(%x, %4, %2)
参数含义:
%x.8 (Tensor): flatten后结果。
%x (Tensor): 输入Tensor。
%4 (int): flatten的开始维度。
%2 (int): flatten的结束维度。
注意:目前flatten只支持第一维的flatten
"""
output_name = mapper._get_outputs_name(node)[0] output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name] layer_outputs = [output_name]
flatten_inputs = [] layer_inputs = {}
for i, input_ivalue in enumerate(node.inputs()): inputs_name, inputs_node = mapper._get_inputs_name(node)
if i == 0: # 处理输入1,即%4
continue graph.add_layer(
input_node = input_ivalue.node() "prim.assert",
input_unique_id = input_ivalue.unique() inputs={},
input_node_name = mapper.outputs_info[input_unique_id] outputs=[inputs_name[1]],
mapper._check_input(graph, input_node, input_node_name, node_outputs) type='eq',
graph.add_layer( key=mapper.attrs[inputs_name[1]],
"prim.assert", value=1)
inputs={}, # 处理输入2,即%2
outputs=[output_name + '_assert'], graph.add_layer(
type='eq', "prim.assert",
key=mapper.attrs[input_node_name], inputs={},
value=1 if i == 1 else -1) outputs=[inputs_name[2]],
flatten_inputs.append(input_node_name) type='eq',
input_node = list(node.inputs())[0].node() key=mapper.attrs[inputs_name[2]],
input_unique_id = list(node.inputs())[0].unique() value=-1)
input_node_name = mapper.outputs_info[input_unique_id] # 处理输入0,即%x
mapper._check_input(graph, input_node, input_node_name, node_outputs) mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
layer_inputs["x"] = inputs_name[0]
# 获取当前节点输入、输出的list
current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs
graph.add_layer( graph.add_layer(
"fluid.layers.flatten", "fluid.layers.flatten",
inputs={'x': input_node_name}, inputs=layer_inputs,
outputs=[output_name], outputs=layer_outputs,
axis=1) axis=1)
flatten_inputs.append(input_node_name) return current_inputs, current_outputs
return flatten_inputs, node_outputs
def aten___getitem__(mapper, graph, node): def aten___getitem__(mapper, graph, node):
""" 构造获取list中元素的PaddleLayer。
TorchScript示例:
%v.1 : int = aten::__getitem__(%72, %88)
参数含义:
%v.1 (-): 输出,list中的元素。
%72 (list): 需要获取元素的list。
%88 (int): 索引。
"""
output_name = mapper._get_outputs_name(node)[0] output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name] layer_outputs = [output_name]
inputs = {} layer_inputs = {}
for i, input_ivalue in enumerate(node.inputs()): inputs_name, inputs_node = mapper._get_inputs_name(node)
input_node = input_ivalue.node() # 处理输入0,即%72
input_unique_id = input_ivalue.unique() mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
input_node_name = mapper.outputs_info[input_unique_id] layer_inputs["list"] = inputs_name[0]
mapper._check_input(graph, input_node, input_node_name, node_outputs) # 处理输入1,即%88
if i == 0: mapper._check_input(graph, inputs_node[1], inputs_name[1], layer_outputs)
inputs['list'] = input_node_name layer_inputs["index"] = inputs_name[1]
else: # 获取当前节点输入、输出的list
inputs['index'] = input_node_name current_inputs = list(layer_inputs.values())
graph.add_layer("prim.getitem", inputs=inputs, outputs=[output_name]) current_outputs = layer_outputs
return list(inputs.values()), node_outputs
graph.add_layer("prim.getitem", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_le(mapper, graph, node): def aten_le(mapper, graph, node):
""" 构造对比大小的PaddleLayer。
TorchScript示例:
%80 : bool = aten::le(%78, %79)
参数含义:
%80 (bool): 输出,第一个元素是否小于第二个元素。
%78 (-): 需对比的输入1。
%79 (-): 需对比的输入2。
"""
output_name = mapper._get_outputs_name(node)[0] output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name] layer_outputs = [output_name]
inputs = {} layer_inputs = {}
for i, input_ivalue in enumerate(node.inputs()): inputs_name, inputs_node = mapper._get_inputs_name(node)
input_node = input_ivalue.node() # 处理输入0,即%78
input_unique_id = input_ivalue.unique() mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
input_node_name = mapper.outputs_info[input_unique_id] layer_inputs["input0"] = inputs_name[0]
mapper._check_input(graph, input_node, input_node_name, node_outputs) # 处理输入1,即%79
inputs['input{}'.format(i)] = input_node_name mapper._check_input(graph, inputs_node[1], inputs_name[1], layer_outputs)
graph.add_layer("prim.le", inputs=inputs, outputs=[output_name]) layer_inputs["input1"] = inputs_name[1]
return list(inputs.values()), node_outputs # 获取当前节点输入、输出的list
current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs
graph.add_layer("prim.le", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_len(mapper, graph, node): def aten_len(mapper, graph, node):
""" 构造获取list长度的PaddleLayer。
TorchScript示例:
%85 : int = aten::len(%83)
参数含义:
%85 (int): 输出,list的长度。
%72 (list): 需要获取长度的list。
"""
output_name = mapper._get_outputs_name(node)[0] output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name] layer_outputs = [output_name]
input_node = list(node.inputs())[0].node() layer_inputs = {}
input_unique_id = list(node.inputs())[0].unique() inputs_name, inputs_node = mapper._get_inputs_name(node)
input_node_name = mapper.outputs_info[input_unique_id] # 处理输入0,即%72
mapper._check_input(graph, input_node, input_node_name, node_outputs) mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
graph.add_layer( layer_inputs["input"] = inputs_name[0]
"prim.len", inputs={'input': input_node_name}, outputs=[output_name]) # 获取当前节点输入、输出的list
return [input_node_name], node_outputs current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs
graph.add_layer("prim.len", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_max_pool2d(mapper, graph, node): def aten_max_pool2d(mapper, graph, node):
output_name = mapper._get_outputs_name(node)[0] """ 构造最大池化的PaddleLayer。
node_outputs = [output_name]
inputs = {} TorchScript示例:
attrs = {} %input.8 : Tensor = aten::max_pool2d(%result.11, %20, %23, %21, %22, %19)
pool_inputs = [] 参数含义:
%input.8 (Tensor): 输出,池化后的结果。
%result.11 (Tensor): 需要池化的Tensor。
%20 (list): 池化kernel的大小。
%23 (list): 步长大小。
%21 (list): 填充大小。
%22 (list): 膨胀系数大小。
%19 (bool): 是否用ceil函数计算输出高度和宽度。
"""
if "pool" in mapper.dygraph_name_id: if "pool" in mapper.dygraph_name_id:
mapper.dygraph_name_id["pool"] += 1 mapper.dygraph_name_id["pool"] += 1
else: else:
mapper.dygraph_name_id["pool"] = 0 mapper.dygraph_name_id["pool"] = 0
pool_name = "pool" + str(mapper.dygraph_name_id["pool"]) pool_name = "pool" + str(mapper.dygraph_name_id["pool"])
for i, input_ivalue in enumerate(node.inputs()): output_name = mapper._get_outputs_name(node)[0]
input_node = input_ivalue.node() layer_outputs = [pool_name, output_name]
input_unique_id = input_ivalue.unique() layer_inputs = {}
input_node_name = mapper.outputs_info[input_unique_id] layer_attrs = {}
if i == 0: inputs_name, inputs_node = mapper._get_inputs_name(node)
mapper._check_input(graph, input_node, input_node_name, # 处理输入0,即%result.11
node_outputs) mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
inputs['input'] = input_node_name layer_inputs["input"] = inputs_name[0]
pool_inputs.append(input_node_name) # 获取当前节点输入、输出的list
elif i == 1: current_inputs = list(layer_inputs.values())
attrs['pool_size'] = mapper.attrs[input_node_name] current_outputs = layer_outputs[1:]
elif i == 2: # 处理输入1,即%20
attrs['pool_stride'] = mapper.attrs[input_node_name] layer_attrs["pool_size"] = mapper.attrs[inputs_name[1]]
elif i == 3: # 处理输入2,即%23
attrs['pool_padding'] = mapper.attrs[input_node_name] layer_attrs["pool_stride"] = mapper.attrs[inputs_name[2]]
elif i == 4: # 处理输入3,即%21
graph.add_layer( layer_attrs["pool_padding"] = mapper.attrs[inputs_name[3]]
"prim.assert", # 处理输入4,即%22
inputs={}, graph.add_layer(
outputs=[output_name + '_assert'], "prim.assert",
type='eq', inputs={},
key=mapper.attrs[input_node_name], outputs=[inputs_name[4]],
value=[1, [1, 1]]) type="eq",
pool_inputs.append(input_node_name) key=mapper.attrs[inputs_name[4]],
elif i == 5: value=[1, [1, 1]])
attrs['ceil_mode'] = mapper.attrs[ # 处理输入5,即%19
input_node_name] if input_node_name in mapper.attrs else input_node_name layer_attrs["ceil_mode"] = mapper.attrs[inputs_name[5]]
if input_node_name not in mapper.attrs: layer_attrs["pool_type"] = string("max")
pool_inputs.append(input_node_name)
attrs['pool_type'] = string('max')
graph.add_layer( graph.add_layer(
"fluid.dygraph.Pool2D", "fluid.dygraph.Pool2D",
inputs=inputs, inputs=layer_inputs,
outputs=[pool_name, output_name], outputs=layer_outputs,
**attrs) **layer_attrs)
return pool_inputs, node_outputs return current_inputs, current_outputs
def aten_matmul(mapper, graph, node): def aten_matmul(mapper, graph, node):
""" 构造矩阵相乘的PaddleLayer。
TorchScript示例:
%output.2 : Tensor = aten::matmul(%101, %111)
参数含义:
%output.2 (Tensor): 输出,相乘后的结果。
%101 (Tensor): 矩阵1。
%102 (Tensor): 矩阵2。
"""
output_name = mapper._get_outputs_name(node)[0] output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name] layer_outputs = [output_name]
inputs = {} layer_inputs = {}
x_node = list(node.inputs())[0].node() inputs_name, inputs_node = mapper._get_inputs_name(node)
x_unique_id = list(node.inputs())[0].unique() # 处理输入0,即%101
x_node_name = mapper.outputs_info[x_unique_id] mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
mapper._check_input(graph, x_node, x_node_name, node_outputs) layer_inputs["x"] = inputs_name[0]
inputs['x'] = x_node_name # 处理输入1,即%102
y_node = list(node.inputs())[1].node() mapper._check_input(graph, inputs_node[1], inputs_name[1], layer_outputs)
y_unique_id = list(node.inputs())[1].unique() layer_inputs["y"] = inputs_name[1]
y_node_name = mapper.outputs_info[y_unique_id] # 获取当前节点输入、输出的list
inputs['y'] = y_node_name current_inputs = list(layer_inputs.values())
mapper._check_input(graph, y_node, y_node_name, node_outputs) current_outputs = layer_outputs
graph.add_layer("fluid.layers.matmul", inputs=inputs, outputs=[output_name])
return list(inputs.values()), node_outputs graph.add_layer(
"fluid.layers.matmul", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_relu_(mapper, graph, node):
    """Build the PaddleLayer for ReLU activation.

    TorchScript example:
        %result.3 : Tensor = aten::relu_(%input.5)
    Meaning:
        %result.3 (Tensor): output, the result after ReLU.
        %result.5 (Tensor): the Tensor to apply ReLU to.
    Note: the torch `inplace` flag has no Paddle counterpart and is ignored.
    """
    output_name = mapper._get_outputs_name(node)[0]
    layer_outputs = [output_name]
    layer_inputs = {}
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # Process input 0 (%result.5).
    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
    layer_inputs["x"] = inputs_name[0]
    # Collect this node's input / output name lists.
    current_inputs = list(layer_inputs.values())
    current_outputs = layer_outputs

    graph.add_layer(
        "fluid.layers.relu", inputs=layer_inputs, outputs=layer_outputs)
    return current_inputs, current_outputs
def aten_relu6(mapper, graph, node):
    """Build the PaddleLayer for ReLU6 activation.

    TorchScript example:
        %result.3 : Tensor = aten::relu6(%input.5)
    Meaning:
        %result.3 (Tensor): output, the result after ReLU6.
        %result.5 (Tensor): the Tensor to apply ReLU6 to.
    Note: the torch `inplace` flag has no Paddle counterpart and is ignored.
    """
    output_name = mapper._get_outputs_name(node)[0]
    layer_outputs = [output_name]
    layer_inputs = {}
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # Process input 0 (%result.5).
    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
    layer_inputs["x"] = inputs_name[0]
    # Collect this node's input / output name lists.
    current_inputs = list(layer_inputs.values())
    current_outputs = layer_outputs

    graph.add_layer(
        "fluid.layers.relu6",
        inputs=layer_inputs,
        outputs=layer_outputs,
        threshold=6.0)
    return current_inputs, current_outputs
def aten_size(mapper, graph, node):
    """Build the PaddleLayer that fetches a Tensor's shape.

    TorchScript example:
        %73 : int[] = aten::size(%x.12)
    Meaning:
        %73 (list): output, the shape as a list.
        %x.12 (Tensor): the Tensor whose shape is queried.
    """
    output_name = mapper._get_outputs_name(node)[0]
    layer_outputs = [output_name]
    layer_inputs = {}
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # Process input 0 (%x.12).
    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
    layer_inputs["input"] = inputs_name[0]
    # Collect this node's input / output name lists.
    current_inputs = list(layer_inputs.values())
    current_outputs = layer_outputs

    graph.add_layer("prim.shape", inputs=layer_inputs, outputs=layer_outputs)
    return current_inputs, current_outputs
def aten_slice(mapper, graph, node):
    """Build the PaddleLayer that slices a list.

    TorchScript example:
        %83 : int[] = aten::slice(%73, %82, %75, %77)
    Meaning:
        %83 (list): output, the sliced list.
        %73 (list): the list to slice.
        %82 (int): start index of the slice.
        %75 (int): end index of the slice.
        %77 (int): step of the slice.
    """
    output_name = mapper._get_outputs_name(node)[0]
    layer_outputs = [output_name]
    layer_inputs = {}
    layer_attrs = {}
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # Process input 0 (%73): the list being sliced.
    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
    layer_inputs["input"] = inputs_name[0]
    # Collect this node's input / output name lists.
    current_inputs = list(layer_inputs.values())
    current_outputs = layer_outputs
    # Process input 1 (%82): constant attrs are inlined; dynamic values
    # stay as graph inputs (same pattern for end / step below).
    if inputs_name[1] in mapper.attrs:
        layer_attrs["start"] = mapper.attrs[inputs_name[1]]
    else:
        layer_attrs["start"] = inputs_name[1]
        current_inputs.append(inputs_name[1])
    # Process input 2 (%75).
    if inputs_name[2] in mapper.attrs:
        layer_attrs["end"] = mapper.attrs[inputs_name[2]]
    else:
        layer_attrs["end"] = inputs_name[2]
        current_inputs.append(inputs_name[2])
    # Process input 3 (%77).
    if inputs_name[3] in mapper.attrs:
        layer_attrs["step"] = mapper.attrs[inputs_name[3]]
    else:
        layer_attrs["step"] = inputs_name[3]
        current_inputs.append(inputs_name[3])

    graph.add_layer(
        "prim.slice", inputs=layer_inputs, outputs=layer_outputs, **layer_attrs)
    return current_inputs, current_outputs
def aten_t(mapper, graph, node):
    """Build the PaddleLayer for matrix transpose.

    TorchScript example:
        %109 : Tensor = aten::t(%102)
    Meaning:
        %109 (Tensor): output, the transposed matrix.
        %102 (Tensor): the Tensor to transpose.
    """
    output_name = mapper._get_outputs_name(node)[0]
    layer_outputs = [output_name]
    layer_inputs = {}
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # Process input 0 (%102).
    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
    layer_inputs["x"] = inputs_name[0]
    # Collect this node's input / output name lists.
    current_inputs = list(layer_inputs.values())
    current_outputs = layer_outputs

    graph.add_layer(
        "fluid.layers.transpose",
        inputs=layer_inputs,
        outputs=layer_outputs,
        perm=[1, 0])
    return current_inputs, current_outputs
...@@ -19,13 +19,12 @@ from x2paddle.core.util import * ...@@ -19,13 +19,12 @@ from x2paddle.core.util import *
def prim_Constant(mapper, graph, node): def prim_Constant(mapper, graph, node):
""" 构造constant的PaddleLayer,该节点实现常量赋值。 """ 构造constant的PaddleLayer,该节点实现常量赋值。
PyTorch Script 示例: TorchScript示例:
%2 : int = prim::Constant[value=-1]() %2 : int = prim::Constant[value=-1]()
参数含义: 参数含义:
%2 (常量类型由赋值类型定义,该示例中为int型): 常量赋值结果输出。 %2 (常量类型由赋值类型定义,该示例中为int型): 常量赋值结果输出。
""" """
output_name = mapper._get_outputs_name(node)[0] output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name]
output = list(node.outputs())[0] output = list(node.outputs())[0]
value = output.toIValue() value = output.toIValue()
mapper.attrs[output_name] = value mapper.attrs[output_name] = value
...@@ -33,20 +32,19 @@ def prim_Constant(mapper, graph, node): ...@@ -33,20 +32,19 @@ def prim_Constant(mapper, graph, node):
value = string(value) value = string(value)
graph.add_layer( graph.add_layer(
"prim.constant", inputs={}, outputs=[output_name], value=value) "prim.constant", inputs={}, outputs=[output_name], value=value)
return [], node_outputs return [], [output_name]
def prim_GetAttr(mapper, graph, node): def prim_GetAttr(mapper, graph, node):
""" 获取attribute信息。 """ 获取attribute信息。
PyTorch Script 示例: TorchScript示例:
%27 : Tensor? = prim::GetAttr[name="bias"](%7) %27 : Tensor? = prim::GetAttr[name="bias"](%7)
参数含义: 参数含义:
%7 (Tensor): 输入Tensor。 %7 (Tensor): 输入Tensor。
%27 (Tensor): 输入Tensor。 %27 (Tensor): 输入Tensor。
""" """
output_name = mapper._get_outputs_name(node)[0] output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name]
field_name_list = [node.s('name')] field_name_list = [node.s('name')]
while True: while True:
input_node = list(node.inputs())[0].node() input_node = list(node.inputs())[0].node()
...@@ -63,13 +61,13 @@ def prim_GetAttr(mapper, graph, node): ...@@ -63,13 +61,13 @@ def prim_GetAttr(mapper, graph, node):
param = param.detach().numpy() param = param.detach().numpy()
mapper.pytorch_params[output_name] = param mapper.pytorch_params[output_name] = param
part_script = param part_script = param
return [], node_outputs return [], [output_name]
def prim_ListConstruct(mapper, graph, node):
    """Build the PaddleLayer that constructs a list.

    TorchScript example:
        %86 : int[] = prim::ListConstruct(%84, %85)
    Meaning:
        %84 (int/other): first element of the list.
        %85 (int/other): second element of the list.
        %86 (list): output list of the node.
    """
    output_name = mapper._get_outputs_name(node)[0]
    layer_outputs = [output_name]
    layer_inputs = {}
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # Process every input: element i becomes key "input{i}".
    for i, input_name in enumerate(inputs_name):
        layer_inputs["input{}".format(i)] = input_name
    # Collect this node's input / output name lists.
    current_inputs = list(layer_inputs.values())
    current_outputs = layer_outputs

    graph.add_layer("prim.list", inputs=layer_inputs, outputs=layer_outputs)
    return current_inputs, current_outputs
def prim_RaiseException(mapper, graph, node):
    """Build the PaddleLayer that raises an exception.

    TorchScript example:
        = prim::RaiseException(%76)
    Meaning:
        %76 (str): the exception message.
    """
    output_name = mapper._get_outputs_name(node)[0]
    layer_outputs = [output_name]
    layer_inputs = {}
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # Process input 0 (%76).
    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
    layer_inputs["input"] = inputs_name[0]
    # Collect this node's input / output name lists.
    current_inputs = list(layer_inputs.values())
    current_outputs = layer_outputs

    graph.add_layer(
        "prim.exception", inputs=layer_inputs, outputs=layer_outputs)
    return current_inputs, current_outputs
def prim_Loop(mapper, graph, node): def prim_Loop(mapper, graph, node):
""" 构造loop循环的PaddleLayer。 """ 构造loop循环的PaddleLayer。
PyTorch Script 示例: TorchScript示例:
%x : Tensor = prim::Loop(%4, %3, %x.3) %x : Tensor = prim::Loop(%4, %3, %x.3)
block0(%i : int, %x.12 : Tensor): block0(%i : int, %x.12 : Tensor):
%72 : int[] = prim::Constant[value=[6, 6]]() %72 : int[] = prim::Constant[value=[6, 6]]()
...@@ -125,11 +129,10 @@ def prim_Loop(mapper, graph, node): ...@@ -125,11 +129,10 @@ def prim_Loop(mapper, graph, node):
%x.3 (Tensor): 循环中修改的Tensor。 %x.3 (Tensor): 循环中修改的Tensor。
%x (Tensor): loop循环的输出,与%x.5对应。 %x (Tensor): loop循环的输出,与%x.5对应。
""" """
output_name = mapper._get_outputs_name(node)[0] node_outputs = mapper._get_outputs_name(node)
node_outputs = [output_name]
loop_inputs = {} loop_inputs = {}
block = list(node.blocks())[0] block = list(node.blocks())[0]
loop_outputs = [output_name] loop_outputs = node_outputs
for i, block_input_ivalue in enumerate(block.inputs()): for i, block_input_ivalue in enumerate(block.inputs()):
block_input_node_name = 'x' + str(mapper.output_index) block_input_node_name = 'x' + str(mapper.output_index)
unique_id = block_input_ivalue.unique() unique_id = block_input_ivalue.unique()
...@@ -161,7 +164,7 @@ def prim_Loop(mapper, graph, node): ...@@ -161,7 +164,7 @@ def prim_Loop(mapper, graph, node):
graph.add_layer("prim.loop", inputs=loop_inputs, outputs=loop_outputs) graph.add_layer("prim.loop", inputs=loop_inputs, outputs=loop_outputs)
current_layer = list(graph.layers.values())[-1] current_layer = list(graph.layers.values())[-1]
block_graph, graph_inputs = mapper.traverse(block, node, current_layer) block_graph, graph_inputs = mapper.traverse(block, current_layer)
for i, input_name in enumerate(graph_inputs): for i, input_name in enumerate(graph_inputs):
if input_name == loop_outputs[1]: if input_name == loop_outputs[1]:
continue continue
...@@ -173,7 +176,7 @@ def prim_Loop(mapper, graph, node): ...@@ -173,7 +176,7 @@ def prim_Loop(mapper, graph, node):
def prim_If(mapper, graph, node): def prim_If(mapper, graph, node):
""" 构造if控制流的PaddleLayer。 """ 构造if控制流的PaddleLayer。
PyTorch Script 示例: TorchScript示例:
%input.5 : Tensor = prim::If(%107) %input.5 : Tensor = prim::If(%107)
block0(): block0():
%109 : Tensor = aten::t(%102) %109 : Tensor = aten::t(%102)
...@@ -196,14 +199,14 @@ def prim_If(mapper, graph, node): ...@@ -196,14 +199,14 @@ def prim_If(mapper, graph, node):
graph.add_layer("prim.if", {'input': input_node_name}, [output_name]) graph.add_layer("prim.if", {'input': input_node_name}, [output_name])
current_layer = list(graph.layers.values())[-1] current_layer = list(graph.layers.values())[-1]
block0 = list(node.blocks())[0] block0 = list(node.blocks())[0]
block0_graph, graph_inputs0 = mapper.traverse(block0, node, current_layer) block0_graph, graph_inputs0 = mapper.traverse(block0, current_layer)
len0 = 0 len0 = 0
for i, input_name in enumerate(graph_inputs0): for i, input_name in enumerate(graph_inputs0):
current_layer.inputs['input-{}'.format(i)] = input_name current_layer.inputs['input-{}'.format(i)] = input_name
len0 = i len0 = i
current_layer.add_block(block0_graph) current_layer.add_block(block0_graph)
block1 = list(node.blocks())[1] block1 = list(node.blocks())[1]
block1_graph, graph_inputs1 = mapper.traverse(block1, node, current_layer) block1_graph, graph_inputs1 = mapper.traverse(block1, current_layer)
for i, input_name in enumerate(graph_inputs1): for i, input_name in enumerate(graph_inputs1):
current_layer.inputs['input-{}'.format(len0 + 1 + i)] = input_name current_layer.inputs['input-{}'.format(len0 + 1 + i)] = input_name
current_layer.add_block(block1_graph) current_layer.add_block(block1_graph)
...@@ -213,18 +216,22 @@ def prim_If(mapper, graph, node): ...@@ -213,18 +216,22 @@ def prim_If(mapper, graph, node):
def prim_min(mapper, graph, node):
    """Build the PaddleLayer for `min`.

    TorchScript example:
        %87 : int = prim::min(%86)
    Meaning:
        %86 (list): input.
        %87 (int): output.
    """
    output_name = mapper._get_outputs_name(node)[0]
    layer_outputs = [output_name]
    layer_inputs = {}
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # Process input 0 (%86).
    mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
    layer_inputs["input"] = inputs_name[0]
    # Collect this node's input / output name lists.
    current_inputs = list(layer_inputs.values())
    current_outputs = layer_outputs

    graph.add_layer("prim.min", inputs=layer_inputs, outputs=layer_outputs)
    return current_inputs, current_outputs
...@@ -34,7 +34,7 @@ class PyTorchOpMapper(OpMapper): ...@@ -34,7 +34,7 @@ class PyTorchOpMapper(OpMapper):
# 转换 # 转换
self.graph, _ = self.traverse(decoder.graph) self.graph, _ = self.traverse(decoder.graph)
def traverse(self, script_graph, control_node=None, father_layer=None): def traverse(self, script_graph, parent_layer=None):
# 用于获取graph的输入 # 用于获取graph的输入
def _update_graph_inputs(inputs, outputs): def _update_graph_inputs(inputs, outputs):
current_node_outputs.extend(outputs) current_node_outputs.extend(outputs)
...@@ -43,7 +43,7 @@ class PyTorchOpMapper(OpMapper): ...@@ -43,7 +43,7 @@ class PyTorchOpMapper(OpMapper):
graph_inputs.append(name) graph_inputs.append(name)
# 初始化 # 初始化
graph = PaddleGraph(father_layer) graph = PaddleGraph(parent_layer)
current_node_outputs = [] current_node_outputs = []
graph_inputs = [] graph_inputs = []
# 转换输入节点 # 转换输入节点
...@@ -71,7 +71,7 @@ class PyTorchOpMapper(OpMapper): ...@@ -71,7 +71,7 @@ class PyTorchOpMapper(OpMapper):
# 转换输出节点 # 转换输出节点
if hasattr(script_graph, 'returnNode'): if hasattr(script_graph, 'returnNode'):
for i, ivalue in enumerate(script_graph.returnNode().inputs()): for i, ivalue in enumerate(script_graph.returnNode().inputs()):
if control_node.kind() == "prim::Loop" and i == 0: if parent_layer.kernel == "prim.loop" and i == 0:
continue continue
node = ivalue.node() node = ivalue.node()
script_unique_id = ivalue.unique() script_unique_id = ivalue.unique()
...@@ -79,7 +79,7 @@ class PyTorchOpMapper(OpMapper): ...@@ -79,7 +79,7 @@ class PyTorchOpMapper(OpMapper):
graph, graph,
node, node,
uid=script_unique_id, uid=script_unique_id,
control_node=control_node, parent_layer=parent_layer,
index=i) index=i)
_update_graph_inputs(inputs, outputs) _update_graph_inputs(inputs, outputs)
# 设置graph的参数 # 设置graph的参数
...@@ -129,6 +129,17 @@ class PyTorchOpMapper(OpMapper): ...@@ -129,6 +129,17 @@ class PyTorchOpMapper(OpMapper):
value=string(param) if isinstance(param, str) else param) value=string(param) if isinstance(param, str) else param)
node_outputs.append(output_name) node_outputs.append(output_name)
def _get_inputs_name(self, node):
inputs_name = []
inputs_node = []
for script_input_ivalue in node.inputs():
script_input_node = script_input_ivalue.node()
script_input_unique_id = script_input_ivalue.unique()
input_node_name = self.outputs_info[script_input_unique_id]
inputs_node.append(script_input_node)
inputs_name.append(input_node_name)
return inputs_name, inputs_node
def data(self, graph, node, uid): def data(self, graph, node, uid):
for output_ivalue in node.outputs(): for output_ivalue in node.outputs():
script_unique_id = output_ivalue.unique() script_unique_id = output_ivalue.unique()
...@@ -145,17 +156,14 @@ class PyTorchOpMapper(OpMapper): ...@@ -145,17 +156,14 @@ class PyTorchOpMapper(OpMapper):
value=output_name) value=output_name)
return [], [output_name] return [], [output_name]
def equal(self, graph, node, uid=None, control_node=None, index=None): def equal(self, graph, node, uid=None, parent_layer=None, index=None):
if control_node is not None and index is not None: if parent_layer is not None and index is not None:
kind = control_node.kind()
# block的输出 # block的输出
input_node_name = self.outputs_info[uid] input_node_name = self.outputs_info[uid]
control_output_id = index control_output_id = index
if kind == "prim::Loop": if parent_layer.kernel == "prim.loop":
control_output_id = index - 1 control_output_id = index - 1
output_ivalue = list(control_node.outputs())[ output_node_name = parent_layer.outputs[control_output_id]
control_output_id].unique()
output_node_name = self.outputs_info[output_ivalue]
graph.add_layer( graph.add_layer(
"prim.equal", "prim.equal",
inputs={'input': input_node_name}, inputs={'input': input_node_name},
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册