Commit 4c85cdff authored by SunAhong1993

Add comments and improve readability

Parent 795b3c1b
@@ -59,14 +59,14 @@ class PaddleLayer(object):
class PaddleGraph(object):
def __init__(self, father_layer=None, graph_type="dygraph"):
def __init__(self, parent_layer=None, graph_type="dygraph"):
self.layers = OrderedDict()
self.edges_out = dict()
self.edges_in = dict()
self.inputs = list()
self.outputs = list()
self.parameters = dict()
self.father_layer = father_layer
self.parent_layer = parent_layer
self.graph_type = graph_type
def set_name(self, name):
@@ -89,9 +89,9 @@ class PaddleGraph(object):
def add_layer(self, kernel, inputs, outputs, **kwargs):
layer_id = str(len(self.layers))
if self.father_layer is not None:
layer_id = "{}.{}.{}".format(self.father_layer.id,
len(self.father_layer.blocks),
if self.parent_layer is not None:
layer_id = "{}.{}.{}".format(self.parent_layer.id,
len(self.parent_layer.blocks),
layer_id)
layer = PaddleLayer(layer_id, kernel, inputs, outputs, **kwargs)
self.layers[layer_id] = layer
@@ -135,7 +135,7 @@ class PaddleGraph(object):
self.get_dygraph_outputs()
def get_global_layers(self):
# The info of the global layers is assembled in topologgical order
# The info of the global layers is assembled in topological order
def update(layers):
global_layers = dict()
for layer_id, layer in layers.items():
@@ -295,8 +295,7 @@ class PaddleGraph(object):
continue
if self.edges_out.get(layer_id, 0) == 0:
for output_name in layer.outputs:
if output_name.endswith(
"_assert") or not output_name.startswith("x"):
if not output_name.startswith("x"):
continue
self.outputs.append(output_name)
self.outputs = list(set(self.outputs))
@@ -358,7 +357,7 @@ class PaddleGraph(object):
for layer_id, layer in self.layers.items():
if self.edges_in.get(layer_id, 0) == 0 and self.edges_out.get(
layer_id, 0) == 0:
layer_id, 0) == 0 and layer.kernel != "prim.assert":
continue
if "dygraph" in layer.kernel:
line = "{}".format(
......
@@ -12,8 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import torch
......
@@ -18,7 +18,7 @@ from x2paddle.core.util import *
def aten_adaptive_avg_pool2d(mapper, graph, node):
""" 构造average adaptive pool2d的PaddleLayer。
PyTorch Script 示例:
TorchScript示例:
%x.5 : Tensor = aten::adaptive_avg_pool2d(%x.3, %_output_size.1)
参数含义:
%x.5 (Tensor): 池化后结果Tensor。
......@@ -26,34 +26,36 @@ def aten_adaptive_avg_pool2d(mapper, graph, node):
%_output_size.1 (list): 自适应池化后的Tensor的宽、高大小。
"""
output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name]
adapoo2d_inputs = []
input_node = list(node.inputs())[0].node()
script_input_unique_id = list(node.inputs())[0].unique()
input_node_name = mapper.outputs_info[script_input_unique_id]
mapper._check_input(graph, input_node, input_node_name, node_outputs)
adapoo2d_inputs.append(input_node_name)
attr_node = list(node.inputs())[1].node()
attr_unique_id = list(node.inputs())[1].unique()
attr_node_name = mapper.outputs_info[attr_unique_id]
attrs = {}
attrs["pool_size"] = mapper.attrs[
attr_node_name] if attr_node_name in mapper.attrs else attr_node_name
if attr_node_name not in mapper.attrs:
adapoo2d_inputs.append(attr_node_name)
attrs["pool_type"] = string("avg")
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Process input 0, i.e. %x.3
mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
layer_inputs["input"] = inputs_name[0]
# Get the input/output lists of the current node
current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs
# Process input 1, i.e. %_output_size.1
if inputs_name[1] in mapper.attrs:
layer_attrs["pool_size"] = mapper.attrs[inputs_name[1]]
else:
layer_attrs["pool_size"] = inputs_name[1]
current_inputs.append(inputs_name[1])
layer_attrs["pool_type"] = string("avg")
graph.add_layer(
"fluid.layers.adaptive_pool2d",
inputs={"input": input_node_name},
outputs=[output_name],
**attrs)
return [input_node_name], node_outputs
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
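# A sketch of the Paddle 1.x code this layer renders to, assuming
# %_output_size.1 resolved to the constant [6, 6]; x0/x1 are hypothetical names:
#   x1 = fluid.layers.adaptive_pool2d(input=x0, pool_size=[6, 6], pool_type='avg')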
def aten_addmm(mapper, graph, node):
""" 构造addmm的PaddleLayer,该节点实现out = alpha ∗ x ∗ y + beta ∗ input。
PyTorch Script 示例:
TorchScript示例:
%ret.2 : Tensor = aten::addmm(%150, %input.3, %156, %151, %152)
参数含义:
%ret.2 (Tensor): addmm结果Tensor。
......@@ -64,52 +66,48 @@ def aten_addmm(mapper, graph, node):
%152 (int/float): 输入beta。
"""
output_name = mapper._get_outputs_name(node)[0]
inputs = {}
attrs = {}
addmm_inputs = []
node_outputs = [output_name]
input_node = list(node.inputs())[0].node()
script_input_unique_id = list(node.inputs())[0].unique()
input_node_name = mapper.outputs_info[script_input_unique_id]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Process input 0, i.e. %150
mapper._check_input(
graph, input_node, input_node_name, node_outputs, add_dim=True)
inputs['input'] = input_node_name
addmm_inputs.append(input_node_name)
x_node = list(node.inputs())[1].node()
x_unique_id = list(node.inputs())[1].unique()
x_node_name = mapper.outputs_info[x_unique_id]
mapper._check_input(graph, x_node, x_node_name, node_outputs)
inputs['x'] = x_node_name
addmm_inputs.append(x_node_name)
y_node = list(node.inputs())[2].node()
y_unique_id = list(node.inputs())[2].unique()
y_node_name = mapper.outputs_info[y_unique_id]
mapper._check_input(graph, y_node, y_node_name, node_outputs)
inputs['y'] = y_node_name
addmm_inputs.append(y_node_name)
beta_node = list(node.inputs())[3].node()
beta_unique_id = list(node.inputs())[3].unique()
beta_node_name = mapper.outputs_info[beta_unique_id]
attrs['beta'] = mapper.attrs[
beta_node_name] if beta_node_name in mapper.attrs else beta_node_name
if beta_node_name not in mapper.attrs:
addmm_inputs.append(beta_node_name)
alpha_node = list(node.inputs())[4].node()
alpha_unique_id = list(node.inputs())[4].unique()
alpha_node_name = mapper.outputs_info[alpha_unique_id]
attrs['alpha'] = mapper.attrs[
alpha_node_name] if alpha_node_name in mapper.attrs else alpha_node_name
if alpha_node_name not in mapper.attrs:
addmm_inputs.append(alpha_node_name)
graph, inputs_node[0], inputs_name[0], layer_outputs, add_dim=True)
layer_inputs["input"] = inputs_name[0]
# Process input 1, i.e. %input.3
mapper._check_input(graph, inputs_node[1], inputs_name[1], layer_outputs)
layer_inputs["x"] = inputs_name[1]
# Process input 2, i.e. %156
mapper._check_input(graph, inputs_node[2], inputs_name[2], layer_outputs)
layer_inputs["y"] = inputs_name[2]
# Get the input/output lists of the current node
current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs
# Process input 3, i.e. %151 (beta)
if inputs_name[3] in mapper.attrs:
layer_attrs["beta"] = mapper.attrs[inputs_name[3]]
else:
layer_attrs["beta"] = inputs_name[3]
current_inputs.append(inputs_name[3])
# Process input 4, i.e. %152 (alpha)
if inputs_name[4] in mapper.attrs:
layer_attrs["alpha"] = mapper.attrs[inputs_name[4]]
else:
layer_attrs["alpha"] = inputs_name[4]
current_inputs.append(inputs_name[4])
graph.add_layer(
"fluid.layers.addmm", inputs=inputs, outputs=[output_name], **attrs)
return addmm_inputs, node_outputs
"fluid.layers.addmm",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
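# A sketch of the generated call, assuming beta and alpha both resolved to
# constants in mapper.attrs; ret/x0/x1/x2 are hypothetical names:
#   ret = fluid.layers.addmm(input=x0, x=x1, y=x2, beta=1, alpha=1)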
def aten_add_(mapper, graph, node):
""" 构造add的PaddleLayer,该节点实现out = x + alpha * y。
PyTorch Script 示例:
TorchScript示例:
%output.5 : Tensor = aten::add_(%output.2, %150, %151)
参数含义:
%output.5 (Tensor): add结果Tensor。
......@@ -118,403 +116,573 @@ def aten_add_(mapper, graph, node):
%151 (int/float): 输入alpha。
"""
output_name = mapper._get_outputs_name(node)[0]
inputs = {}
attrs = {}
add_inputs = []
node_outputs = [output_name]
x_node = list(node.inputs())[0].node()
x_unique_id = list(node.inputs())[0].unique()
x_node_name = mapper.outputs_info[x_unique_id]
mapper._check_input(graph, x_node, x_node_name, node_outputs)
inputs['x'] = x_node_name
add_inputs.append(x_node_name)
y_node = list(node.inputs())[1].node()
y_unique_id = list(node.inputs())[1].unique()
y_node_name = mapper.outputs_info[y_unique_id]
mapper._check_input(graph, y_node, y_node_name, node_outputs, add_dim=True)
inputs['y'] = y_node_name
add_inputs.append(y_node_name)
alpha_node = list(node.inputs())[2].node()
alpha_unique_id = list(node.inputs())[2].unique()
alpha_node_name = mapper.outputs_info[alpha_unique_id]
attrs['alpha'] = mapper.attrs[
alpha_node_name] if alpha_node_name in mapper.attrs else alpha_node_name
if alpha_node_name not in mapper.attrs:
add_inputs.append(alpha_node_name)
graph.add_layer("prim.add", inputs=inputs, outputs=[output_name], **attrs)
return add_inputs, node_outputs
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Process input 0, i.e. %output.2
mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
layer_inputs["x"] = inputs_name[0]
# Process input 1, i.e. %150
mapper._check_input(
graph, inputs_node[1], inputs_name[1], layer_outputs, add_dim=True)
layer_inputs["y"] = inputs_name[1]
# Get the input/output lists of the current node
current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs
# Process input 2, i.e. %151
if inputs_name[2] in mapper.attrs:
layer_attrs["alpha"] = mapper.attrs[inputs_name[2]]
else:
layer_attrs["alpha"] = inputs_name[2]
current_inputs.append(inputs_name[2])
graph.add_layer(
"prim.add", inputs=layer_inputs, outputs=layer_outputs, **layer_attrs)
return current_inputs, current_outputs
def aten_append(mapper, graph, node):
""" 构造对list进行append的PaddleLayer。
TorchScript示例:
%90 : int[] = aten::append(%_output_size.1, %v.1)
参数含义:
%90 (list): 输出,append后的list。
%_output_size.1 (list): 需要进行append的list。
%v.1 (-): append的元素。
"""
output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name]
inputs = {}
for i, input_ivalue in enumerate(node.inputs()):
input_node = input_ivalue.node()
input_unique_id = input_ivalue.unique()
input_node_name = mapper.outputs_info[input_unique_id]
mapper._check_input(graph, input_node, input_node_name, node_outputs)
if i == 0:
inputs['list'] = input_node_name
else:
inputs['element'] = input_node_name
graph.add_layer("prim.append", inputs=inputs, outputs=[output_name])
return list(inputs.values()), node_outputs
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Process input 0, i.e. %_output_size.1
mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
layer_inputs["list"] = inputs_name[0]
# Process input 1, i.e. %v.1
mapper._check_input(graph, inputs_node[1], inputs_name[1], layer_outputs)
layer_inputs["element"] = inputs_name[1]
# Get the input/output lists of the current node
current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs
graph.add_layer("prim.append", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_conv2d(mapper, graph, node):
output_name = mapper._get_outputs_name(node)[0]
inputs = {}
attrs = {}
conv2d_inputs = []
node_outputs = [output_name]
""" 构造conv2d的PaddleLayer。
TorchScript示例:
%input.10 : Tensor = aten::conv2d(%input.8, %25, %27, %28, %29, %30, %26)
参数含义:
%input.10 (Tensor): 输出,卷积后的结果。
%input.8 (Tensor): 需要进行卷积的特征层。
%25 (Tensor): weights。
%27 (Tensor): bias。
%28 (int): 步长大小。
%29 (int): 填充大小。
%30 (int): 膨胀系数大小。
%26 (int): 卷积的组数。
"""
if "conv" in mapper.dygraph_name_id:
mapper.dygraph_name_id["conv"] += 1
else:
mapper.dygraph_name_id["conv"] = 0
conv2d_name = "conv" + str(mapper.dygraph_name_id["conv"])
# Input: input
input_node = list(node.inputs())[0].node()
input_unique_id = list(node.inputs())[0].unique()
input_node_name = mapper.outputs_info[input_unique_id]
inputs['input'] = input_node_name
conv2d_inputs.append(input_node_name)
# Input: weight
weight_node = list(node.inputs())[1].node()
weight_unique_id = list(node.inputs())[1].unique()
weight_node_name = mapper.outputs_info[weight_unique_id]
weights = mapper.pytorch_params[weight_node_name]
mapper.paddle_params[conv2d_name + '.weight'] = weights
attrs['num_filters'] = weights.shape[0]
attrs['filter_size'] = weights.shape[2:]
# Input: bias
bias_node = list(node.inputs())[2].node()
bias_unique_id = list(node.inputs())[2].unique()
bias_node_name = mapper.outputs_info[bias_unique_id]
if bias_node_name in mapper.pytorch_params:
bias = mapper.pytorch_params[bias_node_name]
mapper.paddle_params[conv2d_name + '.bias'] = bias
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [conv2d_name, output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Process input 0, i.e. %input.8
mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
layer_inputs["input"] = inputs_name[0]
# Get the input/output lists of the current node
current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs[1:]
# Process input 1, i.e. %25
weights = mapper.pytorch_params[inputs_name[1]]
mapper.paddle_params[conv2d_name + ".weight"] = weights
layer_attrs["num_filters"] = weights.shape[0]
layer_attrs["filter_size"] = weights.shape[2:]
# Process input 2, i.e. %27
if inputs_name[2] in mapper.pytorch_params:
bias = mapper.pytorch_params[inputs_name[2]]
mapper.paddle_params[conv2d_name + ".bias"] = bias
else:
mapper.paddle_params[conv2d_name + '.bias'] = False
# Input: stride
stride_node = list(node.inputs())[3].node()
stride_unique_id = list(node.inputs())[3].unique()
stride_node_name = mapper.outputs_info[stride_unique_id]
attrs['stride'] = mapper.attrs[stride_node_name]
# Input: padding
padding_node = list(node.inputs())[4].node()
padding_unique_id = list(node.inputs())[4].unique()
padding_node_name = mapper.outputs_info[padding_unique_id]
attrs['padding'] = mapper.attrs[padding_node_name]
# Input: dilation
dilation_node = list(node.inputs())[5].node()
dilation_unique_id = list(node.inputs())[5].unique()
dilation_node_name = mapper.outputs_info[dilation_unique_id]
attrs['dilation'] = mapper.attrs[dilation_node_name]
# Input: groups
groups_node = list(node.inputs())[6].node()
groups_unique_id = list(node.inputs())[6].unique()
groups_node_name = mapper.outputs_info[groups_unique_id]
attrs['groups'] = mapper.attrs[groups_node_name]
attrs['num_channels'] = weights.shape[1] * mapper.attrs[groups_node_name]
mapper.paddle_params[conv2d_name + ".bias"] = False
# Process input 3, i.e. %28
layer_attrs["stride"] = mapper.attrs[inputs_name[3]]
# Process input 4, i.e. %29
layer_attrs["padding"] = mapper.attrs[inputs_name[4]]
# Process input 5, i.e. %30
layer_attrs["dilation"] = mapper.attrs[inputs_name[5]]
# Process input 6, i.e. %26
layer_attrs["groups"] = mapper.attrs[inputs_name[6]]
layer_attrs['num_channels'] = weights.shape[1] * mapper.attrs[inputs_name[
6]]
graph.add_layer(
"fluid.dygraph.Conv2D",
inputs=inputs,
outputs=[conv2d_name, output_name],
**attrs)
return conv2d_inputs, node_outputs
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
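# A sketch of the generated dygraph sublayer, assuming a 3->64 channel 3x3
# convolution; all attribute values shown are hypothetical:
#   self.conv0 = fluid.dygraph.Conv2D(
#       num_channels=3, num_filters=64, filter_size=[3, 3],
#       stride=1, padding=1, dilation=1, groups=1)
#   x1 = self.conv0(x0)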
def aten_dim(mapper, graph, node):
""" 构造获取维度的PaddleLayer。
TorchScript示例:
%106 : int = aten::dim(%101)
参数含义:
%106 (int): 输出,Tensor的维度。
%101 (Tensor): 输入的Tensor。
"""
output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name]
input_node = list(node.inputs())[0].node()
input_unique_id = list(node.inputs())[0].unique()
input_node_name = mapper.outputs_info[input_unique_id]
mapper._check_input(graph, input_node, input_node_name, node_outputs)
graph.add_layer(
"prim.shape", inputs={'input': input_node_name}, outputs=[output_name])
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Process input 0, i.e. %101
mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
layer_inputs["input"] = inputs_name[0]
# Get the input/output lists of the current node
current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs
graph.add_layer("prim.shape", inputs=layer_inputs, outputs=layer_outputs)
graph.add_layer(
"prim.len", inputs={'input': output_name}, outputs=[output_name])
return [input_node_name], node_outputs
"prim.len", inputs={"input": output_name}, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_dropout(mapper, graph, node):
output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name]
""" 构造Dropout的PaddleLayer。
TorchScript示例:
%119 : Tensor = aten::dropout(%result.3, %117, %118)
参数含义:
%119 (Tensor): Dropout后的Tensor。
%result.3 (Tensor): 输入Tensor。
%118 (bool): 是否是训练阶段。
"""
if "dropout" in mapper.dygraph_name_id:
mapper.dygraph_name_id["dropout"] += 1
else:
mapper.dygraph_name_id["dropout"] = 0
dropout_name = "dropout" + str(mapper.dygraph_name_id["dropout"])
input_node = list(node.inputs())[0].node()
input_unique_id = list(node.inputs())[0].unique()
input_node_name = mapper.outputs_info[input_unique_id]
mapper._check_input(graph, input_node, input_node_name, node_outputs)
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [dropout_name, output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Process input 0, i.e. %result.3
mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
layer_inputs["input"] = inputs_name[0]
# Get the input/output lists of the current node
current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs[1:]
graph.add_layer(
"fluid.dygraph.Dropout",
inputs={"input": input_node_name},
outputs=[dropout_name, output_name],
inputs=layer_inputs,
outputs=layer_outputs,
p=0.0)
return [input_node_name], node_outputs
return current_inputs, current_outputs
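# Note the hard-coded p=0.0: the converted model targets inference, so dropout
# becomes a no-op. A sketch of the generated sublayer (names hypothetical):
#   self.dropout0 = fluid.dygraph.Dropout(p=0.0)
#   x1 = self.dropout0(x0)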
def aten_eq(mapper, graph, node):
""" 构造判断数值是否相等的PaddleLayer。
TorchScript示例:
%125 : bool = aten::eq(%124, %123)
参数含义:
%125 (bool): 对比后结果。
%124 (-): 需对比的输入1。
%123 (-): 需对比的输入2。
"""
output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name]
inputs = {}
eq_inputs = []
for i, input_ivalue in enumerate(node.inputs()):
input_node = input_ivalue.node()
input_unique_id = input_ivalue.unique()
input_node_name = mapper.outputs_info[input_unique_id]
mapper._check_input(graph, input_node, input_node_name, node_outputs)
inputs['eq{}'.format(i)] = input_node_name
eq_inputs.append(input_node_name)
graph.add_layer("prim.eq", inputs=inputs, outputs=[output_name])
return list(inputs.values()), node_outputs
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Process input 0, i.e. %124
mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
layer_inputs["eq0"] = inputs_name[0]
# Process input 1, i.e. %123
mapper._check_input(graph, inputs_node[1], inputs_name[1], layer_outputs)
layer_inputs["eq1"] = inputs_name[1]
# Get the input/output lists of the current node
current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs
graph.add_layer("prim.eq", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_flatten(mapper, graph, node):
# Currently only flattening along the first dimension is supported
""" Construct a PaddleLayer for flatten.
TorchScript example:
%x.8 : Tensor = aten::flatten(%x, %4, %2)
Parameter meanings:
%x.8 (Tensor): the flatten result.
%x (Tensor): the input Tensor.
%4 (int): start dimension of the flatten.
%2 (int): end dimension of the flatten.
Note: flatten currently only supports flattening along the first dimension.
"""
output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name]
flatten_inputs = []
for i, input_ivalue in enumerate(node.inputs()):
if i == 0:
continue
input_node = input_ivalue.node()
input_unique_id = input_ivalue.unique()
input_node_name = mapper.outputs_info[input_unique_id]
mapper._check_input(graph, input_node, input_node_name, node_outputs)
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Process input 1, i.e. %4
graph.add_layer(
"prim.assert",
inputs={},
outputs=[inputs_name[1]],
type='eq',
key=mapper.attrs[inputs_name[1]],
value=1)
# Process input 2, i.e. %2
graph.add_layer(
"prim.assert",
inputs={},
outputs=[output_name + '_assert'],
outputs=[inputs_name[2]],
type='eq',
key=mapper.attrs[input_node_name],
value=1 if i == 1 else -1)
flatten_inputs.append(input_node_name)
input_node = list(node.inputs())[0].node()
input_unique_id = list(node.inputs())[0].unique()
input_node_name = mapper.outputs_info[input_unique_id]
mapper._check_input(graph, input_node, input_node_name, node_outputs)
key=mapper.attrs[inputs_name[2]],
value=-1)
# Process input 0, i.e. %x
mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
layer_inputs["x"] = inputs_name[0]
# Get the input/output lists of the current node
current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs
graph.add_layer(
"fluid.layers.flatten",
inputs={'x': input_node_name},
outputs=[output_name],
inputs=layer_inputs,
outputs=layer_outputs,
axis=1)
flatten_inputs.append(input_node_name)
return flatten_inputs, node_outputs
return current_inputs, current_outputs
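# Since the two prim.assert layers pin the start dimension to 1 and the end
# dimension to -1, the op always renders as a flatten that keeps only the
# batch dimension, roughly (names hypothetical):
#   x1 = fluid.layers.flatten(x=x0, axis=1)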
def aten___getitem__(mapper, graph, node):
""" 构造获取list中元素的PaddleLayer。
TorchScript示例:
%v.1 : int = aten::__getitem__(%72, %88)
参数含义:
%v.1 (-): 输出,list中的元素。
%72 (list): 需要获取元素的list。
%88 (int): 索引。
"""
output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name]
inputs = {}
for i, input_ivalue in enumerate(node.inputs()):
input_node = input_ivalue.node()
input_unique_id = input_ivalue.unique()
input_node_name = mapper.outputs_info[input_unique_id]
mapper._check_input(graph, input_node, input_node_name, node_outputs)
if i == 0:
inputs['list'] = input_node_name
else:
inputs['index'] = input_node_name
graph.add_layer("prim.getitem", inputs=inputs, outputs=[output_name])
return list(inputs.values()), node_outputs
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Process input 0, i.e. %72
mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
layer_inputs["list"] = inputs_name[0]
# Process input 1, i.e. %88
mapper._check_input(graph, inputs_node[1], inputs_name[1], layer_outputs)
layer_inputs["index"] = inputs_name[1]
# Get the input/output lists of the current node
current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs
graph.add_layer("prim.getitem", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_le(mapper, graph, node):
""" 构造对比大小的PaddleLayer。
TorchScript示例:
%80 : bool = aten::le(%78, %79)
参数含义:
%80 (bool): 输出,第一个元素是否小于第二个元素。
%78 (-): 需对比的输入1。
%79 (-): 需对比的输入2。
"""
output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name]
inputs = {}
for i, input_ivalue in enumerate(node.inputs()):
input_node = input_ivalue.node()
input_unique_id = input_ivalue.unique()
input_node_name = mapper.outputs_info[input_unique_id]
mapper._check_input(graph, input_node, input_node_name, node_outputs)
inputs['input{}'.format(i)] = input_node_name
graph.add_layer("prim.le", inputs=inputs, outputs=[output_name])
return list(inputs.values()), node_outputs
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Process input 0, i.e. %78
mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
layer_inputs["input0"] = inputs_name[0]
# Process input 1, i.e. %79
mapper._check_input(graph, inputs_node[1], inputs_name[1], layer_outputs)
layer_inputs["input1"] = inputs_name[1]
# Get the input/output lists of the current node
current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs
graph.add_layer("prim.le", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_len(mapper, graph, node):
""" 构造获取list长度的PaddleLayer。
TorchScript示例:
%85 : int = aten::len(%83)
参数含义:
%85 (int): 输出,list的长度。
%72 (list): 需要获取长度的list。
"""
output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name]
input_node = list(node.inputs())[0].node()
input_unique_id = list(node.inputs())[0].unique()
input_node_name = mapper.outputs_info[input_unique_id]
mapper._check_input(graph, input_node, input_node_name, node_outputs)
graph.add_layer(
"prim.len", inputs={'input': input_node_name}, outputs=[output_name])
return [input_node_name], node_outputs
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Process input 0, i.e. %83
mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
layer_inputs["input"] = inputs_name[0]
# Get the input/output lists of the current node
current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs
graph.add_layer("prim.len", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_max_pool2d(mapper, graph, node):
output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name]
inputs = {}
attrs = {}
pool_inputs = []
""" 构造最大池化的PaddleLayer。
TorchScript示例:
%input.8 : Tensor = aten::max_pool2d(%result.11, %20, %23, %21, %22, %19)
参数含义:
%input.8 (Tensor): 输出,池化后的结果。
%result.11 (Tensor): 需要池化的Tensor。
%20 (list): 池化kernel的大小。
%23 (list): 步长大小。
%21 (list): 填充大小。
%22 (list): 膨胀系数大小。
%19 (bool): 是否用ceil函数计算输出高度和宽度。
"""
if "pool" in mapper.dygraph_name_id:
mapper.dygraph_name_id["pool"] += 1
else:
mapper.dygraph_name_id["pool"] = 0
pool_name = "pool" + str(mapper.dygraph_name_id["pool"])
for i, input_ivalue in enumerate(node.inputs()):
input_node = input_ivalue.node()
input_unique_id = input_ivalue.unique()
input_node_name = mapper.outputs_info[input_unique_id]
if i == 0:
mapper._check_input(graph, input_node, input_node_name,
node_outputs)
inputs['input'] = input_node_name
pool_inputs.append(input_node_name)
elif i == 1:
attrs['pool_size'] = mapper.attrs[input_node_name]
elif i == 2:
attrs['pool_stride'] = mapper.attrs[input_node_name]
elif i == 3:
attrs['pool_padding'] = mapper.attrs[input_node_name]
elif i == 4:
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [pool_name, output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Process input 0, i.e. %result.11
mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
layer_inputs["input"] = inputs_name[0]
# Get the input/output lists of the current node
current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs[1:]
# Process input 1, i.e. %20
layer_attrs["pool_size"] = mapper.attrs[inputs_name[1]]
# Process input 2, i.e. %23
layer_attrs["pool_stride"] = mapper.attrs[inputs_name[2]]
# Process input 3, i.e. %21
layer_attrs["pool_padding"] = mapper.attrs[inputs_name[3]]
# Process input 4, i.e. %22
graph.add_layer(
"prim.assert",
inputs={},
outputs=[output_name + '_assert'],
type='eq',
key=mapper.attrs[input_node_name],
outputs=[inputs_name[4]],
type="eq",
key=mapper.attrs[inputs_name[4]],
value=[1, [1, 1]])
pool_inputs.append(input_node_name)
elif i == 5:
attrs['ceil_mode'] = mapper.attrs[
input_node_name] if input_node_name in mapper.attrs else input_node_name
if input_node_name not in mapper.attrs:
pool_inputs.append(input_node_name)
attrs['pool_type'] = string('max')
# Process input 5, i.e. %19
layer_attrs["ceil_mode"] = mapper.attrs[inputs_name[5]]
layer_attrs["pool_type"] = string("max")
graph.add_layer(
"fluid.dygraph.Pool2D",
inputs=inputs,
outputs=[pool_name, output_name],
**attrs)
return pool_inputs, node_outputs
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
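# A sketch of the generated sublayer, assuming a 3x3 kernel with stride 2 and
# padding 1 (hypothetical values):
#   self.pool0 = fluid.dygraph.Pool2D(
#       pool_size=[3, 3], pool_stride=2, pool_padding=1,
#       ceil_mode=False, pool_type='max')
#   x1 = self.pool0(x0)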
def aten_matmul(mapper, graph, node):
""" 构造矩阵相乘的PaddleLayer。
TorchScript示例:
%output.2 : Tensor = aten::matmul(%101, %111)
参数含义:
%output.2 (Tensor): 输出,相乘后的结果。
%101 (Tensor): 矩阵1。
%102 (Tensor): 矩阵2。
"""
output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name]
inputs = {}
x_node = list(node.inputs())[0].node()
x_unique_id = list(node.inputs())[0].unique()
x_node_name = mapper.outputs_info[x_unique_id]
mapper._check_input(graph, x_node, x_node_name, node_outputs)
inputs['x'] = x_node_name
y_node = list(node.inputs())[1].node()
y_unique_id = list(node.inputs())[1].unique()
y_node_name = mapper.outputs_info[y_unique_id]
inputs['y'] = y_node_name
mapper._check_input(graph, y_node, y_node_name, node_outputs)
graph.add_layer("fluid.layers.matmul", inputs=inputs, outputs=[output_name])
return list(inputs.values()), node_outputs
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Process input 0, i.e. %101
mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
layer_inputs["x"] = inputs_name[0]
# Process input 1, i.e. %111
mapper._check_input(graph, inputs_node[1], inputs_name[1], layer_outputs)
layer_inputs["y"] = inputs_name[1]
# Get the input/output lists of the current node
current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs
graph.add_layer(
"fluid.layers.matmul", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
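# A sketch of the generated call (names hypothetical):
#   out = fluid.layers.matmul(x=x0, y=x1)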
def aten_relu_(mapper, graph, node):
""" 构造ReLU激活的PaddleLayer。
TorchScript示例:
%result.3 : Tensor = aten::relu_(%input.5)
参数含义:
%result.3 (Tensor): 输出,ReLU后的结果。
%result.5 (Tensor): 需要ReLU的Tensor。
注意: inplace这个参数在paddle中未实现
"""
output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name]
input_node = list(node.inputs())[0].node()
input_unique_id = list(node.inputs())[0].unique()
input_node_name = mapper.outputs_info[input_unique_id]
mapper._check_input(graph, input_node, input_node_name, node_outputs)
# The inplace parameter is not implemented in Paddle
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Process input 0, i.e. %input.5
mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
layer_inputs["x"] = inputs_name[0]
# Get the input/output lists of the current node
current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs
graph.add_layer(
"fluid.layers.relu",
inputs={"x": input_node_name},
outputs=[output_name])
return [input_node_name], node_outputs
"fluid.layers.relu", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_relu6(mapper, graph, node):
""" 构造ReLU6激活的PaddleLayer。
TorchScript示例:
%result.3 : Tensor = aten::relu6(%input.5)
参数含义:
%result.3 (Tensor): 输出,ReLU6后的结果。
%result.5 (Tensor): 需要ReLU6的Tensor。
注意: inplace这个参数在paddle中未实现
"""
output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name]
input_node = list(node.inputs())[0].node()
input_unique_id = list(node.inputs())[0].unique()
input_node_name = mapper.outputs_info[input_unique_id]
mapper._check_input(graph, input_node, input_node_name, node_outputs)
# The inplace parameter is not implemented in Paddle
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Process input 0, i.e. %input.5
mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
layer_inputs["x"] = inputs_name[0]
# Get the input/output lists of the current node
current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs
graph.add_layer(
"fluid.layers.relu6",
inputs={"x": input_node_name},
outputs=[output_name],
inputs=layer_inputs,
outputs=layer_outputs,
threshold=6.0)
return [input_node_name], node_outputs
return current_inputs, current_outputs
def aten_size(mapper, graph, node):
""" 构造获取shape的PaddleLayer。
TorchScript示例:
%73 : int[] = aten::size(%x.12)
参数含义:
%73 (list): 输出,shape的list。
%x.12 (Tensor): 需要获取shape的Tensor。
"""
output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name]
input_node = list(node.inputs())[0].node()
input_unique_id = list(node.inputs())[0].unique()
input_node_name = mapper.outputs_info[input_unique_id]
mapper._check_input(graph, input_node, input_node_name, node_outputs)
graph.add_layer(
"prim.shape", inputs={'input': input_node_name}, outputs=[output_name])
return [input_node_name], node_outputs
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Process input 0, i.e. %x.12
mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
layer_inputs["input"] = inputs_name[0]
# Get the input/output lists of the current node
current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs
graph.add_layer("prim.shape", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_slice(mapper, graph, node):
""" 构造切分list的PaddleLayer。
TorchScript示例:
%83 : int[] = aten::slice(%73, %82, %75, %77)
参数含义:
%83 (list): 输出,切分后的list。
%73 (list): 需要切分的list。
%82 (int): 切分的开始索引。
%75 (int): 切分的结束索引。
%77 (int): 切分的步长。
"""
output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name]
attrs = {}
slice_inputs = []
input_node = list(node.inputs())[0].node()
input_unique_id = list(node.inputs())[0].unique()
input_node_name = mapper.outputs_info[input_unique_id]
slice_inputs.append(input_node_name)
strat_node = list(node.inputs())[1].node()
start_unique_id = list(node.inputs())[1].unique()
start_node_name = mapper.outputs_info[start_unique_id]
slice_inputs.append(start_node_name)
attrs['start'] = mapper.attrs[
start_node_name] if start_node_name in mapper.attrs else start_node_name
if start_node_name not in mapper.attrs:
mapper._check_input(graph, strat_node, start_node_name, node_outputs)
slice_inputs.append(input_node_name)
end_node = list(node.inputs())[2].node()
end_unique_id = list(node.inputs())[2].unique()
end_node_name = mapper.outputs_info[end_unique_id]
slice_inputs.append(end_node_name)
attrs['end'] = mapper.attrs[
end_node_name] if end_node_name in mapper.attrs else end_node_name
if end_node_name not in mapper.attrs:
mapper._check_input(graph, end_node, end_node_name, node_outputs)
slice_inputs.append(end_node_name)
step_node = list(node.inputs())[3].node()
step_unique_id = list(node.inputs())[3].unique()
step_node_name = mapper.outputs_info[step_unique_id]
slice_inputs.append(step_node_name)
attrs['step'] = mapper.attrs[
step_node_name] if step_node_name in mapper.attrs else step_node_name
if step_node_name not in mapper.attrs:
mapper._check_input(graph, step_node, step_node_name, node_outputs)
slice_inputs.append(step_node_name)
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Process input 0, i.e. %73
mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
layer_inputs["input"] = inputs_name[0]
# Get the input/output lists of the current node
current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs
# Process input 1, i.e. %82
if inputs_name[1] in mapper.attrs:
layer_attrs["start"] = mapper.attrs[inputs_name[1]]
else:
layer_attrs["start"] = inputs_name[1]
current_inputs.append(inputs_name[1])
# Process input 2, i.e. %75
if inputs_name[2] in mapper.attrs:
layer_attrs["end"] = mapper.attrs[inputs_name[2]]
else:
layer_attrs["end"] = inputs_name[2]
current_inputs.append(inputs_name[2])
# Process input 3, i.e. %77
if inputs_name[3] in mapper.attrs:
layer_attrs["step"] = mapper.attrs[inputs_name[3]]
else:
layer_attrs["step"] = inputs_name[3]
current_inputs.append(inputs_name[3])
graph.add_layer(
"prim.slice",
inputs={'input': input_node_name},
outputs=[output_name],
**attrs)
return [input_node_name], node_outputs
"prim.slice", inputs=layer_inputs, outputs=layer_outputs, **layer_attrs)
return current_inputs, current_outputs
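# prim.slice is an internal pseudo-op; in the emitted Python it presumably
# lowers to ordinary list slicing, roughly (names hypothetical):
#   x1 = x0[start:end:step]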
def aten_t(mapper, graph, node):
""" 构造矩阵转置的PaddleLayer。
TorchScript示例:
%109 : Tensor = aten::t(%102)
参数含义:
%109 (Tensor): 输出,转置后的矩阵。
%102 (Tensor): 需要转置的Tensor。
"""
output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name]
input_node = list(node.inputs())[0].node()
input_unique_id = list(node.inputs())[0].unique()
input_node_name = mapper.outputs_info[input_unique_id]
mapper._check_input(graph, input_node, input_node_name, node_outputs)
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Process input 0, i.e. %102
mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
layer_inputs["x"] = inputs_name[0]
# Get the input/output lists of the current node
current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs
graph.add_layer(
"fluid.layers.transpose",
inputs={"x": input_node_name},
outputs=[output_name],
inputs=layer_inputs,
outputs=layer_outputs,
perm=[1, 0])
return [input_node_name], node_outputs
return current_inputs, current_outputs
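# A sketch of the generated call; aten::t is a 2-D transpose, hence the fixed
# perm (names hypothetical):
#   x1 = fluid.layers.transpose(x=x0, perm=[1, 0])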
@@ -19,13 +19,12 @@ from x2paddle.core.util import *
def prim_Constant(mapper, graph, node):
""" 构造constant的PaddleLayer,该节点实现常量赋值。
PyTorch Script 示例:
TorchScript示例:
%2 : int = prim::Constant[value=-1]()
参数含义:
%2 (常量类型由赋值类型定义,该示例中为int型): 常量赋值结果输出。
"""
output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name]
output = list(node.outputs())[0]
value = output.toIValue()
mapper.attrs[output_name] = value
@@ -33,20 +32,19 @@ def prim_Constant(mapper, graph, node):
value = string(value)
graph.add_layer(
"prim.constant", inputs={}, outputs=[output_name], value=value)
return [], node_outputs
return [], [output_name]
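# prim.constant presumably lowers to a plain assignment in the emitted code,
# e.g. for %2 : int = prim::Constant[value=-1]() (name hypothetical):
#   x2 = -1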
def prim_GetAttr(mapper, graph, node):
""" 获取attribute信息。
PyTorch Script 示例:
TorchScript示例:
%27 : Tensor? = prim::GetAttr[name="bias"](%7)
参数含义:
%7 (Tensor): 输入Tensor。
%27 (Tensor): 输入Tensor。
"""
output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name]
field_name_list = [node.s('name')]
while True:
input_node = list(node.inputs())[0].node()
@@ -63,13 +61,13 @@ def prim_GetAttr(mapper, graph, node):
param = param.detach().numpy()
mapper.pytorch_params[output_name] = param
part_script = param
return [], node_outputs
return [], [output_name]
def prim_ListConstruct(mapper, graph, node):
""" 构造list的PaddleLayer。
PyTorch Script 示例:
TorchScript示例:
%86 : int[] = prim::ListConstruct(%84, %85)
参数含义:
%84 (int/其他): list第一个元素信息。
......@@ -77,42 +75,48 @@ def prim_ListConstruct(mapper, graph, node):
%86 (list): list节点输出。
"""
output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name]
inputs = {}
for i, input_ivalue in enumerate(node.inputs()):
input_node = input_ivalue.node()
script_input_unique_id = input_ivalue.unique()
input_node_name = mapper.outputs_info[script_input_unique_id]
inputs['input{}'.format(i)] = input_node_name
graph.add_layer("prim.list", inputs=inputs, outputs=[output_name])
return list(inputs.values()), node_outputs
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Process each input
for i, input_name in enumerate(inputs_name):
layer_inputs["input{}".format(i)] = input_name
# Get the input/output lists of the current node
current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs
graph.add_layer("prim.list", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def prim_RaiseException(mapper, graph, node):
""" 构造抛出异常的PaddleLayer。
PyTorch Script 示例:
TorchScript示例:
= prim::RaiseException(%76)
参数含义:
%76 (str): 异常信息。
"""
output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name]
input_node = list(node.inputs())[0].node()
script_input_unique_id = list(node.inputs())[0].unique()
input_node_name = mapper.outputs_info[script_input_unique_id]
mapper._check_input(graph, input_node, input_node_name, node_outputs)
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Process input 0, i.e. %76
mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
layer_inputs["input"] = inputs_name[0]
# Get the input/output lists of the current node
current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs
graph.add_layer(
"prim.exception",
inputs={'input': input_node_name},
outputs=[output_name])
return [input_node_name], node_outputs
"prim.exception", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def prim_Loop(mapper, graph, node):
""" 构造loop循环的PaddleLayer。
PyTorch Script 示例:
TorchScript示例:
%x : Tensor = prim::Loop(%4, %3, %x.3)
block0(%i : int, %x.12 : Tensor):
%72 : int[] = prim::Constant[value=[6, 6]]()
......@@ -125,11 +129,10 @@ def prim_Loop(mapper, graph, node):
%x.3 (Tensor): 循环中修改的Tensor。
%x (Tensor): loop循环的输出,与%x.5对应。
"""
output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name]
node_outputs = mapper._get_outputs_name(node)
loop_inputs = {}
block = list(node.blocks())[0]
loop_outputs = [output_name]
loop_outputs = node_outputs
for i, block_input_ivalue in enumerate(block.inputs()):
block_input_node_name = 'x' + str(mapper.output_index)
unique_id = block_input_ivalue.unique()
@@ -161,7 +164,7 @@ def prim_Loop(mapper, graph, node):
graph.add_layer("prim.loop", inputs=loop_inputs, outputs=loop_outputs)
current_layer = list(graph.layers.values())[-1]
block_graph, graph_inputs = mapper.traverse(block, node, current_layer)
block_graph, graph_inputs = mapper.traverse(block, current_layer)
for i, input_name in enumerate(graph_inputs):
if input_name == loop_outputs[1]:
continue
@@ -173,7 +176,7 @@
def prim_If(mapper, graph, node):
""" 构造if控制流的PaddleLayer。
PyTorch Script 示例:
TorchScript示例:
%input.5 : Tensor = prim::If(%107)
block0():
%109 : Tensor = aten::t(%102)
@@ -196,14 +199,14 @@ def prim_If(mapper, graph, node):
graph.add_layer("prim.if", {'input': input_node_name}, [output_name])
current_layer = list(graph.layers.values())[-1]
block0 = list(node.blocks())[0]
block0_graph, graph_inputs0 = mapper.traverse(block0, node, current_layer)
block0_graph, graph_inputs0 = mapper.traverse(block0, current_layer)
len0 = 0
for i, input_name in enumerate(graph_inputs0):
current_layer.inputs['input-{}'.format(i)] = input_name
len0 = i
current_layer.add_block(block0_graph)
block1 = list(node.blocks())[1]
block1_graph, graph_inputs1 = mapper.traverse(block1, node, current_layer)
block1_graph, graph_inputs1 = mapper.traverse(block1, current_layer)
for i, input_name in enumerate(graph_inputs1):
current_layer.inputs['input-{}'.format(len0 + 1 + i)] = input_name
current_layer.add_block(block1_graph)
@@ -213,18 +216,22 @@ def prim_If(mapper, graph, node):
def prim_min(mapper, graph, node):
""" 构造min的PaddleLayer。
PyTorch Script 示例:
TorchScript示例:
%87 : int = prim::min(%86)
参数含义:
%86 (list): 输入。
%87 (int): 输出。
"""
output_name = mapper._get_outputs_name(node)[0]
node_outputs = [output_name]
input_node = list(node.inputs())[0].node()
script_input_unique_id = list(node.inputs())[0].unique()
input_node_name = mapper.outputs_info[script_input_unique_id]
mapper._check_input(graph, input_node, input_node_name, node_outputs)
graph.add_layer(
"prim.min", inputs={'input': input_node_name}, outputs=[output_name])
return [input_node_name], node_outputs
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Process input 0, i.e. %86
mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
layer_inputs["input"] = inputs_name[0]
# Get the input/output lists of the current node
current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs
graph.add_layer("prim.min", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
@@ -34,7 +34,7 @@ class PyTorchOpMapper(OpMapper):
# Convert
self.graph, _ = self.traverse(decoder.graph)
def traverse(self, script_graph, control_node=None, father_layer=None):
def traverse(self, script_graph, parent_layer=None):
# Used to collect the graph's inputs
def _update_graph_inputs(inputs, outputs):
current_node_outputs.extend(outputs)
@@ -43,7 +43,7 @@ class PyTorchOpMapper(OpMapper):
graph_inputs.append(name)
# Initialization
graph = PaddleGraph(father_layer)
graph = PaddleGraph(parent_layer)
current_node_outputs = []
graph_inputs = []
# Convert the input nodes
@@ -71,7 +71,7 @@ class PyTorchOpMapper(OpMapper):
# Convert the output nodes
if hasattr(script_graph, 'returnNode'):
for i, ivalue in enumerate(script_graph.returnNode().inputs()):
if control_node.kind() == "prim::Loop" and i == 0:
if parent_layer.kernel == "prim.loop" and i == 0:
continue
node = ivalue.node()
script_unique_id = ivalue.unique()
@@ -79,7 +79,7 @@ class PyTorchOpMapper(OpMapper):
graph,
node,
uid=script_unique_id,
control_node=control_node,
parent_layer=parent_layer,
index=i)
_update_graph_inputs(inputs, outputs)
# Set the graph's parameters
@@ -129,6 +129,17 @@ class PyTorchOpMapper(OpMapper):
value=string(param) if isinstance(param, str) else param)
node_outputs.append(output_name)
def _get_inputs_name(self, node):
inputs_name = []
inputs_node = []
for script_input_ivalue in node.inputs():
script_input_node = script_input_ivalue.node()
script_input_unique_id = script_input_ivalue.unique()
input_node_name = self.outputs_info[script_input_unique_id]
inputs_node.append(script_input_node)
inputs_name.append(input_node_name)
return inputs_name, inputs_node
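# Usage sketch: the aten_*/prim_* converters above pair this helper with
# _check_input, e.g. (hypothetical converter body):
#   inputs_name, inputs_node = mapper._get_inputs_name(node)
#   mapper._check_input(graph, inputs_node[0], inputs_name[0], layer_outputs)
#   layer_inputs["x"] = inputs_name[0]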
def data(self, graph, node, uid):
for output_ivalue in node.outputs():
script_unique_id = output_ivalue.unique()
@@ -145,17 +156,14 @@ class PyTorchOpMapper(OpMapper):
value=output_name)
return [], [output_name]
def equal(self, graph, node, uid=None, control_node=None, index=None):
if control_node is not None and index is not None:
kind = control_node.kind()
def equal(self, graph, node, uid=None, parent_layer=None, index=None):
if parent_layer is not None and index is not None:
# Output of the block
input_node_name = self.outputs_info[uid]
control_output_id = index
if kind == "prim::Loop":
if parent_layer.kernel == "prim.loop":
control_output_id = index - 1
output_ivalue = list(control_node.outputs())[
control_output_id].unique()
output_node_name = self.outputs_info[output_ivalue]
output_node_name = parent_layer.outputs[control_output_id]
graph.add_layer(
"prim.equal",
inputs={'input': input_node_name},
......