Unverified · Commit d93af72b · Authored by: J Jason · Committed by: GitHub

Merge pull request #392 from SunAhong1993/me

add optimizer and aten for shufflenet
......@@ -44,6 +44,11 @@ x2paddle --framework=caffe --prototxt=deploy.prototxt --weight=deploy.caffemodel
```
x2paddle --framework=onnx --model=onnx_model.onnx --save_dir=pd_model
```
### PyTorch
```
x2paddle --framework=pytorch --model=resnet50.pt --save_dir=pd_model
```
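This PR also adds an `--input_shapes` option for exporting an inference model alongside the converted code (see `convert.py` below); a plausible invocation, assuming the bracketed comma-separated format the new parser expects:
```
x2paddle --framework=pytorch --model=resnet50.pt --save_dir=pd_model --input_shapes "[1,3,224,224]"
```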
### Paddle2ONNX
```
# Note: paddle_infer_model_dir must contain both the __model__ and __params__ files
......@@ -56,7 +61,7 @@ x2paddle --framework=paddle2onnx --model=paddle_infer_model_dir --save_dir=onnx_
|--prototxt | When framework is caffe, the path to the Caffe model's proto file |
|--weight | When framework is caffe, the path to the Caffe model's parameter file |
|--save_dir | Directory in which the converted model is saved |
|--model | When framework is tensorflow/onnx, the path to the TensorFlow pb model file or the ONNX model |
|--model | When framework is tensorflow/onnx/pytorch, the path to the TensorFlow pb model file, the ONNX model, or the PyTorch script model |
|--caffe_proto | **[Optional]** Path to the caffe_pb2.py file compiled from caffe.proto; used when custom layers exist. Defaults to None |
|--without_data_format_optimization | **[Optional]** For TensorFlow: when set, disables the NHWC->NCHW optimization; see [Q2 in the FAQ](FAQ.md) |
|--define_input_shape | **[Optional]** For TensorFlow: when set, requires the user to supply the shape of every Placeholder; see [Q2 in the FAQ](FAQ.md) |
......@@ -81,6 +86,7 @@ X2Paddle provides tools for the following problems; see [tools/README.md](tools/README
3. [X2Paddle model zoo](x2paddle_model_zoo.md)
4. [Exporting a PyTorch model as an ONNX model](pytorch_to_onnx.md)
5. [Caffe custom layers built into X2Paddle](caffe_custom_layer.md)
6. [Exporting a PyTorch model as a ScriptModule](pytorch_to_script.md)
## Update History
2019.08.05
......
## Exporting a PyTorch Model as a ScriptModule
pytorch2paddle currently supports PyTorch ScriptModules. With the example code below, you can convert a torchvision model, or a model you wrote yourself, into a ScriptModule:
```
#coding: utf-8
import torch
import torch.nn as nn
from torchvision.models.utils import load_state_dict_from_url
# Define the model
class AlexNet(nn.Module):
def __init__(self, num_classes=1000):
super(AlexNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(64, 192, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
self.classifier = nn.Sequential(
nn.Dropout(0.0),
nn.Linear(256 * 6 * 6, 4096),
nn.ReLU(inplace=True),
nn.Dropout(0.0),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, num_classes),
)
def forward(self, x):
x = self.features(x)
for i in range(1):
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
# Instantiate the model
model = AlexNet()
# Load the pretrained weights
state_dict = load_state_dict_from_url('https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',
                                      progress=True)
model.load_state_dict(state_dict)
# Switch to inference mode
model.eval()
# Generate the ScriptModule and save it
script = torch.jit.script(model)
torch.jit.save(script, "alexnet.pt")
```
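Before running the converter, the saved ScriptModule can be reloaded to sanity-check the export; a minimal sketch:
```
import torch
# Reload the module saved above and run a dummy batch through it
script = torch.jit.load("alexnet.pt")
out = script(torch.rand(1, 3, 224, 224))
print(out.shape)  # expected: torch.Size([1, 1000])
```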
......@@ -88,6 +88,12 @@ def arg_parser():
action="store_true",
default=False,
help="define whether merge the params")
parser.add_argument(
"--input_shapes",
"-is",
action='append',
default=None,
help="define the inputs' shape")
return parser
......@@ -174,6 +180,45 @@ def onnx2paddle(model_path, save_dir, params_merge=False):
print("Paddle model and code generated.")
def pytorch2paddle(model_path, save_dir, input_shapes):
# check pytorch installation and version
try:
import torch
version = torch.__version__
        ver_part = version.split('.')
        # require torch >= 1.5.0 (compare major and minor version)
        if (int(ver_part[0]), int(ver_part[1])) < (1, 5):
            print("[ERROR] pytorch>=1.5.0 is required")
            return
    except ImportError:
        print(
            "[ERROR] PyTorch is not installed, use \"pip install torch==1.5.0 torchvision\"."
        )
        return
print("Now translating model from pytorch to paddle.")
from x2paddle.decoder.pytorch_decoder import PyTorchDecoder
from x2paddle.op_mapper.pytorch2paddle import pytorch_op_mapper
model = PyTorchDecoder(model_path)
mapper = pytorch_op_mapper.PyTorchOpMapper(model)
mapper.graph.build()
print("Model optimizing ...")
from x2paddle.optimizer.optimizer import GraphOptimizer
graph_opt = GraphOptimizer()
graph_opt.optimize(mapper.graph)
print("Model optimized.")
if input_shapes is not None:
real_input_shapes = list()
for shape in input_shapes:
sp = shape[1:-1].split(",")
for i, s in enumerate(sp):
sp[i] = int(s)
real_input_shapes.append(sp)
else:
real_input_shapes = None
mapper.graph.gen_model(save_dir, real_input_shapes)
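# Illustration (hypothetical, not part of the diff): each --input_shapes value
# is a bracketed, comma-separated string that the loop above reduces to a list
# of ints, e.g.
#     "[1,3,224,224]"[1:-1].split(",")  ->  ["1", "3", "224", "224"]  ->  [1, 3, 224, 224]
# Since the flag is declared with action='append', passing it several times
# yields one shape list per model input.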
def paddle2onnx(model_path, save_dir, opset_version=10):
from x2paddle.decoder.paddle_decoder import PaddleDecoder
from x2paddle.op_mapper.paddle2onnx.paddle_op_mapper import PaddleOpMapper
......@@ -243,6 +288,9 @@ def main():
if args.params_merge:
params_merge = True
onnx2paddle(args.model, args.save_dir, params_merge)
elif args.framework == "pytorch":
assert args.model is not None, "--model should be defined while translating pytorch model"
pytorch2paddle(args.model, args.save_dir, args.input_shapes)
elif args.framework == "paddle2onnx":
assert args.model is not None, "--model should be defined while translating paddle model to onnx"
......
......@@ -15,6 +15,8 @@
from __future__ import print_function
from __future__ import division
import paddle.fluid as fluid
import os.path as osp
import paddle
from paddle.fluid.proto import framework_pb2
from collections import OrderedDict
import numpy
......@@ -101,11 +103,6 @@ class PaddleGraph(object):
self.clear_edges()
outputs_from_nodes = dict()
for layer_id, layer in self.layers.items():
# if "x5109" in layer.outputs or "x5110" in layer.outputs:
# print(layer.kernel)
# print(layer.inputs)
# print(layer.outputs)
# print(layer.attrs)
for input_key, input_var in layer.inputs.items():
vs = input_var
if not isinstance(vs, list):
......@@ -131,12 +128,33 @@ class PaddleGraph(object):
for output in layer.outputs:
outputs_from_nodes[output] = layer_id
# Make the block's outputs available to the parent graph
if inputs is not None and outputs is not None and set(
layer.outputs).issubset(outputs):
if layer_id not in self.edges_out:
self.edges_out[layer_id] = list()
self.edges_out[layer_id].append(-1)
# Process sub-blocks
if len(layer.blocks) > 0:
for block in layer.blocks:
block.build(layer.inputs, layer.outputs)
# Remove unnecessary nodes
invalid_list = list()
for layer_id, layer in self.layers.items():
if len(self.layers) > 1:
if self.edges_in.get(layer_id, 0) == 0 and self.edges_out.get(
layer_id, 0) == 0 and layer.kernel != "prim.assert" \
and layer.kernel != "prim.exception" \
and layer.kernel != "prim.warnings":
invalid_list.append(layer_id)
for layer_id in invalid_list:
self.layers.pop(layer_id)
if self.graph_type == "dygraph":
self.get_dygraph_inputs()
if len(self.outputs) == 0:
self.get_dygraph_outputs()
def get_global_layers(self):
......@@ -169,8 +187,8 @@ class PaddleGraph(object):
f, [
"from paddle.fluid.initializer import Constant",
"from paddle.fluid.param_attr import ParamAttr",
"import paddle.fluid as fluid"
"", "def x2paddle_net():"
"import paddle.fluid as fluid", "import math", "",
"def x2paddle_net():"
],
indent=0)
for layer_id, layer in self.layers.items():
......@@ -208,7 +226,9 @@ class PaddleGraph(object):
indent=1)
f.close()
def gen_model(self, save_dir):
def gen_model(self, save_dir, input_shapes=None):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
if self.graph_type == "static":
code_dir = os.path.join(save_dir, 'model_with_code')
infer_dir = os.path.join(save_dir, 'inference_model')
......@@ -244,6 +264,9 @@ class PaddleGraph(object):
else:
self.gen_dygraph_code(save_dir)
self.dump_dygraph_parameter(save_dir)
if input_shapes is not None:
# If input_shapes is not None, export an inference model; a typical value is [[None, 3, 224, 224]]
self.dygraph2static(save_dir, input_shapes)
def dump_parameter(self, param_name, param, save_dir):
if not os.path.exists(save_dir):
......@@ -296,6 +319,8 @@ class PaddleGraph(object):
update(self.layers)
self.inputs = list(set(self.inputs))
if self.inputs is not None:
self.inputs.sort()
def get_dygraph_outputs(self):
for layer_id, layer in self.layers.items():
......@@ -325,6 +350,7 @@ class PaddleGraph(object):
[
"from paddle.fluid.initializer import Constant",
"from paddle.fluid.param_attr import ParamAttr",
"import paddle",
"import paddle.fluid as fluid",
"",
"class {}(fluid.dygraph.Layer):".format(self.name),
......@@ -343,7 +369,7 @@ class PaddleGraph(object):
indent=1))
def write_code(code_dir):
f = open(os.path.join(code_dir, 'code.py'), 'w')
f = open(os.path.join(code_dir, 'x2paddle_code.py'), 'w')
for code_line in self.head:
f.write(code_line)
init_writen_codes = []
......@@ -365,13 +391,9 @@ class PaddleGraph(object):
gen_head()
for layer_id, layer in self.layers.items():
if len(self.layers) > 1:
if self.edges_in.get(layer_id, 0) == 0 and self.edges_out.get(
layer_id, 0) == 0 and layer.kernel != "prim.assert" \
and layer.kernel != "prim.exception" \
and layer.kernel != "prim.warnings":
continue
if "dygraph" in layer.kernel:
if ("paddle.nn" in layer.kernel and "functional" not in layer.kernel
) or layer.kernel == "fluid.dygraph.base.to_variable" or \
"paddle.fluid.dygraph" in layer.kernel:
line = "{}".format(
layer.outputs[0]
) if layer.kernel == "fluid.dygraph.base.to_variable" and not layer.attrs[
......@@ -442,3 +464,24 @@ class PaddleGraph(object):
params_output = open(os.path.join(code_dir, 'model.pdparams'), 'wb')
pickle.dump(self.parameters, params_output)
params_output.close()
    def dygraph2static(self, save_dir, input_shapes=[]):
        from paddle.fluid.dygraph.jit import declarative
        spec_list = list()
        for i, name in enumerate(self.inputs):
            spec_list.append(
                paddle.static.InputSpec(
                    shape=input_shapes[i], name=name))
        import sys
        path = osp.abspath(save_dir)
        sys.path.insert(0, path)
        import x2paddle_code
        place = fluid.CPUPlace()
        with fluid.dygraph.guard(place):
            restore, _ = fluid.load_dygraph(osp.join(save_dir, "model"))
            model = getattr(x2paddle_code, self.name)(restore)
            model.set_dict(restore)
            model.eval()
            model.forward = declarative(model.forward, spec_list)
            fluid.dygraph.jit.save(
                layer=model, model_path=osp.join(save_dir, "inference"))
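# Example (hypothetical call): after conversion,
#     mapper.graph.gen_model("pd_model", [[None, 3, 224, 224]])
# writes x2paddle_code.py and model.pdparams under pd_model/ and, because
# input_shapes is given, additionally exports a static inference model under
# pd_model/inference via dygraph2static above.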
......@@ -28,6 +28,7 @@ class PyTorchDecoder(object):
torch._C._jit_pass_lint(graph)
torch._C._jit_pass_dce(graph)
torch._C._jit_pass_lint(graph)
graph = torch._C._jit_pass_canonicalize(graph)
torch._C._jit_pass_canonicalize(graph)
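# Unlike the in-place lint/dce passes, _jit_pass_canonicalize returns a new
# graph, so its result must be assigned back to `graph`.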
torch._C._jit_pass_lint(graph)
torch._C._jit_pass_constant_propagation(graph)
return graph
......@@ -13,6 +13,19 @@
# limitations under the License.
from x2paddle.core.util import *
from x2paddle.core.program import PaddleGraph
dtype_dict = {
0: string("uint8"),
1: string("int8"),
2: string("int16"),
3: string("int32"),
4: string("int64"),
5: string("float16"),
6: string("float32"),
7: string("float64"),
11: string("bool")
}
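# string() (from x2paddle.core.util) wraps its argument in quotes, so e.g.
# dtype_dict[6] yields the literal 'float32', ready to be emitted verbatim as a
# dtype attribute in generated code.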
def aten_adaptive_avg_pool2d(mapper, graph, node):
......@@ -43,7 +56,7 @@ def aten_adaptive_avg_pool2d(mapper, graph, node):
else:
mapper._check_input(graph, inputs_node[1], inputs_name[1],
current_outputs)
layer_attrs["pool_size"] = inputs_name[1]
layer_inputs["pool_size"] = inputs_name[1]
current_inputs.append(inputs_name[1])
layer_attrs["pool_type"] = string("avg")
......@@ -93,7 +106,7 @@ def aten_addmm(mapper, graph, node):
else:
mapper._check_input(graph, inputs_node[3], inputs_name[3],
current_outputs)
layer_attrs["beta"] = inputs_name[3]
layer_inputs["beta"] = inputs_name[3]
current_inputs.append(inputs_name[3])
# Handle input 4, i.e. %151
if inputs_name[4] in mapper.attrs:
......@@ -101,11 +114,11 @@ def aten_addmm(mapper, graph, node):
else:
mapper._check_input(graph, inputs_node[4], inputs_name[4],
current_outputs)
layer_attrs["alpha"] = inputs_name[4]
layer_inputs["alpha"] = inputs_name[4]
current_inputs.append(inputs_name[4])
graph.add_layer(
"fluid.layers.addmm",
"paddle.addmm",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
......@@ -175,7 +188,7 @@ def aten_add_(mapper, graph, node):
else:
mapper._check_input(graph, inputs_node[2], inputs_name[2],
current_outputs)
layer_attrs["alpha"] = inputs_name[2]
layer_inputs["alpha"] = inputs_name[2]
current_inputs.append(inputs_name[2])
graph.add_layer(
......@@ -203,8 +216,7 @@ def aten___and__(mapper, graph, node):
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# Handle input 1, i.e. %288
mapper._check_input(
    graph, inputs_node[1], inputs_name[1], current_outputs, add_dim=True)
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
layer_inputs["y"] = inputs_name[1]
# Collect the inputs of the current node
current_inputs = list(layer_inputs.values())
......@@ -241,6 +253,95 @@ def aten_append(mapper, graph, node):
return current_inputs, current_outputs
def aten_arange(mapper, graph, node):
""" 构造以步长均匀分隔给定数值区间的PaddleLayer。
TorchScript示例:
有三种情况,分别处理。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the outputs of the current node
current_outputs = [output_name]
current_inputs = []
if len(inputs_name) == 5:
    # %position_ids.1 : Tensor = aten::arange(%52, %43, %45, %42, %46)
    # The last three inputs are layout, device, and requires_grad
    # Handle input 0, i.e. %52: end
    if inputs_name[0] in mapper.attrs:
        layer_attrs["end"] = mapper.attrs[inputs_name[0]]
    else:
        mapper._check_input(graph, inputs_node[0], inputs_name[0],
                            current_outputs)
        layer_inputs["end"] = inputs_name[0]
        current_inputs.append(inputs_name[0])
    # Handle input 1, i.e. %43: dtype
    layer_attrs["dtype"] = dtype_dict[mapper.attrs[inputs_name[1]]]
elif len(inputs_name) == 6:
    # %position_ids.1 : Tensor = aten::arange(%51, %52, %43, %45, %42, %46)
    # The last three inputs are layout, device, and requires_grad
    # Handle input 0, i.e. %51: start
    if inputs_name[0] in mapper.attrs:
        layer_attrs["start"] = mapper.attrs[inputs_name[0]]
    else:
        mapper._check_input(graph, inputs_node[0], inputs_name[0],
                            current_outputs)
        layer_inputs["start"] = inputs_name[0]
        current_inputs.append(inputs_name[0])
    # Handle input 1, i.e. %52: end
    if inputs_name[1] in mapper.attrs:
        layer_attrs["end"] = mapper.attrs[inputs_name[1]]
    else:
        mapper._check_input(graph, inputs_node[1], inputs_name[1],
                            current_outputs)
        layer_inputs["end"] = inputs_name[1]
        current_inputs.append(inputs_name[1])
    # Handle input 2, i.e. %43: dtype
    layer_attrs["dtype"] = dtype_dict[mapper.attrs[inputs_name[2]]]
elif len(inputs_name) == 7:
    # %position_ids.1 : Tensor = aten::arange(%51, %52, %53, %43, %45, %42, %46)
    # The last three inputs are layout, device, and requires_grad
    # Handle input 0, i.e. %51: start
    if inputs_name[0] in mapper.attrs:
        layer_attrs["start"] = mapper.attrs[inputs_name[0]]
    else:
        mapper._check_input(graph, inputs_node[0], inputs_name[0],
                            current_outputs)
        layer_inputs["start"] = inputs_name[0]
        current_inputs.append(inputs_name[0])
    # Handle input 1, i.e. %52: end
    if inputs_name[1] in mapper.attrs:
        layer_attrs["end"] = mapper.attrs[inputs_name[1]]
    else:
        mapper._check_input(graph, inputs_node[1], inputs_name[1],
                            current_outputs)
        layer_inputs["end"] = inputs_name[1]
        current_inputs.append(inputs_name[1])
    # Handle input 2, i.e. %53: step
    if inputs_name[2] in mapper.attrs:
        layer_attrs["step"] = mapper.attrs[inputs_name[2]]
    else:
        mapper._check_input(graph, inputs_node[2], inputs_name[2],
                            current_outputs)
        layer_inputs["step"] = inputs_name[2]
        current_inputs.append(inputs_name[2])
    # Handle input 3, i.e. %43: dtype
    layer_attrs["dtype"] = dtype_dict[mapper.attrs[inputs_name[3]]]
else:
    raise Exception("Unknown aten::arange signature taking " + str(
        len(inputs_name)) + " arguments.")
graph.add_layer(
"paddle.arange",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
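# For reference (hypothetical output): with start/end/step resolved into
# attributes, this emits a generated line equivalent to
#     x123 = paddle.arange(start=0, end=128, step=1, dtype='int64')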
def aten_avg_pool2d(mapper, graph, node):
""" 构造最大池化的PaddleLayer。
......@@ -294,7 +395,7 @@ def aten_avg_pool2d(mapper, graph, node):
layer_attrs["pool_type"] = string("avg")
graph.add_layer(
"fluid.dygraph.Pool2D",
"paddle.nn.Pool2D",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
......@@ -360,7 +461,7 @@ def aten_batch_norm(mapper, graph, node):
layer_attrs["epsilon"] = mapper.attrs[inputs_name[7]]
graph.add_layer(
"fluid.dygraph.BatchNorm",
"paddle.nn.BatchNorm",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
......@@ -395,7 +496,7 @@ def aten_cat(mapper, graph, node):
else:
mapper._check_input(graph, inputs_node[1], inputs_name[1],
current_outputs)
layer_attrs["axis"] = inputs_name[1]
layer_inputs["axis"] = inputs_name[1]
current_inputs.append(inputs_name[1])
graph.add_layer(
"fluid.layers.concat",
......@@ -405,6 +506,111 @@ def aten_cat(mapper, graph, node):
return current_inputs, current_outputs
def aten_chunk(mapper, graph, node):
"""构造分割Tensor的PaddleLayer。
TorchScript示例:
%724 : Tensor[] = aten::chunk(%input.170, %720, %719)
参数含义:
%724 (Tensor): 输出,分割后的结果。
%input.170 (Tensor): 需要进行分割的Tensor。
%720 (int): 分割的块数。
%719 (int): 分割的维度。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %input.170
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["input"] = inputs_name[0]
# Collect the inputs of the current node
current_inputs = list(layer_inputs.values())
# Handle input 1, i.e. %720
if inputs_name[1] in mapper.attrs:
layer_attrs["num_or_sections"] = mapper.attrs[inputs_name[1]]
else:
mapper._check_input(graph, inputs_node[1], inputs_name[1],
current_outputs)
layer_inputs["num_or_sections"] = inputs_name[1]
current_inputs.append(inputs_name[1])
# Handle input 2, i.e. %719
if inputs_name[2] in mapper.attrs:
layer_attrs["dim"] = mapper.attrs[inputs_name[2]]
else:
mapper._check_input(graph, inputs_node[2], inputs_name[2],
current_outputs)
layer_inputs["dim"] = inputs_name[2]
current_inputs.append(inputs_name[2])
graph.add_layer(
"fluid.layers.split",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
def aten___contains__(mapper, graph, node):
""" 构造in的PaddleLayer。
TorchScript示例:
%51 : bool = aten::__contains__(%50, %name.1)
参数含义:
%51 (bool): 输出,第一个元素是否包含第二个元素。
%50 (-): 需对比的输入1。
%name.1 (-): 需对比的输入2。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %50
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["input"] = inputs_name[0]
# Handle input 1, i.e. %name.1
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
layer_inputs["element"] = inputs_name[1]
# Collect the inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.contain", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_contiguous(mapper, graph, node):
""" 构造在内存中连续存储的PaddleLayer。
TorchScript示例:
%x.7 : Tensor = aten::contiguous(%4058, %4046)
参数含义:
%x.7 (Tensor): 输出,在内存中连续存储的Tensor。
%4058 (Tensor): 原始Tensor。
%4046 (int): 存储的形式。
【注意】Paddle中无此用法,所以此处翻译成赋值。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %4058
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["input"] = inputs_name[0]
# Collect the inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.equal", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_conv2d(mapper, graph, node):
""" 构造conv2d的PaddleLayer。
......@@ -440,8 +646,8 @@ def aten_conv2d(mapper, graph, node):
# Handle input 1, i.e. %25
weights = mapper.pytorch_params[inputs_name[1]]
mapper.paddle_params[conv2d_name + ".weight"] = weights
layer_attrs["num_filters"] = weights.shape[0]
layer_attrs["filter_size"] = weights.shape[2:]
layer_attrs["out_channels"] = weights.shape[0]
layer_attrs["kernel_size"] = weights.shape[2:]
# Handle input 2, i.e. %27
if inputs_name[2] in mapper.pytorch_params:
bias = mapper.pytorch_params[inputs_name[2]]
......@@ -459,29 +665,40 @@ def aten_conv2d(mapper, graph, node):
layer_attrs["dilation"] = mapper.attrs[inputs_name[5]]
# Handle input 6, i.e. %26
layer_attrs["groups"] = mapper.attrs[inputs_name[6]]
layer_attrs['num_channels'] = weights.shape[1] * mapper.attrs[inputs_name[
6]]
layer_attrs['in_channels'] = weights.shape[1] * mapper.attrs[inputs_name[6]]
graph.add_layer(
"fluid.dygraph.Conv2D",
"paddle.nn.Conv2d",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
def aten_dim(mapper, graph, node):
""" 构造获取维度的PaddleLayer。
def aten__convolution(mapper, graph, node):
""" 构造conv2d的PaddleLayer。
TorchScript示例:
%106 : int = aten::dim(%101)
%input.10 : Tensor = aten::_convolution(%input.8, %25, %27, %28, %29, %30, %26)
参数含义:
%106 (int): 输出,Tensor的维度。
%101 (Tensor): 输入的Tensor。
%input.10 (Tensor): 输出,卷积后的结果。
%input.8 (Tensor): 需要进行卷积的特征层。
%25 (Tensor): weights。
%27 (Tensor): bias。
%28 (int): 步长大小。
%29 (int): 填充大小。
%30 (int): 膨胀系数大小。
%26 (int): 卷积的组数。
"""
if "conv" in mapper.dygraph_name_id:
mapper.dygraph_name_id["conv"] += 1
else:
mapper.dygraph_name_id["conv"] = 0
conv2d_name = "conv" + str(mapper.dygraph_name_id["conv"])
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_outputs = [conv2d_name, output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the outputs of the current node
current_outputs = [output_name]
......@@ -490,124 +707,164 @@ def aten_dim(mapper, graph, node):
layer_inputs["input"] = inputs_name[0]
# Collect the inputs of the current node
current_inputs = list(layer_inputs.values())
# Handle input 1, i.e. %25
weights = mapper.pytorch_params[inputs_name[1]]
mapper.paddle_params[conv2d_name + ".weight"] = weights
layer_attrs["num_filters"] = weights.shape[0]
layer_attrs["filter_size"] = weights.shape[2:]
# Handle input 2, i.e. %27
if inputs_name[2] in mapper.pytorch_params:
    bias = mapper.pytorch_params[inputs_name[2]]
    if bias is not None:
        mapper.paddle_params[conv2d_name + ".bias"] = bias
    else:
        layer_attrs["bias_attr"] = False
else:
    layer_attrs["bias_attr"] = False
# Handle input 3, i.e. %28
layer_attrs["stride"] = mapper.attrs[inputs_name[3]]
# Handle input 4, i.e. %29
layer_attrs["padding"] = mapper.attrs[inputs_name[4]]
# Handle input 5, i.e. %30
layer_attrs["dilation"] = mapper.attrs[inputs_name[5]]
# Handle input 6, i.e. %26
layer_attrs["groups"] = mapper.attrs[inputs_name[6]]
layer_attrs['num_channels'] = weights.shape[1] * mapper.attrs[inputs_name[
    6]]
graph.add_layer("prim.shape", inputs=layer_inputs, outputs=layer_outputs)
graph.add_layer(
"prim.len", inputs={"input": output_name}, outputs=layer_outputs)
"paddle.nn.Conv2D",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
def aten_dropout(mapper, graph, node):
""" 构造Dropout的PaddleLayer。
def aten_cos(mapper, graph, node):
""" 构造数学计算cos的PaddleLayer。
TorchScript示例:
%119 : Tensor = aten::dropout(%result.3, %117, %118)
%94 : Tensor = aten::cos(%sinusoid_inp.1)
参数含义:
%119 (Tensor): Dropout后的Tensor。
%result.3 (Tensor): 输入Tensor。
%118 (bool): 是否是训练阶段。
%94 (Tensor): 输出,cos之后的结果。
%sinusoid_inp.1 (Tensor): 需要进行shape的Tensor。
"""
if "dropout" in mapper.dygraph_name_id:
mapper.dygraph_name_id["dropout"] += 1
else:
mapper.dygraph_name_id["dropout"] = 0
dropout_name = "dropout" + str(mapper.dygraph_name_id["dropout"])
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [dropout_name, output_name]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %119
# Handle input 0, i.e. %sinusoid_inp.1
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["input"] = inputs_name[0]
layer_inputs["x"] = inputs_name[0]
# Collect the inputs and outputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer(
"fluid.dygraph.Dropout",
inputs=layer_inputs,
outputs=layer_outputs,
p=0.0)
graph.add_layer("paddle.cos", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_dropout_(mapper, graph, node):
""" 构造Dropout的PaddleLayer。
def aten_cumsum(mapper, graph, node):
""" 构造与前一个元素累加的PaddleLayer。
TorchScript示例:
%119 : Tensor = aten::dropout_(%result.3, %117, %118)
%56 : Tensor = aten::cumsum(%mask.1, %46, %48)
参数含义:
%119 (Tensor): Dropout后的Tensor。
%result.3 (Tensor): 输入Tensor。
%118 (bool): 是否是训练阶段。
%56 (Tensor): 输出,累加后的结果。
%mask.1 (Tensor): 输入,需要累加的Tensor。
%46 (int): 累加的维度。
%48 (int/None): Tensor的类型。
"""
if "dropout" in mapper.dygraph_name_id:
mapper.dygraph_name_id["dropout"] += 1
else:
mapper.dygraph_name_id["dropout"] = 0
dropout_name = "dropout" + str(mapper.dygraph_name_id["dropout"])
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [dropout_name, output_name]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %119
# Handle input 0, i.e. %mask.1
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["input"] = inputs_name[0]
layer_inputs["x"] = inputs_name[0]
# Collect the inputs and outputs of the current node
current_inputs = list(layer_inputs.values())
# Handle input 1, i.e. %46
if inputs_name[1] in mapper.attrs:
    layer_attrs["axis"] = mapper.attrs[inputs_name[1]]
else:
    mapper._check_input(graph, inputs_node[1], inputs_name[1],
                        current_outputs)
    layer_inputs["axis"] = inputs_name[1]
    current_inputs.append(inputs_name[1])
# Handle input 2, i.e. %48: dtype
if mapper.attrs[inputs_name[2]] is None:
layer_attrs["dtype"] = None
else:
layer_attrs["dtype"] = dtype_dict[mapper.attrs[inputs_name[2]]]
graph.add_layer(
"fluid.dygraph.Dropout",
"paddle.cumsum",
inputs=layer_inputs,
outputs=layer_outputs,
p=0.0)
**layer_attrs)
return current_inputs, current_outputs
def aten_eq(mapper, graph, node):
""" 构造判断数值是否相等的PaddleLayer。
def aten_detach(mapper, graph, node):
""" 构造返回一个新的Tensor,从当前计算图中分离下来的,但是仍指向原变量的存放位置的PaddleLayer。
TorchScript示例:
%125 : bool = aten::eq(%124, %123)
%107 : Tensor = aten::detach(%new_mem.1)
参数含义:
%125 (bool): 对比后结果。
%124 (-): 需对比的输入1。
%123 (-): 需对比的输入2。
%107 (Tensor): 输出,得到的Scalar。
%new_mem.1 (Tensor): 输入。
【注意】由于Paddle无此操作,所以此处制转换为赋值。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %124
# Handle input 0, i.e. %new_mem.1
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# Handle input 1, i.e. %123
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
layer_inputs["y"] = inputs_name[1]
layer_inputs["input"] = inputs_name[0]
# Collect the inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.equal", inputs=layer_inputs, outputs=layer_outputs)
graph.add_layer("prim.eq", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_flatten(mapper, graph, node):
""" 构造flatten的PaddleLayer。
def aten_dict(mapper, graph, node):
""" 构造初始化dict的PaddleLayer。
TorchScript示例:
%x.8 : Tensor = aten::flatten(%x, %4, %2)
%features.1 : Dict(str, Tensor) = aten::dict()
参数含义:
%x.8 (Tensor): flatten后结果。
%x (Tensor): 输入Tensor。
%4 (int): flatten的开始维度。
%2 (int): flatten的结束维度。
%features.1: 输出,初始化的dict。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
current_inputs = {}
# Collect the outputs of the current node
current_outputs = [output_name]
Note: currently flatten only supports flattening from the first dimension
graph.add_layer("prim.dict", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_dim(mapper, graph, node):
""" 构造获取维度的PaddleLayer。
TorchScript示例:
%106 : int = aten::dim(%101)
参数含义:
%106 (int): 输出,Tensor的维度。
%101 (Tensor): 输入的Tensor。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
......@@ -615,45 +872,28 @@ def aten_flatten(mapper, graph, node):
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the outputs of the current node
current_outputs = [output_name]
# Handle input 1, i.e. %4
graph.add_layer(
"prim.assert",
inputs={},
outputs=[inputs_name[1]],
type='eq',
key=mapper.attrs[inputs_name[1]],
value=1)
# Handle input 2, i.e. %2
graph.add_layer(
"prim.assert",
inputs={},
outputs=[inputs_name[2]],
type='eq',
key=mapper.attrs[inputs_name[2]],
value=-1)
# Handle input 0, i.e. %x
# Handle input 0, i.e. %input.8
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
layer_inputs["input"] = inputs_name[0]
# Collect the inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer(
"fluid.layers.flatten",
inputs=layer_inputs,
outputs=layer_outputs,
axis=1)
"fluid.layers.shape", inputs=layer_inputs, outputs=layer_outputs)
graph.add_layer(
"prim.len", inputs={"input": output_name}, outputs=layer_outputs)
return current_inputs, current_outputs
def aten___getitem__(mapper, graph, node):
""" 构造获取list中元素的PaddleLayer。
def aten_div_(mapper, graph, node):
""" 构造除法的PaddleLayer。
TorchScript示例:
%v.1 : int = aten::__getitem__(%72, %88)
%bx_bw0.3 : Tensor = aten::div_(%bx_bw.3, %2678)
参数含义:
%v.1 (-): 输出,list中的元素
%72 (list): 需要获取元素的list
%88 (int): 索引
%bx_bw0.3 (-): 除后的结果
%bx_bw.3 (-): 被除数
%2678 (int): 除数
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
......@@ -661,28 +901,28 @@ def aten___getitem__(mapper, graph, node):
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %72
# Handle input 0, i.e. %bx_bw.3
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["list"] = inputs_name[0]
# Handle input 1, i.e. %88
layer_inputs["x"] = inputs_name[0]
# Handle input 1, i.e. %2678
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
layer_inputs["index"] = inputs_name[1]
layer_inputs["y"] = inputs_name[1]
# Collect the inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.getitem", inputs=layer_inputs, outputs=layer_outputs)
graph.add_layer("prim.div", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_gt(mapper, graph, node):
""" 构造对比大小的PaddleLayer。
def aten_div(mapper, graph, node):
""" 构造除法的PaddleLayer。
TorchScript示例:
%83 : bool = aten::gt(%82, %78)
%bx_bw0.3 : Tensor = aten::div_(%bx_bw.3, %2678)
参数含义:
%83 (bool): 输出,第一个元素是否大于第二个元素
%82 (-): 需对比的输入1
%78 (-): 需对比的输入2
%bx_bw0.3 (-): 除后的结果
%bx_bw.3 (-): 被除数
%2678 (int): 除数
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
......@@ -690,129 +930,142 @@ def aten_gt(mapper, graph, node):
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %82
# Handle input 0, i.e. %bx_bw.3
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# Handle input 1, i.e. %78
# Handle input 1, i.e. %2678
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
layer_inputs["y"] = inputs_name[1]
# Collect the inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.gt", inputs=layer_inputs, outputs=layer_outputs)
graph.add_layer("prim.div", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_hardtanh_(mapper, graph, node):
""" 构造hardtanh激活的PaddleLayer。
def aten_dropout(mapper, graph, node):
""" 构造Dropout的PaddleLayer。
TorchScript示例:
%result.9 : Tensor = aten::hardtanh_(%input.20, %67, %66)
%119 : Tensor = aten::dropout(%result.3, %117, %118)
参数含义:
%result.9 (Tensor): 输出,hardtanh激活后的Tensor。
%input.20 (Tensor): 需要hardtanh激活的Tensor。
%67 (float): hardtanh激活的最小阈值。
%66 (float): hardtanh激活的最大阈值。
%119 (Tensor): Dropout后的Tensor。
%result.3 (Tensor): 输入Tensor。
%118 (bool): 是否是训练阶段。
"""
if "dropout" in mapper.dygraph_name_id:
mapper.dygraph_name_id["dropout"] += 1
else:
mapper.dygraph_name_id["dropout"] = 0
dropout_name = "dropout" + str(mapper.dygraph_name_id["dropout"])
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_outputs = [dropout_name, output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the outputs of the current node
current_outputs = [output_name]
# Handle input 1, i.e. %67
graph.add_layer(
    "prim.assert",
    inputs={},
    outputs=[inputs_name[1]],
    type='eq',
    key=mapper.attrs[inputs_name[1]],
    value=0.0)
# Handle input 2, i.e. %66
graph.add_layer(
    "prim.assert",
    inputs={},
    outputs=[inputs_name[2]],
    type='eq',
    key=mapper.attrs[inputs_name[2]],
    value=6.0)
# Handle input 0, i.e. %input.20
# Handle input 0, i.e. %119
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# Collect the inputs of the current node
layer_inputs["input"] = inputs_name[0]
# Collect the inputs and outputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer(
'fluid.layers.relu6',
inputs=layer_inputs,
outputs=layer_outputs,
threshold=6.0)
"paddle.nn.Dropout", inputs=layer_inputs, outputs=layer_outputs, p=0.0)
return current_inputs, current_outputs
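# p is hard-coded to 0.0, which makes the layer an identity at inference time;
# the original dropout probability (%117) is intentionally dropped.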
def aten_le(mapper, graph, node):
""" 构造对比大小的PaddleLayer。
def aten_dropout_(mapper, graph, node):
""" 构造Dropout的PaddleLayer。
TorchScript示例:
%80 : bool = aten::le(%78, %79)
%119 : Tensor = aten::dropout_(%result.3, %117, %118)
参数含义:
%80 (bool): 输出,第一个元素是否小于等于第二个元素
%78 (-): 需对比的输入1
%79 (-): 需对比的输入2
%119 (Tensor): Dropout后的Tensor
%result.3 (Tensor): 输入Tensor
%118 (bool): 是否是训练阶段
"""
if "dropout" in mapper.dygraph_name_id:
mapper.dygraph_name_id["dropout"] += 1
else:
mapper.dygraph_name_id["dropout"] = 0
dropout_name = "dropout" + str(mapper.dygraph_name_id["dropout"])
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_outputs = [dropout_name, output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %78
# Handle input 0, i.e. %119
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# Handle input 1, i.e. %79
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
layer_inputs["y"] = inputs_name[1]
# Collect the inputs of the current node
layer_inputs["input"] = inputs_name[0]
# Collect the inputs and outputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.le", inputs=layer_inputs, outputs=layer_outputs)
graph.add_layer(
"paddle.nn.Dropout", inputs=layer_inputs, outputs=layer_outputs, p=0.0)
return current_inputs, current_outputs
def aten_len(mapper, graph, node):
""" 构造获取list长度的PaddleLayer。
def aten_embedding(mapper, graph, node):
""" 构造embedding的PaddleLayer。
TorchScript示例:
%85 : int = aten::len(%83)
%inputs_embeds.1 : Tensor = aten::embedding(%57, %input_ids.1, %45, %46, %46)
参数含义:
%85 (int): 输出,list的长度。
%72 (list): 需要获取长度的list。
%inputs_embeds.1 (Tensor): 输出,embedding后的结果。
%57 (Tensor): weights。
%input_ids.1 (Tensor): 需要进行embedding的特征层。
%45 (int): padding_idx。
%46 (bool): scale_grad_by_freq。
%46 (bool): sparse。
"""
if "embedding" in mapper.dygraph_name_id:
mapper.dygraph_name_id["embedding"] += 1
else:
mapper.dygraph_name_id["embedding"] = 0
embedding_name = "embedding" + str(mapper.dygraph_name_id["embedding"])
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_outputs = [embedding_name, output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %72
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["input"] = inputs_name[0]
# Handle input 0, i.e. %57
weights = mapper.pytorch_params[inputs_name[0]]
mapper.paddle_params[embedding_name + ".weight"] = weights
# layer_attrs["num_embeddings"] = weights.shape[0]
# layer_attrs["embedding_dim"] = weights.shape[1]
layer_attrs["size"] = weights.shape
# Handle input 1, i.e. %input_ids.1
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
layer_inputs["input"] = inputs_name[1]
# Collect the inputs of the current node
current_inputs = list(layer_inputs.values())
# Handle input 2, i.e. %45
layer_attrs["padding_idx"] = mapper.attrs[inputs_name[2]]
# Handle input 4, i.e. %46
# layer_attrs["sparse"] = mapper.attrs[inputs_name[4]]
layer_attrs["is_sparse"] = mapper.attrs[inputs_name[4]]
graph.add_layer("prim.len", inputs=layer_inputs, outputs=layer_outputs)
graph.add_layer(
"paddle.fluid.dygraph.Embedding",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
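# Generated-code sketch (hypothetical shapes): this adds a layer such as
#     self.embedding0 = paddle.fluid.dygraph.Embedding(size=[vocab_size, hidden_size], padding_idx=0, is_sparse=False)
# with its weights stored under "embedding0.weight" in model.pdparams.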
def aten_lt(mapper, graph, node):
""" 构造对比大小的PaddleLayer。
def aten_eq(mapper, graph, node):
""" 构造判断数值是否相等的PaddleLayer。
TorchScript示例:
%80 : bool = aten::lt(%78, %79)
%125 : bool = aten::eq(%124, %123)
参数含义:
%80 (bool): 输出,第一个元素是否小于第二个元素
%78 (-): 需对比的输入1。
%79 (-): 需对比的输入2。
%125 (bool): 对比后结果
%124 (-): 需对比的输入1。
%123 (-): 需对比的输入2。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
......@@ -820,85 +1073,58 @@ def aten_lt(mapper, graph, node):
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %78
# Handle input 0, i.e. %124
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# Handle input 1, i.e. %79
x_value = list(node.inputs())[0]
x_type = x_value.type()
# Handle input 1, i.e. %123
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
layer_inputs["y"] = inputs_name[1]
y_value = list(node.inputs())[1]
y_type = y_value.type()
# Collect the inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.lt", inputs=layer_inputs, outputs=layer_outputs)
graph.add_layer("prim.eq", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_max_pool2d(mapper, graph, node):
""" 构造最大池化的PaddleLayer。
def aten_exp(mapper, graph, node):
""" 构造以自然数e为底指数运算的PaddleLayer。
TorchScript示例:
%input.8 : Tensor = aten::max_pool2d(%result.11, %20, %23, %21, %22, %19)
%55 : Tensor = aten::tanh(%54)
参数含义:
%input.8 (Tensor): 输出,池化后的结果。
%result.11 (Tensor): 需要池化的Tensor。
%20 (list): 池化kernel的大小。
%23 (list): 步长大小。
%21 (list): 填充大小。
%22 (list): 膨胀系数大小。
%19 (bool): 是否用ceil函数计算输出高度和宽度。
%55 (Tensor): 输出,运算后的结果。
%54 (Tensor): 需要指数运算的Tensor。
"""
if "pool" in mapper.dygraph_name_id:
mapper.dygraph_name_id["pool"] += 1
else:
mapper.dygraph_name_id["pool"] = 0
pool_name = "pool" + str(mapper.dygraph_name_id["pool"])
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [pool_name, output_name]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %result.11
# Handle input 0, i.e. %result.5
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["input"] = inputs_name[0]
# Collect the inputs of the current node
layer_inputs["x"] = inputs_name[0]
# Collect the inputs and outputs of the current node
current_inputs = list(layer_inputs.values())
# Handle input 1, i.e. %20
layer_attrs["pool_size"] = mapper.attrs[inputs_name[1]]
# Handle input 2, i.e. %23
layer_attrs["pool_stride"] = mapper.attrs[inputs_name[2]]
# Handle input 3, i.e. %21
layer_attrs["pool_padding"] = mapper.attrs[inputs_name[3]]
# Handle input 4, i.e. %22
graph.add_layer(
"prim.assert",
inputs={},
outputs=[inputs_name[4]],
type="eq",
key=mapper.attrs[inputs_name[4]],
value=[1, [1, 1]])
# Handle input 5, i.e. %19
layer_attrs["ceil_mode"] = mapper.attrs[inputs_name[5]]
layer_attrs["pool_type"] = string("max")
graph.add_layer(
"fluid.dygraph.Pool2D",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
"fluid.layers.exp", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_matmul(mapper, graph, node):
""" 构造矩阵相乘的PaddleLayer。
def aten_expand(mapper, graph, node):
""" 构造对某维度进行广播的PaddleLayer。
TorchScript示例:
%output.2 : Tensor = aten::matmul(%101, %111)
%1889 : Tensor = aten::expand(%1875, %1888, %1567)
参数含义:
%output.2 (Tensor): 输出,相乘后的结果。
%101 (Tensor): 矩阵1。
%102 (Tensor): 矩阵2。
%1889 (Tensor): 广播后的结果。
%1875 (Tensor): 需要广播的Tensor。
%1888 (int): 广播的维度。
%1567 (bool): 未使用。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
......@@ -906,29 +1132,79 @@ def aten_matmul(mapper, graph, node):
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
# Handle input 0, i.e. %101
# Handle input 0, i.e. %1875
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# Handle input 1, i.e. %102
# Handle input 1, i.e. %1888
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
layer_inputs["y"] = inputs_name[1]
graph.add_layer(
"prim.type",
inputs={"input": inputs_name[0]},
outputs=[inputs_name[0] + "_type"])
graph.add_layer(
"prim.str",
inputs={"input": inputs_name[0] + "_type"},
outputs=[inputs_name[0] + "_type"])
graph.add_layer(
"prim.eq",
inputs={"x": inputs_name[0] + "_type"},
outputs=[inputs_name[0] + "_cond"],
y=string("VarType.BOOL"))
graph.add_layer(
"prim.if", {'input': inputs_name[0] + "_cond"},
outputs=[inputs_name[0] + "_if1"])
if_layer = graph.layers[list(graph.layers.keys())[-1]]
block = PaddleGraph(if_layer, graph_type="dygraph")
block.add_layer(
"fluid.layers.cast",
inputs={"x": inputs_name[0]},
outputs=[inputs_name[0]],
dtype=string("int64"))
block.add_layer(
"fluid.layers.create_global_var",
inputs={"shape": inputs_name[1]},
outputs=[inputs_name[1] + "_var"],
value=1.0,
dtype=string("int64"),
persistable=True)
if_layer.add_block(block)
block = PaddleGraph(if_layer, graph_type="dygraph")
block.add_layer(
"prim.type",
inputs={"input": inputs_name[0]},
outputs=[inputs_name[0] + "_type"])
block.add_layer(
"fluid.layers.create_global_var",
inputs={"shape": inputs_name[1]},
outputs=[inputs_name[1] + "_var"],
value=1.0,
dtype=inputs_name[0] + "_type",
persistable=True)
if_layer.add_block(block)
if_layer.inputs["input-0"] = inputs_name[0]
if_layer.inputs["input-1"] = inputs_name[1]
layer_inputs["target_tensor"] = inputs_name[1] + "_var"
current_outputs.append(inputs_name[1] + "_var")
# Collect the inputs of the current node
current_inputs = list(layer_inputs.values())
current_inputs.append(inputs_name[1])
graph.add_layer(
"fluid.layers.matmul", inputs=layer_inputs, outputs=layer_outputs)
"fluid.layers.expand_as", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_mul(mapper, graph, node):
""" 构造数值相乘的PaddleLayer。
def aten_expand_as(mapper, graph, node):
""" 构造广播的PaddleLayer。
TorchScript示例:
%size_prods.39 : int = aten::mul(%size_prods.38, %114)
%1889 : Tensor = aten::expand_as(%1875, %1888)
参数含义:
%size_prods.39 (Tensor): 输出,相乘后的结果。
%size_prods.38 (-): 数值1
%114 (-): 数值2
%1889 (Tensor): 广播后的结果。
%1875 (Tensor): 需要广播的Tensor
%1888 (Tensor): 广播的示例
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
......@@ -936,57 +1212,120 @@ def aten_mul(mapper, graph, node):
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
# Handle input 0, i.e. %size_prods.38
# Handle input 0, i.e. %1875
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# Handle input 1, i.e. %114
# Handle input 1, i.e. %1888
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
layer_inputs["y"] = inputs_name[1]
layer_inputs["target_tensor"] = inputs_name[1]
# Collect the inputs of the current node
current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs
graph.add_layer("prim.mul", inputs=layer_inputs, outputs=layer_outputs)
graph.add_layer(
"prim.type",
inputs={"input": inputs_name[0]},
outputs=[inputs_name[0] + "_type"])
graph.add_layer(
"prim.str",
inputs={"input": inputs_name[0] + "_type"},
outputs=[inputs_name[0] + "_type"])
graph.add_layer(
"prim.eq",
inputs={"x": inputs_name[0] + "_type"},
outputs=[inputs_name[0] + "_cond"],
y=string("VarType.BOOL"))
graph.add_layer(
"prim.if", {'input': inputs_name[0] + "_cond"},
outputs=[inputs_name[0] + "_if1"])
if_layer = graph.layers[list(graph.layers.keys())[-1]]
block = PaddleGraph(if_layer, graph_type="dygraph")
block.add_layer(
"prim.type",
inputs={"input": inputs_name[1]},
outputs=[inputs_name[1] + "_type"])
block.add_layer(
"fluid.layers.cast",
inputs={"x": inputs_name[0]},
outputs=[inputs_name[0]],
dtype=inputs_name[1] + "_type")
if_layer.add_block(block)
block = PaddleGraph(if_layer, graph_type="dygraph")
if_layer.add_block(block)
if_layer.inputs["input-0"] = inputs_name[0]
if_layer.inputs["input-1"] = inputs_name[1]
graph.add_layer(
"fluid.layers.expand_as", inputs=layer_inputs, outputs=layer_outputs)
graph.add_layer(
"prim.if", {'input': inputs_name[0] + "_cond"},
outputs=[inputs_name[0] + "_if2"])
if_layer = graph.layers[list(graph.layers.keys())[-1]]
block = PaddleGraph(if_layer, graph_type="dygraph")
block.add_layer(
"fluid.layers.cast",
inputs={"x": layer_outputs[0]},
outputs=layer_outputs,
dtype=string("bool"))
if_layer.add_block(block)
block = PaddleGraph(if_layer, graph_type="dygraph")
if_layer.add_block(block)
if_layer.inputs["input-0"] = layer_outputs[0]
return current_inputs, current_outputs
def aten_ne(mapper, graph, node):
""" 构造判断数值是否不相等的PaddleLayer。
def aten_eye(mapper, graph, node):
""" 构造批次二维矩阵的PaddleLayer。
TorchScript示例:
%134 : bool = aten::ne(%133, %132)
%68 : Tensor = aten::eye(%49, %_50, %_51, %15, %9, %67, %7)
参数含义:
%134 (bool): 对比后结果。
%133 (-): 需对比的输入1。
%132 (-): 需对比的输入2。
%68 (Tensor): 输出,构造的矩阵。
%49 (int): 行数。
%_50 (int): 列数,非必须。
%_51 (Tensor): 非必须。
%9 (int): layout。
%67 (str): 设备。
%7 (bool): 是否计算梯度。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %124
# Handle input 0, i.e. %49
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# Handle input 1, i.e. %123
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
layer_inputs["y"] = inputs_name[1]
layer_inputs["num_rows"] = inputs_name[0]
if len(inputs_name) > 5:
    # Handle input 1, i.e. %_50
    mapper._check_input(graph, inputs_node[1], inputs_name[1],
                        current_outputs)
    layer_inputs["num_columns"] = inputs_name[1]
# Collect the inputs of the current node
current_inputs = list(layer_inputs.values())
# Handle the 4th input from the end, i.e. %15: dtype
layer_attrs["dtype"] = dtype_dict[mapper.attrs[inputs_name[-4]]]
graph.add_layer("prim.ne", inputs=layer_inputs, outputs=layer_outputs)
graph.add_layer(
"fluid.layers.eye",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
def aten_neg(mapper, graph, node):
""" 构造对数值取负的PaddleLayer。
def aten_flatten(mapper, graph, node):
""" 构造flatten的PaddleLayer。
TorchScript示例:
%909 : int = aten::neg(%908)
%x.8 : Tensor = aten::flatten(%x, %4, %2)
参数含义:
%909 (int): 取负后结果。
%908 (int): 需取负的输入。
%x.8 (Tensor): flatten后结果。
%x (Tensor): 输入Tensor。
%4 (int): flatten的开始维度。
%2 (int): flatten的结束维度。
注意:目前flatten只支持第一维的flatten
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
......@@ -994,24 +1333,69 @@ def aten_neg(mapper, graph, node):
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %124
# Handle input 1, i.e. %4
graph.add_layer(
"prim.assert",
inputs={},
outputs=[inputs_name[1]],
type='eq',
key=mapper.attrs[inputs_name[1]],
value=1)
# Handle input 2, i.e. %2
graph.add_layer(
"prim.assert",
inputs={},
outputs=[inputs_name[2]],
type='eq',
key=mapper.attrs[inputs_name[2]],
value=-1)
# Handle input 0, i.e. %x
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# Collect the inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer(
"fluid.layers.flatten",
inputs=layer_inputs,
outputs=layer_outputs,
axis=1)
return current_inputs, current_outputs
def aten_Float(mapper, graph, node):
""" 构造取浮点型的PaddleLayer。
TorchScript示例:
%3992 : float = aten::Float(%3991)
参数含义:
%3992 (int): 向上取整后的整数。
%3991 (float): 需要取整的浮点数。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %3991
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["input"] = inputs_name[0]
# Collect the inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.neg", inputs=layer_inputs, outputs=layer_outputs)
graph.add_layer("prim.float", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten___not__(mapper, graph, node):
""" 构造对bool型取负的PaddleLayer。
def aten_floor(mapper, graph, node):
""" 构造向上取整的PaddleLayer。
TorchScript示例:
%4498 : bool = aten::__not__(%aux_defined.2)
%3978 : int = aten::floor(%scale.18)
参数含义:
%4498 (bool): 取负后结果
%aux_defined.2 (bool): 需取负的输入
%3978 (int): 向上取整后的整数
%scale.18 (float): 需要取整的浮点数
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
......@@ -1019,26 +1403,1583 @@ def aten___not__(mapper, graph, node):
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %124
# Handle input 0, i.e. %scale.18
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["input"] = inputs_name[0]
# Collect the inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.not", inputs=layer_inputs, outputs=layer_outputs)
graph.add_layer("prim.floor", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_relu(mapper, graph, node):
""" 构造ReLU激活的PaddleLayer。
def aten_floordiv(mapper, graph, node):
""" 构造向上取整除法的PaddleLayer。
TorchScript示例:
%channels_per_group.2 : int = aten::floordiv(%num_channels.2, %3690)
参数含义:
%channels_per_group.2 (-): 除后的结果。
%num_channels.2 (-): 被除数。
%2 (int): 除数。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %num_channels.2
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# Handle input 1, i.e. %3690
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
layer_inputs["y"] = inputs_name[1]
# Collect the inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.floordiv", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_floor_divide(mapper, graph, node):
""" 构造向上取整除法的PaddleLayer。
TorchScript示例:
%channels_per_group.2 : int = aten::floor_divide(%num_channels.2, %3690)
参数含义:
%channels_per_group.2 (-): 除后的结果。
%num_channels.2 (-): 被除数。
%2 (int): 除数。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %num_channels.2
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# Handle input 1, i.e. %3690
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
layer_inputs["y"] = inputs_name[1]
# Collect the inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.floordiv", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_gelu(mapper, graph, node):
""" 构造GeLU激活的PaddleLayer。
TorchScript示例:
%result.3 : Tensor = aten::gelu(%input.5)
参数含义:
%result.3 (Tensor): 输出,GELU后的结果。
%result.5 (Tensor): 需要GELU的Tensor。
注意: inplace这个参数在paddle中未实现
"""
if "gelu" in mapper.dygraph_name_id:
mapper.dygraph_name_id["gelu"] += 1
else:
mapper.dygraph_name_id["gelu"] = 0
gelu_name = "gelu" + str(mapper.dygraph_name_id["gelu"])
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [gelu_name, output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %input.5
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# Collect the inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer(
"paddle.nn.GELU", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten___getitem__(mapper, graph, node):
""" 构造获取list中元素的PaddleLayer。
TorchScript示例:
%v.1 : int = aten::__getitem__(%72, %88)
参数含义:
%v.1 (-): 输出,list中的元素。
%72 (list): 需要获取元素的list。
%88 (int): 索引。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %72
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["list"] = inputs_name[0]
# Handle input 1, i.e. %88
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
layer_inputs["index"] = inputs_name[1]
# Collect the inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.getitem", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_gt(mapper, graph, node):
""" 构造对比大小的PaddleLayer。
TorchScript示例:
%83 : bool = aten::gt(%82, %78)
参数含义:
%83 (bool): 输出,第一个元素是否大于第二个元素。
%82 (-): 需对比的输入1。
%78 (-): 需对比的输入2。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %82
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# Handle input 1, i.e. %78
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
layer_inputs["y"] = inputs_name[1]
# Collect the inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.gt", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_hardtanh_(mapper, graph, node):
""" 构造hardtanh激活的PaddleLayer。
TorchScript示例:
%result.9 : Tensor = aten::hardtanh_(%input.20, %67, %66)
参数含义:
%result.9 (Tensor): 输出,hardtanh激活后的Tensor。
%input.20 (Tensor): 需要hardtanh激活的Tensor。
%67 (float): hardtanh激活的最小阈值。
%66 (float): hardtanh激活的最大阈值。
"""
if "tanh" in mapper.dygraph_name_id:
mapper.dygraph_name_id["tanh"] += 1
else:
mapper.dygraph_name_id["tanh"] = 0
tanh_name = "tanh" + str(mapper.dygraph_name_id["tanh"])
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [tanh_name, output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %input.20
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# Collect the inputs of the current node
current_inputs = list(layer_inputs.values())
# Handle input 1, i.e. %67
layer_attrs["min"] = mapper.attrs[inputs_name[1]]
# Handle input 2, i.e. %66
layer_attrs["max"] = mapper.attrs[inputs_name[2]]
graph.add_layer(
'paddle.nn.Hardtanh',
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
def aten_index_select(mapper, graph, node):
""" 构造对dict加入元素的PaddleLayer。
TorchScript示例:
%bd.3 : Tensor = aten::index_select(%x2.3, %320, %371)
参数含义:
%bd.3 (Tensor): 输出,选择后的Tensor。
%x2.3 (Tensor): 需要选择的Tensor。
%320 (int): 维度。
%371 (Tensor): 选择的索引。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %x2.3
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# Collect the inputs of the current node (needed before the append below)
current_inputs = list(layer_inputs.values())
# Handle input 1, i.e. %320
if inputs_name[1] in mapper.attrs:
    layer_attrs["axis"] = mapper.attrs[inputs_name[1]]
else:
    mapper._check_input(graph, inputs_node[1], inputs_name[1],
                        current_outputs)
    layer_inputs["axis"] = inputs_name[1]
    current_inputs.append(inputs_name[1])
# Handle input 2, i.e. %371
mapper._check_input(graph, inputs_node[2], inputs_name[2], current_outputs)
layer_inputs["index"] = inputs_name[2]
current_inputs = list(layer_inputs.values())
graph.add_layer(
"prim.index_select",
inputs=layer_inputs,
outputs=current_outputs,
**layer_attrs)
return current_inputs, current_outputs
def aten_Int(mapper, graph, node):
""" 构造强转为int的PaddleLayer。
TorchScript示例:
%1739 : int = aten::Int(%1738)
参数含义:
%1739 (int): 输出,int型数据。
%1738 (-): 需要强转的数据。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %1738
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["input"] = inputs_name[0]
# Collect the inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.int", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten___is__(mapper, graph, node):
""" 构造is not的PaddleLayer。
TorchScript示例:
%3949 : bool = aten::__isnot__(%size.122, %3931)
参数含义:
%3949 (bool): 输出,第一个元素是否不是第二个元素。
%size.122 (-): 需对比的输入1。
%3931 (-): 需对比的输入2。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Collect the outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %size.122
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# Handle input 1, i.e. %3931
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
layer_inputs["y"] = inputs_name[1]
# Collect the inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.is", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten___isnot__(mapper, graph, node):
""" 构造is not的PaddleLayer。
TorchScript示例:
%3949 : bool = aten::__isnot__(%size.122, %3931)
参数含义:
%3949 (bool): 输出,第一个元素是否不是第二个元素。
%size.122 (-): 需对比的输入1。
%3931 (-): 需对比的输入2。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
# 处理输入0,即%size.122
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# 处理输入1,即%3931
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
layer_inputs["y"] = inputs_name[1]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.isnot", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_layer_norm(mapper, graph, node):
""" 构造层归一化的PaddleLayer。
TorchScript示例:
%input0.4 : Tensor = aten::layer_norm(%input.6, %1181, %174, %173, %70, %71)
参数含义:
%input0.4 (Tensor): 输出,层归一化后的结果。
%input.6 (Tensor): 需要进行层归一化的特征层。
%1181 (list/int/tuple): 需规范化的shape。
%174 (Tensor): weights。
%173 (Tensor): bias。
%70 (float): 指明在计算过程中是否添加较小的值到方差中以防止除零。
%71 (bool): 是否启用cudnn。
"""
if "layernorm" in mapper.dygraph_name_id:
mapper.dygraph_name_id["layernorm"] += 1
else:
mapper.dygraph_name_id["layernorm"] = 0
layernorm_name = "layernorm" + str(mapper.dygraph_name_id["layernorm"])
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [layernorm_name, output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
# 处理输入0,即%input.6
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["input"] = inputs_name[0]
# 获取当前节点输入、输出的list
current_inputs = list(layer_inputs.values())
# 处理输入1,即%1181
layer_attrs["normalized_shape"] = mapper.attrs[inputs_name[1]]
# 处理输入2,即%174
weights = mapper.pytorch_params[inputs_name[2]]
mapper.paddle_params[layernorm_name + ".weight"] = weights
# 处理输入3,即%173
if inputs_name[3] in mapper.pytorch_params:
bias = mapper.pytorch_params[inputs_name[3]]
if bias is not None:
mapper.paddle_params[layernorm_name + ".bias"] = bias
else:
mapper.paddle_params[layernorm_name + ".bias"] = False
# 处理输入4,即%70
layer_attrs["epsilon"] = mapper.attrs[inputs_name[4]]
graph.add_layer(
"paddle.nn.LayerNorm",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
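# Sketch of the emitted layer (attribute values assumed for illustration):
# with normalized_shape=[256] and epsilon=1e-05, the generated dygraph code is
# roughly equivalent to:
#
#     layernorm0 = paddle.nn.LayerNorm(normalized_shape=[256], epsilon=1e-05)
#     out = layernorm0(feature)
#
# The weight/bias Tensors are not passed here; they are registered in
# mapper.paddle_params under "<layernorm_name>.weight" / ".bias" above.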
def aten_le(mapper, graph, node):
""" 构造对比大小的PaddleLayer。
TorchScript示例:
%80 : bool = aten::le(%78, %79)
参数含义:
%80 (bool): 输出,第一个元素是否小于等于第二个元素。
%78 (-): 需对比的输入1。
%79 (-): 需对比的输入2。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
# 处理输入0,即%78
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# 处理输入1,即%79
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
layer_inputs["y"] = inputs_name[1]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.le", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_leaky_relu_(mapper, graph, node):
""" 构造leaky relu激活的PaddleLayer。
TorchScript示例:
%input.117 : Tensor = aten::leaky_relu_(%input.114, %1570)
参数含义:
%input.117 (Tensor): 输出,leaky relu后的结果。
%input.114 (Tensor): 需要leaky relu的Tensor。
%1570 (float): 输入中的元素小于0时的斜率。
"""
if "leaky_relu" in mapper.dygraph_name_id:
mapper.dygraph_name_id["leaky_relu"] += 1
else:
mapper.dygraph_name_id["leaky_relu"] = 0
leaky_relu_name = "leaky_relu" + str(mapper.dygraph_name_id["leaky_relu"])
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [leaky_relu_name, output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
    # handle input 0, i.e. %input.114
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# 获取当前节点输入、输出的list
current_inputs = list(layer_inputs.values())
# 处理输入1,即%1570
layer_attrs["negative_slope"] = mapper.attrs[inputs_name[1]]
graph.add_layer(
"paddle.nn.LeakyReLU",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
def aten_len(mapper, graph, node):
""" 构造获取list长度的PaddleLayer。
TorchScript示例:
%85 : int = aten::len(%83)
参数含义:
%85 (int): 输出,list的长度。
%72 (list): 需要获取长度的list。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
    # handle input 0, i.e. %83
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["input"] = inputs_name[0]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.len", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_lt(mapper, graph, node):
""" 构造对比大小的PaddleLayer。
TorchScript示例:
%80 : bool = aten::lt(%78, %79)
参数含义:
%80 (bool): 输出,第一个元素是否小于第二个元素。
%78 (-): 需对比的输入1。
%79 (-): 需对比的输入2。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
# 处理输入0,即%78
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# 处理输入1,即%79
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
layer_inputs["y"] = inputs_name[1]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.lt", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_masked_fill_(mapper, graph, node):
""" 构造填充mask的PaddleLayer。
TorchScript示例:
%input.4 : Tensor = aten::masked_fill_(%scores.2, %mask.2, %46)
参数含义:
%input.4 (Tensor): 输出,填充后的结果。
%scores.2 (Tensor): 需要填充的Tensor。
%mask.2 (Tensor): bool型的Tensor,哪些位置需要填充。
%46 (-): 填充的值。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输入的list
current_inputs = []
# 获取当前节点输出的list
current_outputs = [output_name]
    # handle input 0, i.e. %scores.2
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
current_inputs.append(inputs_name[0])
graph.add_layer(
"prim.type",
inputs={"input": inputs_name[0]},
outputs=[inputs_name[0] + "_type"])
    # handle input 1, i.e. %mask.2
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
current_inputs.append(inputs_name[1])
graph.add_layer(
"paddle.logical_not",
inputs={"x": inputs_name[1]},
outputs=[inputs_name[1] + "_not"])
graph.add_layer(
"fluid.layers.cast",
inputs={"x": inputs_name[1]},
outputs=[inputs_name[1] + "_mask"],
dtype=inputs_name[0] + "_type")
graph.add_layer(
"fluid.layers.cast",
inputs={"x": inputs_name[1] + "_not"},
outputs=[inputs_name[1] + "_not_mask"],
dtype=inputs_name[0] + "_type")
graph.add_layer(
"paddle.multiply",
inputs={"x": inputs_name[0],
"y": inputs_name[1] + "_not_mask"},
outputs=[inputs_name[0] + "_not_mask"])
# 处理输入2,即%46
mapper._check_input(graph, inputs_node[2], inputs_name[2], current_outputs)
graph.add_layer(
"prim.eq",
inputs={"x": inputs_name[2]},
outputs=[inputs_name[2] + "_cond1"],
y="-float('inf')")
graph.add_layer(
"prim.eq",
inputs={"x": inputs_name[2]},
outputs=[inputs_name[2] + "_cond2"],
y="float('inf')")
graph.add_layer(
"prim.or",
inputs={
"x": inputs_name[2] + "_cond1",
"y": inputs_name[2] + "_cond2"
},
outputs=[inputs_name[2] + "_cond"])
graph.add_layer(
"prim.if", {'input': inputs_name[2] + "_cond"},
outputs=[inputs_name[2] + "_if"])
if_layer = graph.layers[list(graph.layers.keys())[-1]]
block = PaddleGraph(if_layer, graph_type="dygraph")
block.add_layer(
"prim.equal",
inputs={"input": inputs_name[1] + "_mask"},
outputs=[inputs_name[2] + "_1"])
if_layer.add_block(block)
block = PaddleGraph(if_layer, graph_type="dygraph")
block.add_layer(
"prim.mul",
inputs={"x": inputs_name[1] + "_mask",
"y": inputs_name[2]},
outputs=[inputs_name[2] + "_1"])
if_layer.add_block(block)
if_layer.inputs["input-0"] = inputs_name[1] + "_mask"
if_layer.inputs["input-1"] = inputs_name[2]
if_layer.outputs.append(inputs_name[2] + "_1")
graph.add_layer(
"fluid.layers.elementwise_add",
inputs={"x": inputs_name[2] + "_1",
"y": inputs_name[0] + "_not_mask"},
outputs=layer_outputs)
return current_inputs, current_outputs
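# The layer chain above decomposes masked_fill_ arithmetically; the intent,
# as a sketch (hypothetical names):
#
#     out = x * cast(~mask, x.dtype) + fill * cast(mask, x.dtype)
#
# The prim.if branch substitutes the mask itself for `fill * mask` when fill
# is +/-inf, presumably to avoid `inf * 0 = nan` at unmasked positions.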
def aten_max_pool2d(mapper, graph, node):
""" 构造最大池化的PaddleLayer。
TorchScript示例:
%input.8 : Tensor = aten::max_pool2d(%result.11, %20, %23, %21, %22, %19)
参数含义:
%input.8 (Tensor): 输出,池化后的结果。
%result.11 (Tensor): 需要池化的Tensor。
%20 (list): 池化kernel的大小。
%23 (list): 步长大小。
%21 (list): 填充大小。
%22 (list): 膨胀系数大小。
%19 (bool): 是否用ceil函数计算输出高度和宽度。
"""
if "pool" in mapper.dygraph_name_id:
mapper.dygraph_name_id["pool"] += 1
else:
mapper.dygraph_name_id["pool"] = 0
pool_name = "pool" + str(mapper.dygraph_name_id["pool"])
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [pool_name, output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
# 处理输入0,即%result.11
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["input"] = inputs_name[0]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
# 处理输入1,即%20
layer_attrs["pool_size"] = mapper.attrs[inputs_name[1]]
# 处理输入2,即%23
layer_attrs["pool_stride"] = mapper.attrs[inputs_name[2]]
# 处理输入3,即%21
layer_attrs["pool_padding"] = mapper.attrs[inputs_name[3]]
# 处理输入4,即%22
graph.add_layer(
"prim.assert",
inputs={},
outputs=[inputs_name[4]],
type="eq",
key=mapper.attrs[inputs_name[4]],
value=[1, [1, 1]])
# 处理输入5,即%19
layer_attrs["ceil_mode"] = mapper.attrs[inputs_name[5]]
layer_attrs["pool_type"] = string("max")
graph.add_layer(
"paddle.nn.Pool2D",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
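# Sketch of the emitted layer (attribute values assumed for illustration):
# for kernel [3, 3], stride [2, 2], padding [1, 1] and ceil_mode False, the
# generated code is roughly:
#
#     pool0 = paddle.nn.Pool2D(pool_size=[3, 3], pool_stride=[2, 2],
#                              pool_padding=[1, 1], pool_type="max",
#                              ceil_mode=False)
#     out = pool0(feature)
#
# The prim.assert above guards the unsupported dilation argument: only a
# dilation of 1 (or [1, 1]) can be mapped.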
def aten_matmul(mapper, graph, node):
""" 构造矩阵相乘的PaddleLayer。
TorchScript示例:
%output.2 : Tensor = aten::matmul(%101, %111)
参数含义:
%output.2 (Tensor): 输出,相乘后的结果。
%101 (Tensor): 矩阵1。
%102 (Tensor): 矩阵2。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
# 处理输入0,即%101
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
    # handle input 1, i.e. %111
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
layer_inputs["y"] = inputs_name[1]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
graph.add_layer("paddle.matmul", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_mean(mapper, graph, node):
""" 构造求均值的PaddleLayer。
TorchScript示例:
%x.28 : Tensor = aten::mean(%result.1, %4967, %3, %2)
参数含义:
%x.28 (Tensor): 输出,求均值后的结果。
%result.1 (Tensor): 输入,需要求均值的Tensor。
%4967 (int/list): 求平均值运算的维度。
%3 (bool): 是否在输出Tensor中保留减小的维度。
%2 (Tensor): 结果Tensor。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
# 处理输入0,即%result.1
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["input"] = inputs_name[0]
current_inputs = list(layer_inputs.values())
# 处理输入1,即%4967
if inputs_name[1] in mapper.attrs:
layer_attrs["dim"] = mapper.attrs[inputs_name[1]]
else:
mapper._check_input(graph, inputs_node[1], inputs_name[1],
current_outputs)
layer_inputs["dim"] = inputs_name[1]
current_inputs.append(inputs_name[1])
    # handle input 2, i.e. %3
    if inputs_name[2] in mapper.attrs:
layer_attrs["keep_dim"] = mapper.attrs[inputs_name[2]]
else:
mapper._check_input(graph, inputs_node[2], inputs_name[2],
current_outputs)
layer_inputs["keep_dim"] = inputs_name[2]
current_inputs.append(inputs_name[2])
graph.add_layer(
"fluid.layers.reduce_mean",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
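# Equivalent fluid call for the constant-attribute case (values assumed):
#
#     out = fluid.layers.reduce_mean(input, dim=[2, 3], keep_dim=True)
#
# When dim/keep_dim are not compile-time constants in the TorchScript graph,
# they are wired in as layer inputs instead of attributes.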
def aten_mul(mapper, graph, node):
""" 构造数值相乘的PaddleLayer。
TorchScript示例:
%size_prods.39 : int = aten::mul(%size_prods.38, %114)
参数含义:
%size_prods.39 (Tensor): 输出,相乘后的结果。
%size_prods.38 (-): 数值1。
%114 (-): 数值2。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
# 处理输入0,即%size_prods.38
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# 处理输入1,即%114
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
layer_inputs["y"] = inputs_name[1]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs
graph.add_layer("prim.mul", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_ne(mapper, graph, node):
""" 构造判断数值是否不相等的PaddleLayer。
TorchScript示例:
%134 : bool = aten::ne(%133, %132)
参数含义:
%134 (bool): 对比后结果。
%133 (-): 需对比的输入1。
%132 (-): 需对比的输入2。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
    # handle input 0, i.e. %133
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
    # handle input 1, i.e. %132
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
layer_inputs["y"] = inputs_name[1]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.ne", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_neg(mapper, graph, node):
""" 构造对数值取负的PaddleLayer。
TorchScript示例:
%909 : int = aten::neg(%908)
参数含义:
%909 (int): 取负后结果。
%908 (int): 需取负的输入。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
    # handle input 0, i.e. %908
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["input"] = inputs_name[0]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.neg", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten___not__(mapper, graph, node):
""" 构造对bool型取负的PaddleLayer。
TorchScript示例:
%4498 : bool = aten::__not__(%aux_defined.2)
参数含义:
%4498 (bool): 取负后结果。
%aux_defined.2 (bool): 需取负的输入。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
    # handle input 0, i.e. %aux_defined.2
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["input"] = inputs_name[0]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.not", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_permute(mapper, graph, node):
""" 构造对bool型取负的PaddleLayer。
TorchScript示例:
%2385 : Tensor = aten::permute(%cls_confs0.2, %2384)
参数含义:
%2385 (Tensor): 重排后的结果。
%cls_confs0.2 (Tensor): 需要重排的Tensor。
%2348 (list): 依照此参数进行重排。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
# 处理输入0,即%cls_confs0.2
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
    # handle input 1, i.e. %2384
if inputs_name[1] in mapper.attrs:
layer_attrs["perm"] = mapper.attrs[inputs_name[1]]
else:
mapper._check_input(graph, inputs_node[1], inputs_name[1],
current_outputs)
layer_inputs["perm"] = inputs_name[1]
current_inputs.append(inputs_name[1])
graph.add_layer(
"fluid.layers.transpose",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
def aten_pow(mapper, graph, node):
""" 构造指数激活的PaddleLayer。
TorchScript示例:
%x.6 : Tensor = aten::pow(%4700, %4703)
参数含义:
%x.6 (Tensor): 输出,指数激活后的Tensor。
%4700 (Tensor): 需要指数激活的Tensor。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
# 处理输入0,即%4700
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# 获取当前节点输入、输出的list
current_inputs = list(layer_inputs.values())
# 处理输入1,即%4703
if inputs_name[1] in mapper.attrs:
layer_attrs["factor"] = mapper.attrs[inputs_name[1]]
else:
mapper._check_input(graph, inputs_node[1], inputs_name[1],
current_outputs)
layer_inputs["factor"] = inputs_name[1]
current_inputs.append(inputs_name[1])
graph.add_layer(
"fluid.layers.pow",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
def aten_relu(mapper, graph, node):
""" 构造ReLU激活的PaddleLayer。
TorchScript示例:
%result.3 : Tensor = aten::relu(%input.5)
参数含义:
%result.3 (Tensor): 输出,ReLU后的结果。
%result.5 (Tensor): 需要ReLU的Tensor。
注意: inplace这个参数在paddle中未实现
"""
if "relu" in mapper.dygraph_name_id:
mapper.dygraph_name_id["relu"] += 1
else:
mapper.dygraph_name_id["relu"] = 0
relu_name = "relu" + str(mapper.dygraph_name_id["relu"])
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [relu_name, output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
    # handle input 0, i.e. %input.5
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
graph.add_layer(
"paddle.nn.ReLU", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_relu_(mapper, graph, node):
""" 构造ReLU激活的PaddleLayer。
TorchScript示例:
%result.3 : Tensor = aten::relu_(%input.5)
参数含义:
%result.3 (Tensor): 输出,ReLU后的结果。
%result.5 (Tensor): 需要ReLU的Tensor。
注意: inplace这个参数在paddle中未实现
"""
if "relu" in mapper.dygraph_name_id:
mapper.dygraph_name_id["relu"] += 1
else:
mapper.dygraph_name_id["relu"] = 0
relu_name = "relu" + str(mapper.dygraph_name_id["relu"])
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [relu_name, output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
    # handle input 0, i.e. %input.5
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
graph.add_layer(
"paddle.nn.ReLU", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_relu6(mapper, graph, node):
""" 构造ReLU6激活的PaddleLayer。
TorchScript示例:
%result.3 : Tensor = aten::relu6(%input.5)
参数含义:
%result.3 (Tensor): 输出,ReLU6后的结果。
%result.5 (Tensor): 需要ReLU6的Tensor。
注意: inplace这个参数在paddle中未实现
"""
if "relu6" in mapper.dygraph_name_id:
mapper.dygraph_name_id["relu6"] += 1
else:
mapper.dygraph_name_id["relu6"] = 0
relu6_name = "relu6" + str(mapper.dygraph_name_id["relu6"])
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [relu6_name, output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
    # handle input 0, i.e. %input.5
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
graph.add_layer(
"paddle.nn.ReLU6", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_reshape(mapper, graph, node):
""" 构造调整大小的PaddleLayer。
TorchScript示例:
%x.6 : Tensor = aten::reshape(%4700, %4703)
参数含义:
%x.6 (Tensor): 输出,reshape后的Tensor。
%4700 (Tensor): 需要reshape的Tensor。
%4703 (list): 形状大小组成的list。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
# 处理输入0,即%4700
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# 获取当前节点输入、输出的list
current_inputs = list(layer_inputs.values())
# 处理输入1,即%4703
if inputs_name[1] in mapper.attrs:
layer_attrs["shape"] = mapper.attrs[inputs_name[1]]
else:
mapper._check_input(graph, inputs_node[1], inputs_name[1],
current_outputs)
layer_inputs["shape"] = inputs_name[1]
current_inputs.append(inputs_name[1])
graph.add_layer(
"prim.type",
inputs={"input": inputs_name[0]},
outputs=[inputs_name[0] + "_type"])
graph.add_layer(
"prim.str",
inputs={"input": inputs_name[0] + "_type"},
outputs=[inputs_name[0] + "_type"])
graph.add_layer(
"prim.eq",
inputs={"x": inputs_name[0] + "_type"},
outputs=[inputs_name[0] + "_cond"],
y=string("VarType.BOOL"))
graph.add_layer(
"prim.if", {'input': inputs_name[0] + "_cond"},
outputs=[inputs_name[0] + "_if1"])
if_layer = graph.layers[list(graph.layers.keys())[-1]]
block = PaddleGraph(if_layer, graph_type="dygraph")
block.add_layer(
"fluid.layers.cast",
inputs={"x": inputs_name[0]},
outputs=[inputs_name[0]],
dtype=string("int32"))
if_layer.add_block(block)
block = PaddleGraph(if_layer, graph_type="dygraph")
if_layer.add_block(block)
if_layer.inputs["input-0"] = inputs_name[0]
graph.add_layer(
"fluid.layers.reshape",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
graph.add_layer(
"prim.if", {'input': inputs_name[0] + "_cond"},
outputs=[inputs_name[0] + "_if2"])
if_layer = graph.layers[list(graph.layers.keys())[-1]]
block = PaddleGraph(if_layer, graph_type="dygraph")
block.add_layer(
"fluid.layers.cast",
inputs={"x": layer_outputs[0]},
outputs=layer_outputs,
dtype=string("bool"))
if_layer.add_block(block)
block = PaddleGraph(if_layer, graph_type="dygraph")
if_layer.add_block(block)
if_layer.inputs["input-0"] = layer_outputs[0]
return current_inputs, current_outputs
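# The prim.if blocks above work around reshape not accepting bool Tensors:
# a bool input is cast to int32, reshaped, then cast back. Sketch with an
# assumed shape:
#
#     x = fluid.layers.cast(x, dtype="int32")       # only in the bool branch
#     out = fluid.layers.reshape(x, shape=[1, -1])
#     out = fluid.layers.cast(out, dtype="bool")    # only in the bool branch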
def aten_rsub(mapper, graph, node):
""" 构造数值相减的PaddleLayer,计算公式为:out = y - alpha * x。
TorchScript示例:
%31 : Tensor = aten::rsub(%30, %13, %7)
参数含义:
%31 (Tensor): 相减结果。
%30 (Tensor): 输入Tensor x。
%13 (int/float): 输入数值 y。
%7 (int/float): alpha。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
# 处理输入0,即%30
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# 处理输入1,即%13
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
layer_inputs["y"] = inputs_name[1]
# 处理输入2,即%7
mapper._check_input(graph, inputs_node[2], inputs_name[2], current_outputs)
layer_inputs["alpha"] = inputs_name[2]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.rsub", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_ScalarImplicit(mapper, graph, node):
""" 构造获取scalar的PaddleLayer。
TorchScript示例:
%89 : Scalar = aten::ScalarImplicit(%end.1)
参数含义:
%89 (Scalar): 输出,得到的Scalar。
%end.1 (-): 组要转换的数据。
【注意】由于Paddle无Scalar,所以最后转换为Tensor。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
# 处理输入0,即%end.1
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["input"] = inputs_name[0]
input_type = list(node.inputs())[0].type()
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
if str(input_type) == "Tensor":
graph.add_layer(
"prim.equal", inputs=layer_inputs, outputs=layer_outputs)
else:
        raise Exception(
            "The input type {} of aten::ScalarImplicit is not implemented yet!".
            format(input_type))
return current_inputs, current_outputs
def aten_select(mapper, graph, node):
""" 构造选取特定维度Variable的PaddleLayer。
TorchScript示例:
%19 : Tensor = aten::select(%18, %8, %7)
参数含义:
%19 (Tensor): 输出,选取的Tensor。
%18 (Tensor): 需要选取的Tensor。
%8 (int): select的维度。
%7 (int): select的第n个向量。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
# 处理输入0,即%18
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["input"] = inputs_name[0]
# 处理输入1,即%8
layer_attrs["dim"] = mapper.attrs[inputs_name[1]]
    # handle input 2, i.e. %7
mapper._check_input(graph, inputs_node[2], inputs_name[2], current_outputs)
layer_inputs["index"] = inputs_name[2]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
graph.add_layer(
"prim.select",
inputs=layer_inputs,
outputs=current_outputs,
**layer_attrs)
return current_inputs, current_outputs
def aten__set_item(mapper, graph, node):
""" 构造对dict加入元素的PaddleLayer。
TorchScript示例:
= aten::_set_item(%features.1, %out_name.1, %x.3)
参数含义:
%features.1 (list): dict。
%out_name.1 (-): dict的key。
%x.3 (-): dict的value。
"""
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = []
# 处理输入0,即%features.1
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["dict"] = inputs_name[0]
# 处理输入1,即%out_name.1
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
layer_inputs["key"] = inputs_name[1]
# 处理输入2,即%x.3
mapper._check_input(graph, inputs_node[2], inputs_name[2], current_outputs)
layer_inputs["value"] = inputs_name[2]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.set_item", inputs=layer_inputs, outputs=[])
return current_inputs, current_outputs
def aten_sigmoid(mapper, graph, node):
""" 构造sigmoid激活的PaddleLayer。
TorchScript示例:
%55 : Tensor = aten::sigmoid(%54)
参数含义:
%55 (Tensor): 输出,sigmoid后的结果。
%54 (Tensor): 需要tanh的Tensor。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
# 处理输入0,即%54
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# 获取当前节点输入、输出的list
current_inputs = list(layer_inputs.values())
graph.add_layer(
"fluid.layers.sigmoid", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_sin(mapper, graph, node):
""" 构造数学计算sin的PaddleLayer。
TorchScript示例:
%94 : Tensor = aten::sin(%sinusoid_inp.1)
参数含义:
%94 (Tensor): 输出,sin之后的结果。
%sinusoid_inp.1 (Tensor): 需要进行shape的Tensor。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
# 处理输入0,即%sinusoid_inp.1
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# 获取当前节点输入、输出的list
current_inputs = list(layer_inputs.values())
graph.add_layer("paddle.sin", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_size(mapper, graph, node):
""" 构造获取shape的PaddleLayer。
TorchScript示例:
%73 : int[] = aten::size(%x.12, %10)
参数含义:
%73 (list): 输出,shape的list。
%x.12 (Tensor): 需要获取shape的Tensor。
%10 (int): 非必须,代表维度。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
# 处理输入0,即%x.12
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["input"] = inputs_name[0]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
if len(inputs_name) > 1:
        # handle input 1, i.e. %10
if inputs_name[1] in mapper.attrs:
layer_attrs["dim"] = mapper.attrs[inputs_name[1]]
else:
mapper._check_input(graph, inputs_node[1], inputs_name[1],
current_outputs)
layer_inputs["dim"] = inputs_name[1]
current_inputs.append(inputs_name[1])
graph.add_layer(
"prim.shape_dim",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
graph.add_layer(
"fluid.layers.shape", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_slice(mapper, graph, node):
""" 构造切分list或Variable的PaddleLayer。
TorchScript示例:
%83 : int[] = aten::slice(%73, %_81, %82, %75, %77)
参数含义:
%83 (list/Tensor): 输出,切分后的list。
%73 (list/Tensor): 需要切分的list。
%_81 (int): 切分的维度,不一定存在。
%82 (int): 切分的开始索引。
%75 (int): 切分的结束索引。
%77 (int): 切分的步长。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
if len(inputs_name) == 5:
# 处理输入0,即%73
mapper._check_input(graph, inputs_node[0], inputs_name[0],
current_outputs)
layer_inputs["input"] = inputs_name[0]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
# 处理输入1,即%_81
if inputs_name[1] in mapper.attrs:
graph.add_layer(
"prim.list",
inputs={},
outputs=[inputs_name[1] + "_list"],
input0=mapper.attrs[inputs_name[1]])
else:
mapper._check_input(graph, inputs_node[1], inputs_name[1],
current_outputs)
graph.add_layer(
"prim.list",
inputs={"input0": inputs_name[1]},
outputs=[inputs_name[1] + "_list"])
current_inputs.append(inputs_name[1])
layer_inputs["axes"] = inputs_name[1] + "_list"
current_inputs.append(inputs_name[1] + "_list")
current_outputs.append(inputs_name[1] + "_list")
# 处理输入2,即%82
if inputs_name[2] in mapper.attrs:
graph.add_layer(
"prim.list",
inputs={},
outputs=[inputs_name[2] + "_list"],
input0=mapper.attrs[inputs_name[2]])
else:
mapper._check_input(graph, inputs_node[2], inputs_name[2],
current_outputs)
graph.add_layer(
"prim.list",
inputs={"input0": inputs_name[2]},
outputs=[inputs_name[2] + "_list"])
current_inputs.append(inputs_name[2])
layer_inputs["starts"] = inputs_name[2] + "_list"
current_inputs.append(inputs_name[2] + "_list")
current_outputs.append(inputs_name[2] + "_list")
        # handle input 3, i.e. %75
if inputs_name[3] in mapper.attrs:
graph.add_layer(
"prim.list",
inputs={},
outputs=[inputs_name[3] + "_list"],
input0=mapper.attrs[inputs_name[3]])
else:
mapper._check_input(graph, inputs_node[3], inputs_name[3],
current_outputs)
graph.add_layer(
"prim.list",
inputs={"input0": inputs_name[3]},
outputs=[inputs_name[3] + "_list"])
current_inputs.append(inputs_name[3])
layer_inputs["ends"] = inputs_name[3] + "_list"
current_inputs.append(inputs_name[3] + "_list")
current_outputs.append(inputs_name[3] + "_list")
# 处理输入4,即%77
if inputs_name[4] in mapper.attrs:
graph.add_layer(
"prim.list",
inputs={},
outputs=[inputs_name[4] + "_list"],
input0=mapper.attrs[inputs_name[4]])
else:
mapper._check_input(graph, inputs_node[4], inputs_name[4],
current_outputs)
graph.add_layer(
"prim.list",
inputs={"input0": inputs_name[4]},
outputs=[inputs_name[4] + "_list"])
current_inputs.append(inputs_name[4])
layer_inputs["strides"] = inputs_name[4] + "_list"
current_inputs.append(inputs_name[4] + "_list")
current_outputs.append(inputs_name[4] + "_list")
graph.add_layer(
"fluid.layers.strided_slice",
inputs=layer_inputs,
outputs=layer_outputs)
else:
# 处理输入0,即%73
mapper._check_input(graph, inputs_node[0], inputs_name[0],
current_outputs)
layer_inputs["input"] = inputs_name[0]
# 处理输入1,即%82
mapper._check_input(graph, inputs_node[1], inputs_name[1],
current_outputs)
layer_inputs["start"] = inputs_name[1]
# 处理输入2,即%75
mapper._check_input(graph, inputs_node[2], inputs_name[2],
current_outputs)
layer_inputs["end"] = inputs_name[2]
# 处理输入3,即%77
mapper._check_input(graph, inputs_node[3], inputs_name[3],
current_outputs)
layer_inputs["step"] = inputs_name[3]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
graph.add_layer(
"prim.slice", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
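# For the 5-input form, scalar axis/start/end/step values are each wrapped
# into a single-element list because strided_slice expects list arguments.
# Sketch with assumed values:
#
#     out = fluid.layers.strided_slice(
#         input, axes=[0], starts=[1], ends=[5], strides=[2])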
def aten_softmax(mapper, graph, node):
""" 构造softmax激活的PaddleLayer。
TorchScript示例:
%input2.1 : Tensor = aten::softmax(%input.5, %80, %72)
参数含义:
%input2.1 (Tensor): 激活后结果。
%input.5 (Tensor): 需要激活的Tensor。
%80 (int): 指定对输入Tensor进行运算的轴。
%72 (str): 类型,默认为None。
"""
if "softmax" in mapper.dygraph_name_id:
mapper.dygraph_name_id["softmax"] += 1
else:
mapper.dygraph_name_id["softmax"] = 0
softmax_name = "softmax" + str(mapper.dygraph_name_id["softmax"])
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [softmax_name, output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
    # handle input 0, i.e. %input.5
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
layer_attrs["axis"] = mapper.attrs[inputs_name[1]]
graph.add_layer(
"paddle.nn.Softmax",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
def aten_softplus(mapper, graph, node):
""" 构造softplus激活的PaddleLayer。
TorchScript示例:
%54 : Tensor = aten::softplus(%x.31, %30, %29)
参数含义:
%54 (Tensor): 激活后结果。
%x.31 (Tensor): 需要激活的Tensor。
%30 (int): beta。
%29 (int): 阈值。
"""
if "softplus" in mapper.dygraph_name_id:
mapper.dygraph_name_id["softplus"] += 1
else:
mapper.dygraph_name_id["softplus"] = 0
softplus_name = "softplus" + str(mapper.dygraph_name_id["softplus"])
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [softplus_name, output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
# 处理输入0,即%x.31
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
layer_attrs["beta"] = mapper.attrs[inputs_name[1]]
layer_attrs["threshold"] = mapper.attrs[inputs_name[2]]
graph.add_layer(
"paddle.nn.Softplus",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
def aten_stack(mapper, graph, node):
""" 构造堆叠Tensor的PaddleLayer。
TorchScript示例:
%x.222 : Tensor = aten::stack(%32, %7)
参数含义:
%x.222 (Tensor): 输出,堆叠后的结果。
%i.12 (Tensor): 需要堆叠的Tensor组成的Tensor。
%7 (int): 堆叠的轴。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
    # handle input 0, i.e. %32
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
    # handle input 1, i.e. %7
if inputs_name[1] in mapper.attrs:
layer_attrs["axis"] = mapper.attrs[inputs_name[1]]
else:
mapper._check_input(graph, inputs_node[1], inputs_name[1],
current_outputs)
layer_inputs["axis"] = inputs_name[1]
current_inputs.append(inputs_name[1])
graph.add_layer(
"paddle.stack",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
def aten_sub(mapper, graph, node):
""" 构造数值相减的PaddleLayer。
TorchScript示例:
%result.3 : Tensor = aten::relu(%input.5)
%840 : int = aten::sub(%839, %836)
参数含义:
%result.3 (Tensor): 输出,ReLU后的结果。
%result.5 (Tensor): 需要ReLU的Tensor。
注意: inplace这个参数在paddle中未实现
%840 (-): 相减结果。
%839 (-): 输入数值 x。
%836 (-): 输入数值 y。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
......@@ -1046,27 +2987,28 @@ def aten_relu(mapper, graph, node):
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
    # handle input 0, i.e. %839
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# 处理输入1,即%836
mapper._check_input(
graph, inputs_node[1], inputs_name[1], current_outputs, add_dim=True)
layer_inputs["y"] = inputs_name[1]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
    graph.add_layer("prim.sub", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_t(mapper, graph, node):
    """ Construct a PaddleLayer that transposes a matrix.
    TorchScript example:
        %109 : Tensor = aten::t(%102)
    Parameter meanings:
        %109 (Tensor): output, the transposed matrix.
        %102 (Tensor): the Tensor to transpose.
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
......@@ -1074,30 +3016,36 @@ def aten_relu_(mapper, graph, node):
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
    # handle input 0, i.e. %102
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
graph.add_layer(
"fluid.layers.relu", inputs=layer_inputs, outputs=layer_outputs)
"fluid.layers.transpose",
inputs=layer_inputs,
outputs=layer_outputs,
perm=[1, 0])
return current_inputs, current_outputs
def aten_tanh(mapper, graph, node):
    """ Construct a PaddleLayer for the tanh activation.
    TorchScript example:
        %55 : Tensor = aten::tanh(%54)
    Parameter meanings:
        %55 (Tensor): output, the result of tanh.
        %54 (Tensor): the Tensor to apply tanh to.
"""
if "tanh" in mapper.dygraph_name_id:
mapper.dygraph_name_id["tanh"] += 1
else:
mapper.dygraph_name_id["tanh"] = 0
tanh_name = "tanh" + str(mapper.dygraph_name_id["tanh"])
output_name = mapper._get_outputs_name(node)[0]
    layer_outputs = [tanh_name, output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
......@@ -1109,22 +3057,20 @@ def aten_relu6(mapper, graph, node):
current_inputs = list(layer_inputs.values())
graph.add_layer(
"fluid.layers.relu6",
inputs=layer_inputs,
outputs=layer_outputs,
threshold=6.0)
"paddle.nn.Tanh", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_split(mapper, graph, node):
    """ Construct a PaddleLayer that splits a Tensor.
    TorchScript example:
        %160 : Tensor[] = aten::split(%159, %135, %123)
    Parameter meanings:
        %160 (Tensor[]): output, the list of split Tensors.
        %159 (Tensor): the Tensor to split.
        %135 (int/list): the number of splits or the section sizes.
        %123 (int): the axis.
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
......@@ -1133,30 +3079,40 @@ def aten_reshape(mapper, graph, node):
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
    # handle input 0, i.e. %159
    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
    layer_inputs["input"] = inputs_name[0]
    # handle input 2, i.e. %123
    mapper._check_input(graph, inputs_node[2], inputs_name[2], current_outputs)
    layer_inputs["dim"] = inputs_name[2]
    # handle input 1, i.e. %135
    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
    input_type = list(node.inputs())[1].type()
    if "[]" in str(input_type):
        layer_inputs["num_or_sections"] = inputs_name[1]
    else:
        layer_attrs["num_or_sections"] = 1
    # collect the inputs of the current node
    current_inputs = list(layer_inputs.values())
    graph.add_layer(
"fluid.layers.split",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
def aten_transpose(mapper, graph, node):
    """ Construct a PaddleLayer that swaps two dimensions of a Tensor.
    TorchScript example:
        %715 : Tensor = aten::transpose(%x.21, %704, %705)
    Parameter meanings:
        %715 (Tensor): output, the transposed Tensor.
        %x.21 (Tensor): the Tensor to transpose.
        %704 (int): the first dimension to swap.
        %705 (int): the second dimension to swap.
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
......@@ -1165,61 +3121,113 @@ def aten_select(mapper, graph, node):
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
    # handle input 0, i.e. %x.21
    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# 处理输入1,即%704
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
dim1 = inputs_name[1]
# 处理输入2,即%705
mapper._check_input(graph, inputs_node[2], inputs_name[2], current_outputs)
layer_inputs["index"] = inputs_name[2]
dim2 = inputs_name[2]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
graph.add_layer(
"prim.select",
"fluid.layers.shape",
inputs={"input": inputs_name[0]},
outputs=[output_name + "_shape"])
current_outputs.append(output_name + "_shape")
graph.add_layer(
"prim.len",
inputs={"input": output_name + "_shape"},
outputs=[output_name + "_len"])
current_outputs.append(output_name + "_len")
current_inputs.append(output_name + "_shape")
graph.add_layer(
"prim.len2list",
inputs={"len": output_name + "_len"},
outputs=[output_name + "_list"])
current_outputs.append(output_name + "_list")
current_inputs.append(output_name + "_len")
graph.add_layer(
"prim.check_dim",
inputs={"len": output_name + "_len",
"dim": dim1},
outputs=[dim1 + "_new"])
graph.add_layer(
"prim.check_dim",
inputs={"len": output_name + "_len",
"dim": dim2},
outputs=[dim2 + "_new"])
graph.add_layer(
"prim.replaceitem",
inputs={
"list": output_name + "_list",
"index": dim1 + "_new",
"item": dim2 + "_new"
},
outputs=[])
graph.add_layer(
"prim.replaceitem",
inputs={
"list": output_name + "_list",
"index": dim2 + "_new",
"item": dim1 + "_new"
},
outputs=[])
graph.add_layer(
"fluid.layers.transpose",
inputs=layer_inputs,
outputs=current_outputs,
**layer_attrs)
outputs=layer_outputs,
perm=output_name + "_list")
return current_inputs, current_outputs
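# aten::transpose swaps two dimensions, while fluid.layers.transpose needs a
# full permutation, so the layers above build `perm` at run time by swapping
# dim1 and dim2 inside list(range(rank)). Sketch for a rank-4 input and
# transpose(x, 1, 3):
#
#     perm = [0, 1, 2, 3]
#     perm[1], perm[3] = perm[3], perm[1]    # -> [0, 3, 2, 1]
#     out = fluid.layers.transpose(x, perm=perm)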
def aten_to(mapper, graph, node):
    """ Construct a PaddleLayer that converts a Tensor's type.
    TorchScript example:
        %30 : Tensor = aten::to(%extended_attention_mask.1, %12, %5, %5, %4)
    Parameter meanings:
        %30 (Tensor): output, the converted Tensor.
        %extended_attention_mask.1 (Tensor): the Tensor to convert.
        %12 (int): the target type.
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
    # handle input 0, i.e. %extended_attention_mask.1
    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
# 处理输入1,即%12
if len(inputs_name) == 6:
layer_attrs["dtype"] = dtype_dict[mapper.attrs[inputs_name[2]]]
else:
layer_attrs["dtype"] = dtype_dict[mapper.attrs[inputs_name[1]]]
graph.add_layer("prim.shape", inputs=layer_inputs, outputs=layer_outputs)
graph.add_layer(
"fluid.layers.cast",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
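# Sketch of the emitted cast (dtype value assumed): TorchScript encodes the
# target type as a scalar-type enum which dtype_dict translates, e.g. 6 ->
# "float32", so the generated call is roughly:
#
#     out = fluid.layers.cast(x, dtype="float32")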
def aten_type_as(mapper, graph, node):
    """ Construct a PaddleLayer that casts a Tensor to another Tensor's type.
    TorchScript example:
        %57 : Tensor = aten::type_as(%56, %mask.1)
    Parameter meanings:
        %57 (Tensor): output, the Tensor with the changed type.
        %56 (Tensor): the Tensor whose type is changed.
        %mask.1 (Tensor): the Tensor whose type is matched.
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
......@@ -1227,93 +3235,160 @@ def aten_slice(mapper, graph, node):
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
    # handle input 0, i.e. %56
    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
    # handle input 1, i.e. %mask.1
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
graph.add_layer(
"prim.type",
inputs={"input": inputs_name[1]},
outputs=[inputs_name[1] + "_type"])
layer_inputs["dtype"] = inputs_name[1] + "_type"
current_inputs.append(inputs_name[1])
graph.add_layer("prim.slice", inputs=layer_inputs, outputs=current_outputs)
graph.add_layer(
"fluid.layers.cast", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_unsqueeze(mapper, graph, node):
    """ Construct a PaddleLayer that inserts a dimension.
    TorchScript example:
        %13 : Tensor = aten::unsqueeze(%12, %7)
    Parameter meanings:
        %13 (Tensor): output, the Tensor with the inserted dimension.
        %12 (Tensor): the Tensor to unsqueeze.
        %7 (int): the dimension.
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
    # handle input 0, i.e. %12
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.sub", inputs=layer_inputs, outputs=layer_outputs)
    # handle input 1, i.e. %7
if inputs_name[1] in mapper.attrs:
layer_attrs["axis"] = mapper.attrs[inputs_name[1]]
else:
mapper._check_input(graph, inputs_node[1], inputs_name[1],
current_outputs)
layer_inputs["axis"] = inputs_name[1]
current_inputs.append(inputs_name[1])
graph.add_layer(
"paddle.tensor.unsqueeze",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
def aten_upsample_bilinear2d(mapper, graph, node):
    """ Construct a PaddleLayer for bilinear upsampling.
    TorchScript example:
        %4997 : Tensor = aten::upsample_bilinear2d(%x.13, %4963, %5421, %4995, %4996)
    Parameter meanings:
        %4997 (Tensor): output, the upsampled Tensor.
        %x.13 (Tensor): the Tensor to upsample.
        %4963 (list): the output size.
        %5421 (bool): if True, align the centers of the four corner pixels of the input and output tensors and preserve the corner values.
        %4995 (float): the height scale factor.
        %4996 (float): the width scale factor.
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
    # handle input 0, i.e. %x.13
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
# 处理输入1,即%4963
if inputs_name[1] in mapper.attrs:
layer_attrs["size"] = mapper.attrs[inputs_name[1]]
else:
mapper._check_input(graph, inputs_node[1], inputs_name[1],
current_outputs)
layer_inputs["size"] = inputs_name[1]
current_inputs.append(inputs_name[1])
graph.add_layer(
"fluid.layers.transpose",
"prim.isinstance",
inputs={"input": inputs_name[1]},
outputs=[inputs_name[1] + "_isinstance"],
cls="paddle.fluid.Variable")
graph.add_layer(
"prim.if", {"input": inputs_name[1] + "_isinstance"},
outputs=[inputs_name[0] + "_if1"])
if_layer = graph.layers[list(graph.layers.keys())[-1]]
block = PaddleGraph(if_layer, graph_type="dygraph")
block.add_layer(
"prim.var2list",
inputs={"input": inputs_name[1]},
outputs=[inputs_name[1]])
if_layer.add_block(block)
block = PaddleGraph(if_layer, graph_type="dygraph")
if_layer.add_block(block)
if_layer.inputs["input-0"] = inputs_name[1]
# 处理输入2,即%5421
if inputs_name[2] in mapper.attrs:
layer_attrs["align_corners"] = mapper.attrs[inputs_name[2]]
else:
mapper._check_input(graph, inputs_node[2], inputs_name[2],
current_outputs)
layer_inputs["align_corners"] = inputs_name[2]
current_inputs.append(inputs_name[2])
# 处理输入3和4,构造assert
list_layer_inputs = {}
mapper._check_input(graph, inputs_node[3], inputs_name[3], current_outputs)
list_layer_inputs["key"] = inputs_name[3]
current_inputs.append(inputs_name[3])
mapper._check_input(graph, inputs_node[4], inputs_name[4], current_outputs)
list_layer_inputs["value"] = inputs_name[4]
current_inputs.append(inputs_name[4])
graph.add_layer(
"prim.assert",
inputs=list_layer_inputs,
outputs=[output_name + "_assert"],
type="eq")
layer_inputs["scale_factor"] = inputs_name[3]
layer_attrs["align_mode"] = 0
graph.add_layer(
"paddle.nn.functional.interpolate",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
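# Sketch of the emitted interpolation (size assumed): for size=[28, 28] and
# align_corners=False, the generated call is roughly:
#
#     out = paddle.nn.functional.interpolate(
#         x, size=[28, 28], align_corners=False, align_mode=0)
#
# The prim.if block converts a Variable `size` into a Python list first, and
# the prim.assert checks that the two trailing scale factors agree.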
def aten_view(mapper, graph, node):
    """ Construct a PaddleLayer that reshapes a Tensor.
    TorchScript example:
        %input.152 : Tensor = aten::view(%x.20, %430)
    Parameter meanings:
        %input.152 (Tensor): output, the reshaped Tensor.
        %x.20 (Tensor): the Tensor to view.
        %430 (list): the target shape.
    [Note] view only works on contiguous Tensors, i.e. Tensors stored
    contiguously in memory. Calling transpose, permute and similar ops makes
    a Tensor non-contiguous, after which view can no longer be used;
    contiguous must be called first to obtain a contiguous copy. reshape does
    not require the target Tensor to be contiguous in memory.
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
......@@ -1322,24 +3397,65 @@ def aten_unsqueeze(mapper, graph, node):
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
    # handle input 0, i.e. %x.20
    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["x"] = inputs_name[0]
# 获取当前节点输入、输出的list
current_inputs = list(layer_inputs.values())
    # handle input 1, i.e. %430
if inputs_name[1] in mapper.attrs:
layer_attrs["axes"] = mapper.attrs[inputs_name[1]]
layer_attrs["shape"] = mapper.attrs[inputs_name[1]]
else:
mapper._check_input(graph, inputs_node[1], inputs_name[1],
current_outputs)
layer_attrs["axes"] = inputs_name[1]
layer_inputs["shape"] = inputs_name[1]
current_inputs.append(inputs_name[1])
graph.add_layer(
"fluid.layers.unsqueeze",
"prim.type",
inputs={"input": inputs_name[0]},
outputs=[inputs_name[0] + "_type"])
graph.add_layer(
"prim.str",
inputs={"input": inputs_name[0] + "_type"},
outputs=[inputs_name[0] + "_type"])
graph.add_layer(
"prim.eq",
inputs={"x": inputs_name[0] + "_type"},
outputs=[inputs_name[0] + "_cond"],
y=string("VarType.BOOL"))
graph.add_layer(
"prim.if", {'input': inputs_name[0] + "_cond"},
outputs=[inputs_name[0] + "_if1"])
if_layer = graph.layers[list(graph.layers.keys())[-1]]
block = PaddleGraph(if_layer, graph_type="dygraph")
block.add_layer(
"fluid.layers.cast",
inputs={"x": inputs_name[0]},
outputs=[inputs_name[0]],
dtype=string("int32"))
if_layer.add_block(block)
block = PaddleGraph(if_layer, graph_type="dygraph")
if_layer.add_block(block)
if_layer.inputs["input-0"] = inputs_name[0]
graph.add_layer(
"fluid.layers.reshape",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
graph.add_layer(
"prim.if", {'input': inputs_name[0] + "_cond"},
outputs=[inputs_name[0] + "_if2"])
if_layer = graph.layers[list(graph.layers.keys())[-1]]
block = PaddleGraph(if_layer, graph_type="dygraph")
block.add_layer(
"fluid.layers.cast",
inputs={"x": layer_outputs[0]},
outputs=layer_outputs,
dtype=string("bool"))
if_layer.add_block(block)
block = PaddleGraph(if_layer, graph_type="dygraph")
if_layer.add_block(block)
if_layer.inputs["input-0"] = layer_outputs[0]
return current_inputs, current_outputs
......@@ -1370,7 +3486,7 @@ def aten_warn(mapper, graph, node):
else:
mapper._check_input(graph, inputs_node[1], inputs_name[1],
current_outputs)
layer_attrs["stacklevel"] = inputs_name[1]
layer_inputs["stacklevel"] = inputs_name[1]
current_inputs.append(inputs_name[1])
graph.add_layer(
......@@ -1379,3 +3495,76 @@ def aten_warn(mapper, graph, node):
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
def aten_where(mapper, graph, node):
""" 构造返回一个根据输入condition, 选择x或y的元素组成的多维Tensor的PaddleLayer,该节点实现out = x + y。
TorchScript示例:
%input.4 : Tensor = aten::where(%209, %w0.2, %210)
参数含义:
%input.4 (Tensor): 选择的结果。
%209 (Tensor): 条件。
%w0.2 (Tensor): 输入数值 x。
%210 (Tensor): 输入数值 y。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
# 处理输入0,即%209
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
layer_inputs["condition"] = inputs_name[0]
# 处理输入1,即%w0.2
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs)
layer_inputs["x"] = inputs_name[1]
    # handle input 2, i.e. %210
mapper._check_input(graph, inputs_node[2], inputs_name[2], current_outputs)
layer_inputs["y"] = inputs_name[2]
# 获取当前节点输入的list
current_inputs = list(layer_inputs.values())
graph.add_layer("paddle.where", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def aten_zeros(mapper, graph, node):
""" 构造创建固定形状、数据类型且值全为0的Tensor的PaddleLayer。
TorchScript示例:
%input.49 : Tensor = aten::zeros(%23, %8, %6, %24, %5)
参数含义:
%input.49 (Tensor): 输出,全0的Tensor。
%23 (list): 形状。
%8 (int): 类型dtype。
%6 (int): layout。
%4995 (Device): 设备。
%4995 (bool): 是否计算梯度。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# 获取当前节点输出的list
current_outputs = [output_name]
current_inputs = []
    # handle input 0, i.e. %23, the shape
if inputs_name[0] in mapper.attrs:
layer_attrs["shape"] = mapper.attrs[inputs_name[0]]
else:
mapper._check_input(graph, inputs_node[0], inputs_name[0],
current_outputs)
layer_inputs["shape"] = inputs_name[0]
current_inputs.append(inputs_name[0])
# 处理输入1,即%8,代表dtype
layer_attrs["dtype"] = dtype_dict[mapper.attrs[inputs_name[1]]]
graph.add_layer(
"paddle.zeros",
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
return current_inputs, current_outputs
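# Sketch of the emitted creation op (attribute values assumed):
#
#     out = paddle.zeros(shape=[1, 64, 28, 28], dtype="float32")
#
# The layout/device/requires_grad arguments of the TorchScript call have no
# Paddle counterpart here and are dropped.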
......@@ -13,6 +13,7 @@
# limitations under the License.
import torch
import numpy as np
from x2paddle.core.util import *
......@@ -27,14 +28,56 @@ def prim_Constant(mapper, graph, node):
output_name = mapper._get_outputs_name(node)[0]
output = list(node.outputs())[0]
value = output.toIValue()
output_type = output.type()
if isinstance(value, str):
value = string(value)
if str(output_type) == "Tensor":
# value = "paddle.to_tensor({})".format(value)
value = "{}".format(value)
if "inf" in str(value):
t = str(type(value)).split("'")[1]
if str(value).startswith("-"):
value = "-{}({})".format(t, string(str(value)[1:]))
else:
value = "{}({})".format(t, string(str(value)))
if "9223372036854775807" in str(value):
import math
value = int(math.pow(2, 31) - 1)
mapper.attrs[output_name] = value
graph.add_layer(
"prim.constant", inputs={}, outputs=[output_name], value=value)
return [], [output_name]
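# Examples of the special-casing above (illustrative):
#
#     float("inf")        -> the string "float('inf')", so the generated code
#                            re-creates the value instead of inlining `inf`
#     9223372036854775807 -> 2**31 - 1, clamping the int64 sentinel commonly
#                            used by PyTorch to the int32 range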
def prim_data(mapper, graph, node):
""" 构造Tensor的PaddleLayer。
TorchScript示例:
%4336 : Tensor = prim::data(%out.6)
参数含义:
%4336 (Tensor): 输出Tensor。
%out.6 (Tensor): 原始Tensor。
【注意】Paddle中无此用法,所以此处翻译成赋值。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
    # Collect the outputs of the current node
    current_outputs = [output_name]
    # Process input 0, i.e. %out.6
    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
    layer_inputs["input"] = inputs_name[0]
    # Collect the inputs of the current node
    current_inputs = list(layer_inputs.values())
graph.add_layer("prim.equal", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def prim_GetAttr(mapper, graph, node):
""" 获取attribute信息。
......@@ -61,11 +104,56 @@ def prim_GetAttr(mapper, graph, node):
param = getattr(part_script, field_name)
if isinstance(param, torch.Tensor):
param = param.detach().numpy()
if len(param.shape) == 0:
param = np.reshape(param, 1)
if str(param.dtype) == "uint8":
param = param.astype("int32")
mapper.pytorch_params[output_name] = param
part_script = param
return [], [output_name]
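# Editor's note on the normalization above: 0-dim tensors are promoted to
# shape (1,) via np.reshape(param, 1), and uint8 parameters are widened to
# int32 before being stored in mapper.pytorch_params.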
def prim_If(mapper, graph, node):
""" 构造if控制流的PaddleLayer。
TorchScript示例:
%input.5 : Tensor = prim::If(%107)
block0():
%109 : Tensor = aten::t(%102)
%ret.2 : Tensor = aten::addmm(%103, %101, %109, %104, %104)
-> (%ret.2)
block1():
%111 : Tensor = aten::t(%102)
...
-> (%output.4)
参数含义:
%107 (bool): if判断条件。
%input.5 (Tensor): if控制流的输出,与%output.4对应。
"""
outputs_name = mapper._get_outputs_name(node)
node_outputs = outputs_name.copy()
current_outputs = outputs_name.copy()
input_node = list(node.inputs())[0].node()
script_input_unique_id = list(node.inputs())[0].unique()
input_node_name = mapper.outputs_info[script_input_unique_id]
mapper._check_input(graph, input_node, input_node_name, current_outputs)
graph.add_layer("prim.if", {'input': input_node_name}, node_outputs)
current_layer = list(graph.layers.values())[-1]
block0 = list(node.blocks())[0]
block0_graph, graph_inputs0 = mapper.traverse(block0, current_layer)
len0 = 0
for i, input_name in enumerate(graph_inputs0):
current_layer.inputs['input-{}'.format(i)] = input_name
len0 = i
current_layer.add_block(block0_graph)
block1 = list(node.blocks())[1]
block1_graph, graph_inputs1 = mapper.traverse(block1, current_layer)
for i, input_name in enumerate(graph_inputs1):
current_layer.inputs['input-{}'.format(len0 + 1 + i)] = input_name
current_layer.add_block(block1_graph)
return list(current_layer.inputs.values()), current_outputs
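# Sketch of the emitted control flow (editor's illustration): block0 and
# block1 become the two branches of the generated code,
#     if <input>:
#         ...block0...
#     else:
#         ...block1...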
def prim_ListConstruct(mapper, graph, node):
""" 构造list的PaddleLayer。
......@@ -92,28 +180,31 @@ def prim_ListConstruct(mapper, graph, node):
return current_inputs, current_outputs
def prim_ListUnpack(mapper, graph, node):
    """ Construct a PaddleLayer that unpacks the elements of a list.
    TorchScript example:
        %x1.4 : Tensor, %x2.4 : Tensor = prim::ListUnpack(%4354)
    Parameter meanings:
        %x1.4 (Tensor): output, the first element of the list.
        %x2.4 (Tensor): output, the second element of the list.
        %4354 (list): the input list.
    """
    outputs_name = mapper._get_outputs_name(node)
    layer_outputs = outputs_name.copy()
    layer_inputs = {}
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # Collect the outputs of the current node
    current_outputs = layer_outputs.copy()
    # Process input 0, i.e. %4354
    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
    layer_inputs["input"] = inputs_name[0]
    # Collect the inputs of the current node
    current_inputs = list(layer_inputs.values())

    graph.add_layer(
        "prim.list_unpack", inputs=layer_inputs, outputs=layer_outputs)
    mapper.split_len[list(layer_inputs.values())[0]] = len(layer_outputs)
    return current_inputs, current_outputs
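# Emitted source sketch (editor's illustration): for outputs x1, x2 and input
# list x0, prim.list_unpack renders as
#     x1, x2 = x0
# split_len records the number of outputs so a later fluid.layers.split can
# recover its num_or_sections attribute.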
......@@ -180,68 +271,93 @@ def prim_Loop(mapper, graph, node):
return list(current_layer.inputs.values()), node_outputs
def prim_min(mapper, graph, node):
    """ Construct a PaddleLayer for min.
    TorchScript example:
        %87 : int = prim::min(%86)
    Parameter meanings:
        %86 (list): the input.
        %87 (int): the output.
    """
    output_name = mapper._get_outputs_name(node)[0]
    layer_outputs = [output_name]
    layer_inputs = {}
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # Collect the outputs of the current node
    current_outputs = [output_name]
    # Process input 0, i.e. %86
    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
    layer_inputs["input"] = inputs_name[0]
    # Collect the inputs of the current node
    current_inputs = list(layer_inputs.values())

    graph.add_layer("prim.min", inputs=layer_inputs, outputs=layer_outputs)
    return current_inputs, current_outputs
def prim_NumToTensor(mapper, graph, node):
    """ Construct a PaddleLayer that converts a number to a Tensor.
    TorchScript example:
        %other.2 : Tensor = prim::NumToTensor(%1736)
    Parameter meanings:
        %other.2 (Tensor): the output.
        %1736 (-): the input.
    """
    output_name = mapper._get_outputs_name(node)[0]
    layer_outputs = [output_name]
    layer_inputs = {}
    layer_attrs = {}
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # Collect the outputs of the current node
    current_outputs = [output_name]
    # Process input 0, i.e. %1736
    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
    if inputs_node[0].kind() == "aten::size":
        layer_inputs["input"] = inputs_name[0]
        # Collect the inputs of the current node
        current_inputs = list(layer_inputs.values())
        graph.add_layer(
            "prim.equal", inputs=layer_inputs, outputs=layer_outputs)
    else:
        layer_inputs["value"] = inputs_name[0]
        # Collect the inputs of the current node
        current_inputs = list(layer_inputs.values())
        input_type = list(node.inputs())[0].type()
        layer_attrs["dtype"] = input_type
        layer_attrs["persistable"] = True
        layer_attrs["shape"] = [1]
        graph.add_layer(
            "fluid.layers.create_global_var",
            inputs=layer_inputs,
            outputs=layer_outputs,
            **layer_attrs)
    return current_inputs, current_outputs
def prim_RaiseException(mapper, graph, node):
""" 构造抛出异常的PaddleLayer。
TorchScript示例:
= prim::RaiseException(%76)
参数含义:
%76 (str): 异常信息。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
    # Collect the outputs of the current node
    current_outputs = [output_name]
    # Process input 0, i.e. %76
    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
    layer_inputs["input"] = inputs_name[0]
    # Collect the inputs of the current node
    current_inputs = list(layer_inputs.values())
graph.add_layer(
"prim.exception", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
......@@ -326,7 +442,8 @@ def prim_shape(mapper, graph, node):
    # Collect the inputs of the current node
    current_inputs = list(layer_inputs.values())

    graph.add_layer(
        "fluid.layers.shape", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
......@@ -381,6 +498,34 @@ def prim_TupleUnpack(mapper, graph, node):
return current_inputs, current_outputs
def prim_unchecked_cast(mapper, graph, node):
""" 构造确认类型的PaddleLayer。
TorchScript示例:
%size.64 : int[] = prim::unchecked_cast(%size.63)
参数含义:
%size.64 (-): 输出。
%size.63 (-): 输入。
【注意】Paddle中无此用法,所以此处翻译成赋值。
"""
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
    # Collect the outputs of the current node
    current_outputs = [output_name]
    # Process input 0, i.e. %size.63
    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
    layer_inputs["input"] = inputs_name[0]
    # Collect the inputs of the current node
    current_inputs = list(layer_inputs.values())
graph.add_layer("prim.equal", inputs=layer_inputs, outputs=layer_outputs)
return current_inputs, current_outputs
def prim_Uninitialized(mapper, graph, node):
""" 构造表示编译器永远不会使用的值的PaddleLayer,该节点转换为None。
......
......@@ -62,28 +62,61 @@ def prim_append(layer, indent=1, init_func=[], forward_func=[]):
def prim_assert(layer, indent=1, init_func=[], forward_func=[]):
if layer.attrs["type"] == "eq":
if isinstance(layer.attrs["value"], list):
values = get_value(layer, "key")
if "value" in layer.attrs:
values = layer.attrs["value"]
if isinstance(values, list):
s = ""
for v in layer.attrs["value"]:
s += "{} == {} or ".format(layer.attrs["key"], v)
for v in values:
s += "{} == {} or ".format(get_value(layer, "key"), v)
if len(s) > 0:
s = s[:-4]
line = "assert {}, \'The {} must be {}!\'".format(
s, layer.attrs["key"], layer.attrs["value"])
s, get_value(layer, "key"), get_value(layer, "value"))
else:
line = "assert {} == {}, \'The {} must be {}!\'".format(
layer.attrs["key"], layer.attrs["value"], layer.attrs["key"],
layer.attrs["value"])
get_value(layer, "key"),
get_value(layer, "value"),
get_value(layer, "key"), get_value(layer, "value"))
else:
raise Exception("Not implement yet!")
forward_func.extend(gen_codes([line], indent=indent))
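# Emitted source sketch (editor's illustration): an "eq" assert whose key is
# x68 and whose value is the list [2, 3] renders as
#     assert x68 == 2 or x68 == 3, 'The x68 must be [2, 3]!'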
def prim_check_dim(layer, indent=1, init_func=[], forward_func=[]):
lines = []
lines.append("if {} < 0:".format(get_value(layer, "dim")))
lines.append(" {} = {} + {}".format(layer.outputs[
0], get_value(layer, "dim"), get_value(layer, "len")))
lines.append("else:")
lines.append(" {} = {}".format(layer.outputs[0], get_value(layer,
"dim")))
forward_func.extend(gen_codes(lines, indent=indent))
def prim_constant(layer, indent=1, init_func=[], forward_func=[]):
line = "{} = {}".format(layer.outputs[0], layer.attrs["value"])
forward_func.extend(gen_codes([line], indent=indent))
def prim_contain(layer, indent=1, init_func=[], forward_func=[]):
line = "{} = {} in {}".format(layer.outputs[0],
get_value(layer, "element"),
get_value(layer, "input"))
forward_func.extend(gen_codes([line], indent=indent))
def prim_dict(layer, indent=1, init_func=[], forward_func=[]):
line = "{} = dict()".format(layer.outputs[0])
forward_func.extend(gen_codes([line], indent=indent))
def prim_div(layer, indent=1, init_func=[], forward_func=[]):
line = "{} = {} / {}".format(layer.outputs[0],
get_value(layer, "x"), get_value(layer, "y"))
forward_func.extend(gen_codes([line], indent=indent))
def prim_eq(layer, indent=1, init_func=[], forward_func=[]):
line = "{} = {} == {}".format(layer.outputs[0],
get_value(layer, "x"), get_value(layer, "y"))
......@@ -100,6 +133,36 @@ def prim_exception(layer, indent=1, init_func=[], forward_func=[]):
forward_func.extend(gen_codes([line], indent=indent))
def prim_float(layer, indent=1, init_func=[], forward_func=[]):
line = "{} = float({})".format(layer.outputs[0], get_value(layer, "input"))
forward_func.extend(gen_codes([line], indent=indent))
def prim_floor(layer, indent=1, init_func=[], forward_func=[]):
line = "{} = math.floor({})".format(layer.outputs[0],
get_value(layer, "input"))
forward_func.extend(gen_codes([line], indent=indent))
def prim_floordiv(layer, indent=1, init_func=[], forward_func=[]):
line = "{} = {} // {}".format(layer.outputs[0],
get_value(layer, "x"), get_value(layer, "y"))
forward_func.extend(gen_codes([line], indent=indent))
def prim_getitem(layer, indent=1, init_func=[], forward_func=[]):
line = "{} = {}[{}]".format(layer.outputs[0],
get_value(layer, "list"),
get_value(layer, "index"))
forward_func.extend(gen_codes([line], indent=indent))
def prim_gt(layer, indent=1, init_func=[], forward_func=[]):
line = "{} = {} > {}".format(layer.outputs[0],
get_value(layer, "x"), get_value(layer, "y"))
forward_func.extend(gen_codes([line], indent=indent))
def prim_if(layer, indent=1, init_func=[], forward_func=[]):
line = "if {} :".format(get_value(layer, "input"))
forward_func.extend(gen_codes([line], indent=indent))
......@@ -109,27 +172,40 @@ def prim_if(layer, indent=1, init_func=[], forward_func=[]):
forward_func.extend(b_forward_lines)
block = layer.blocks[1]
if len(block.layers) > 0:
line = "else:"
forward_func.extend(gen_codes([line], indent=indent))
b_init_lines, b_forward_lines = block.gen_dygraph_code(
indent=indent + 1)
if len(b_forward_lines) != 0:
line = "else:"
forward_func.extend(gen_codes([line], indent=indent))
init_func.extend(b_init_lines)
forward_func.extend(b_forward_lines)
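# Emitted source sketch (editor's illustration): for condition x2 this yields
#     if x2 :
#         <block0 lines>
#     else:
#         <block1 lines>
# and the else branch is omitted entirely when block1 generates no lines.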
def prim_int(layer, indent=1, init_func=[], forward_func=[]):
    line = "{} = int({})".format(layer.outputs[0], get_value(layer, "input"))
    forward_func.extend(gen_codes([line], indent=indent))


def prim_is(layer, indent=1, init_func=[], forward_func=[]):
    line = "{} = {} is {}".format(layer.outputs[0],
get_value(layer, "x"), get_value(layer, "y"))
forward_func.extend(gen_codes([line], indent=indent))
def prim_isinstance(layer, indent=1, init_func=[], forward_func=[]):
line = "{} = isinstance({}, {})".format(layer.outputs[0],
get_value(layer, "input"),
layer.attrs["cls"])
forward_func.extend(gen_codes([line], indent=indent))
def prim_isnot(layer, indent=1, init_func=[], forward_func=[]):
line = "{} = {} is not {}".format(layer.outputs[0],
get_value(layer, "x"),
get_value(layer, "y"))
forward_func.extend(gen_codes([line], indent=indent))
def prim_le(layer, indent=1, init_func=[], forward_func=[]):
line = "{} = {} <= {}".format(layer.outputs[0],
get_value(layer, "x"), get_value(layer, "y"))
......@@ -141,6 +217,14 @@ def prim_len(layer, indent=1, init_func=[], forward_func=[]):
forward_func.extend(gen_codes([line], indent=indent))
def prim_len2list(layer, indent=1, init_func=[], forward_func=[]):
lines = []
lines.append("{} = []".format(layer.outputs[0]))
lines.append("for i in range({}):".format(get_value(layer, "len")))
lines.append(" {}.append(i)".format(layer.outputs[0]))
forward_func.extend(gen_codes(lines, indent=indent))
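# Emitted source sketch (editor's illustration) for output x6 and length x5:
#     x6 = []
#     for i in range(x5):
#         x6.append(i)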
def prim_lt(layer, indent=1, init_func=[], forward_func=[]):
line = "{} = {} < {}".format(layer.outputs[0],
get_value(layer, "x"), get_value(layer, "y"))
......@@ -157,6 +241,11 @@ def prim_list(layer, indent=1, init_func=[], forward_func=[]):
forward_func.extend(gen_codes([line], indent=indent))
def prim_list_unpack(layer, indent=1, init_func=[], forward_func=[]):
line = "{} = {}".format(", ".join(layer.outputs), get_value(layer, "input"))
forward_func.extend(gen_codes([line], indent=indent))
def prim_loop(layer, indent=1, init_func=[], forward_func=[]):
loop_range = get_value(layer, "input")
line = "for {} in range({}):".format(layer.outputs[1], loop_range)
......@@ -194,12 +283,33 @@ def prim_not(layer, indent=1, init_func=[], forward_func=[]):
forward_func.extend(gen_codes([line], indent=indent))
def prim_or(layer, indent=1, init_func=[], forward_func=[]):
line = "{} = {} or {}".format(layer.outputs[0],
get_value(layer, "x"), get_value(layer, "y"))
forward_func.extend(gen_codes([line], indent=indent))
def prim_replaceitem(layer, indent=1, init_func=[], forward_func=[]):
line = "{}[{}] = {}".format(
get_value(layer, "list"),
get_value(layer, "index"), get_value(layer, "item"))
forward_func.extend(gen_codes([line], indent=indent))
def prim_requires_grad(layer, indent=1, init_func=[], forward_func=[]):
line = "{} = not {}.stop_gradient".format(layer.outputs[0],
get_value(layer, "input"))
forward_func.extend(gen_codes([line], indent=indent))
def prim_rsub(layer, indent=1, init_func=[], forward_func=[]):
line = "{} = {} - {} * {}".format(layer.outputs[0],
get_value(layer, "y"),
get_value(layer, "x"),
get_value(layer, "alpha"))
forward_func.extend(gen_codes([line], indent=indent))
def prim_select(layer, indent=1, init_func=[], forward_func=[]):
line = "{} = {}[".format(layer.outputs[0], get_value(layer, "input"))
for dim in range(layer.attrs["dim"]):
......@@ -213,8 +323,17 @@ def prim_set_attr(layer, indent=1, init_func=[], forward_func=[]):
forward_func.extend(gen_codes([line], indent=indent))
def prim_set_item(layer, indent=1, init_func=[], forward_func=[]):
line = "{}[{}] = {}".format(
get_value(layer, "dict"),
get_value(layer, "key"), get_value(layer, "value"))
forward_func.extend(gen_codes([line], indent=indent))
def prim_shape_dim(layer, indent=1, init_func=[], forward_func=[]):
line = "{} = fluid.layers.shape({})[{}]".format(layer.outputs[0],
get_value(layer, "input"),
get_value(layer, "dim"))
forward_func.extend(gen_codes([line], indent=indent))
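# Emitted source sketch (editor's illustration) for input x0 and dim 2:
#     x7 = fluid.layers.shape(x0)[2]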
......@@ -227,6 +346,11 @@ def prim_slice(layer, indent=1, init_func=[], forward_func=[]):
forward_func.extend(gen_codes([line], indent=indent))
def prim_str(layer, indent=1, init_func=[], forward_func=[]):
line = "{} = str({})".format(layer.outputs[0], get_value(layer, "input"))
forward_func.extend(gen_codes([line], indent=indent))
def prim_sub(layer, indent=1, init_func=[], forward_func=[]):
line = "{} = {} - {}".format(layer.outputs[0],
get_value(layer, "x"), get_value(layer, "y"))
......@@ -249,6 +373,17 @@ def prim_tuple_unpack(layer, indent=1, init_func=[], forward_func=[]):
forward_func.extend(gen_codes([line], indent=indent))
def prim_type(layer, indent=1, init_func=[], forward_func=[]):
line = "{} = {}.dtype".format(layer.outputs[0], get_value(layer, "input"))
forward_func.extend(gen_codes([line], indent=indent))
def prim_var2list(layer, indent=1, init_func=[], forward_func=[]):
line = "{} = {}.numpy().tolist()".format(layer.outputs[0],
get_value(layer, "input"))
forward_func.extend(gen_codes([line], indent=indent))
def prim_warnings(layer, indent=1, init_func=[], forward_func=[]):
lines = ["import warnings"]
line = "warnings.warn({}, stacklevel={})".format(
......
......@@ -31,6 +31,7 @@ class PyTorchOpMapper(OpMapper):
        self.attrs = {}  # key: node name; value: attribute value
        self.output_index = 0
        self.dygraph_name_id = {}  # id used in dygraph __init__ output names; key: kernel type, value: id
        self.split_len = {}  # length of each split
        # Perform the conversion
self.check_op(decoder.graph)
self.graph, _ = self.traverse(decoder.graph)
......@@ -80,6 +81,7 @@ class PyTorchOpMapper(OpMapper):
node = ivalue.node()
if str(ivalue.type()) != "Tensor":
graph.set_name(str(ivalue.type()).split(".")[-1])
continue
inputs, outputs = self.data(graph, node, ivalue.unique())
        # Convert the intermediate nodes
for node in script_graph.nodes():
......@@ -108,9 +110,19 @@ class PyTorchOpMapper(OpMapper):
parent_layer=parent_layer,
index=i)
_update_graph_inputs("equal", inputs, outputs)
        # Set the graph's parameters and output nodes
if isinstance(script_graph, torch._C.Graph):
graph.set_parameters(self.paddle_params)
if hasattr(script_graph, 'return_node'):
inputs_name, inputs_node = self._get_inputs_name(
script_graph.return_node())
graph.outputs = inputs_name
        # Update the split parameters
for layer in graph.layers.values():
if layer.kernel == "fluid.layers.split" and "num_or_sections" in layer.attrs:
layer.attrs["num_or_sections"] = self.split_len[layer.outputs[
0]]
return graph, graph_inputs
def _get_outputs_name(self, node, attr_name=None):
......
......@@ -12,13 +12,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from .adaptive_pool2d_fuser import AdaptivePool2dFuser
from .adaptive_pool2d_fuse_pass import AdaptivePool2dFusePass
from .batchnorm2d_fuser import BatchNorm2dFuser
from .batchnorm2d_fuse_pass import BatchNorm2dFusePass
from .constant_fuser import ConstantFuser
from .constant_fuse_pass import ConstantFusePass
from .dropout_fuser import DropoutFuser
from .dropout_fuse_pass import DropoutFusePass
from .fc_fuser import FcFuser
from .fc_fuse_pass import FcFusePass
from .interpolate_bilinear_fuser import InterpolateBilinearFuser
from .interpolate_bilinear_fuse_pass import InterpolateBilinearFusePass
from .reshape_fuser import ReshapeFuser
from .reshape_fuse_pass import ReshapeFusePass
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion import AdaptivePool2dFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class AdaptivePool2dFusePass(Pass):
name = "adaptive_pool2d_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = AdaptivePool2dFuser()
fuser.operate(graph, match_kind="topo")
# Register the pass
adaptive_pool2d_fuse_pass = AdaptivePool2dFusePass()
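# Editor's note (illustrative, assumed driver code): a registered pass is
# applied to a PaddleGraph as
#     AdaptivePool2dFusePass().apply(graph)
# which rewrites every matched subgraph in place.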
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class AdaptivePool2dFuser(FuseBase):
def __init__(self):
super(AdaptivePool2dFuser, self).__init__(graph_type="dygraph")
def build_pattern(self):
""" 描述需要替换的adaptive pool2d图结构。
adaptive pool2d层模式python实现代码示例:
x68 = fluid.layers.shape(input=x60)
x69 = len(x68)
x70 = x69 <= 2
if x70 :
raise RaiseException('Exception')
x73 = []
x74 = x68[-2: 2147483647: 1]
x75 = len(x74)
x76 = [2, x75]
x77 = min(x76)
for _x79 in range(x77):
x80 = [6, 6][_x79]
x73.append(x80)
x81 = fluid.layers.adaptive_pool2d(input=x60, pool_size=x73, pool_type='avg')
"""
def gen_name(id):
return "x" + str(id)
self.pattern.add_layer(
"fluid.layers.shape",
inputs={'input': "pool-input-0"},
outputs=[gen_name(1)])
self.pattern.add_layer(
"prim.len", inputs={"input": gen_name(1)}, outputs=[gen_name(6)])
self.pattern.add_layer(
"prim.le", inputs={"x": gen_name(6)}, outputs=[gen_name(8)], y=2)
self.pattern.add_layer("prim.if", {'input': gen_name(8)}, [gen_name(9)])
if_layer = self.pattern.layers[list(self.pattern.layers.keys())[-1]]
pattern_block0 = PaddleGraph(if_layer, graph_type="dygraph")
pattern_block0.add_layer(
"prim.exception",
inputs={},
outputs=[gen_name(9)],
input="Exception")
if_layer.add_block(pattern_block0)
pattern_block1 = PaddleGraph(if_layer, graph_type="dygraph")
if_layer.add_block(pattern_block1)
self.pattern.add_layer("prim.list", inputs={}, outputs=[gen_name(10)])
self.pattern.add_layer(
"prim.slice",
inputs={"input": gen_name(1), },
outputs=[gen_name(12)],
start=-1,
end=100,
step=1)
self.pattern.add_layer(
"prim.len", inputs={"input": gen_name(12)}, outputs=[gen_name(14)])
self.pattern.add_layer(
"prim.list",
inputs={"input1": gen_name(14)},
outputs=[gen_name(15)],
input0=2)
self.pattern.add_layer(
"prim.min", inputs={"input": gen_name(15)}, outputs=[gen_name(16)])
self.pattern.add_layer("prim.loop", {'input': gen_name(16)},
[gen_name(17), gen_name(18)])
loop_layer = self.pattern.layers[list(self.pattern.layers.keys())[-1]]
pattern_block = PaddleGraph(loop_layer, graph_type="dygraph")
pattern_block.add_layer(
"prim.getitem",
inputs={"index": gen_name(18)},
outputs=[gen_name(19)],
list=[6, 6])
pattern_block.add_layer(
"prim.append",
inputs={"list": gen_name(10),
"index": gen_name(19)},
outputs=[gen_name(20)])
loop_layer.inputs["input-0"] = gen_name(10)
loop_layer.add_block(pattern_block)
pool_attrs = {'pool_type': string("avg")}
self.pattern.add_layer(
"fluid.layers.adaptive_pool2d",
inputs={'input': "pool-input-0",
"pool_size": gen_name(10)},
outputs=[gen_name(21)],
**pool_attrs)
self.pattern.build(inputs={"input-0": "pool-input-0", })
def insert_new_layer(self, graph, parameters, matches):
parameters = graph.parameters
new_layer = self.gen_new_layer(parameters, matches)
new_layer_id = list(matches.keys())[0]
graph.layers[new_layer_id] = new_layer
matches.pop(new_layer_id)
def gen_new_layer(self, parameters, matches):
layers_id = list(matches.keys())
layer = matches[layers_id[11]]
pool_size = layer.attrs["list"]
layer = matches[layers_id[0]]
input_name = layer.inputs["input"]
layer = matches[layers_id[-1]]
output_name = layer.outputs[0]
pool_type = layer.attrs["pool_type"]
attrs = dict()
attrs["pool_size"] = pool_size
attrs["pool_type"] = pool_type
new_layer = PaddleLayer(
layers_id[0],
"fluid.layers.adaptive_pool2d",
inputs={"input": input_name},
outputs=[output_name],
**attrs)
return new_layer
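    # Sketch of the rewrite (editor's illustration): the matched shape/loop
    # subgraph collapses into a single layer equivalent to
    #     x81 = fluid.layers.adaptive_pool2d(input=x60, pool_size=[6, 6],
    #                                        pool_type='avg')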
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion import BatchNorm2dFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class BatchNorm2dFusePass(Pass):
name = "batchnorm2d_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = BatchNorm2dFuser()
fuser.operate(graph, match_kind="topo")
# Register the pass
batchnorm2d_fuse_pass = BatchNorm2dFusePass()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class BatchNorm2dFuser(FuseBase):
def __init__(self):
super(BatchNorm2dFuser, self).__init__(graph_type="dygraph")
def build_pattern(self):
""" 描述需要替换的batchnorm2d图结构。
batchnorm2d层模式python实现代码示例:
x336 = fluid.layers.shape(input=x334)
x336 = len(x336)
x337 = x336 != 4
if x337 :
raise RaiseException('Exception')
if False :
x351 = fluid.layers.shape(input=x334)
x352 = x351[0]
x353 = len(x351)
x354 = x353 - 2
x357 = x352
for _x356 in range(x354):
x358 = _x356 + 2
x359 = x351[x358]
x360 = x357 * x359
x355 = x360
x361 = x355 == 1
if x361 :
raise RaiseException('Exception')
x364 = self.batchnorm7(x334)
"""
def gen_name(id):
return "x" + str(id)
self.pattern.add_layer(
"fluid.layers.shape",
inputs={'input': "bn-input-0"},
outputs=[gen_name(0)])
self.pattern.add_layer(
"prim.len", inputs={'input': gen_name(0)}, outputs=[gen_name(0)])
self.pattern.add_layer(
"prim.ne", inputs={"x": gen_name(0)}, outputs=[gen_name(1)], y=4)
self.pattern.add_layer("prim.if", {'input': gen_name(1)}, [gen_name(2)])
if_layer1 = self.pattern.layers[list(self.pattern.layers.keys())[-1]]
pattern_block0 = PaddleGraph(if_layer1, graph_type="dygraph")
pattern_block0.add_layer(
"prim.exception",
inputs={},
outputs=[gen_name(3)],
input="Exception")
if_layer1.add_block(pattern_block0)
pattern_block1 = PaddleGraph(if_layer1, graph_type="dygraph")
if_layer1.add_block(pattern_block1)
self.pattern.add_layer("prim.if", {}, [gen_name(4)], input=False)
if_layer2 = self.pattern.layers[list(self.pattern.layers.keys())[-1]]
pattern_block0 = PaddleGraph(if_layer2, graph_type="dygraph")
pattern_block0.add_layer(
"fluid.layers.shape",
inputs={'input': "bn-input-0"},
outputs=[gen_name(5)])
pattern_block0.add_layer(
"prim.getitem",
inputs={"list": gen_name(5)},
outputs=[gen_name(6)],
index=0)
pattern_block0.add_layer(
"prim.len", inputs={"input": gen_name(5)}, outputs=[gen_name(7)])
pattern_block0.add_layer(
"prim.sub", inputs={"x": gen_name(7)}, outputs=[gen_name(8)], y=2)
pattern_block0.add_layer(
"prim.equal", inputs={"input": gen_name(6)}, outputs=[gen_name(9)])
pattern_block0.add_layer(
"prim.loop",
inputs={"input": gen_name(8)},
outputs=[gen_name(8.1), gen_name(10)])
loop_layer = pattern_block0.layers[list(pattern_block0.layers.keys())[
-1]]
pattern_block0_block0 = PaddleGraph(loop_layer, graph_type="dygraph")
pattern_block0_block0.add_layer(
"prim.add", inputs={"x": gen_name(10)}, outputs=[gen_name(11)], y=2)
pattern_block0_block0.add_layer(
"prim.getitem",
inputs={"list": gen_name(5),
"index": gen_name(11)},
outputs=[gen_name(12)])
pattern_block0_block0.add_layer(
"prim.mul",
inputs={"x": gen_name(9),
"y": gen_name(12)},
outputs=[gen_name(13)])
pattern_block0_block0.add_layer(
"prim.equal",
inputs={"input": gen_name(13)},
outputs=[gen_name(8.1)])
loop_layer.inputs["input-1"] = gen_name(5)
loop_layer.inputs["input-2"] = gen_name(9)
loop_layer.add_block(pattern_block0_block0)
pattern_block0.add_layer(
"prim.eq", inputs={"x": gen_name(8.1)}, outputs=[gen_name(14)], y=1)
pattern_block0.add_layer(
"prim.if", inputs={"input": gen_name(14)}, outputs=[gen_name(15)])
if_layer21 = pattern_block0.layers[list(pattern_block0.layers.keys())[
-1]]
pattern_block0_block0 = PaddleGraph(if_layer21, graph_type="dygraph")
pattern_block0_block0.add_layer(
"prim.exception",
inputs={},
outputs=[gen_name(15)],
input="Exception")
if_layer21.add_block(pattern_block0_block0)
pattern_block0_block1 = PaddleGraph(if_layer21, graph_type="dygraph")
if_layer21.add_block(pattern_block0_block1)
if_layer2.add_block(pattern_block0)
pattern_block1 = PaddleGraph(if_layer2, graph_type="dygraph")
if_layer2.add_block(pattern_block1)
if_layer2.inputs["input-0"] = "bn-input-0"
self.pattern.add_layer(
"paddle.nn.BatchNorm",
inputs={"input": "bn-input-0"},
outputs=[gen_name(16), gen_name(17)],
is_test=True,
num_channels=160,
momentum=0.1,
epsilon=0.001)
self.pattern.build(inputs={"input-0": "bn-input-0"})
def insert_new_layer(self, graph, parameters, matches):
new_layer = self.gen_new_layer(parameters, matches)
new_layer_id = list(matches.keys())[0]
graph.layers[new_layer_id] = new_layer
matches.pop(new_layer_id)
def gen_new_layer(self, parameters, matches):
layers_id = list(matches.keys())
layer = matches[layers_id[-1]]
return layer
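    # Editor's note: only the final paddle.nn.BatchNorm layer of the match is
    # kept; the rank check and the element-count loop are dropped as dead code.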
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion import ConstantFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class ConstantFusePass(Pass):
name = "constant_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = ConstantFuser()
fuser.operate(graph, match_kind="topo")
# Register the pass
constant_fuse_pass = ConstantFusePass()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class ConstantFuser(FuseBase):
def __init__(self):
super(ConstantFuser, self).__init__(graph_type="dygraph")
def build_pattern(self):
""" 描述需要替换的constant图结构。
constant层模式python实现代码示例:
x3 = 10
for _x70 in range(x3):
...
"""
self.pattern.add_layer(
"prim.constant", inputs={}, outputs=["x1"], value=2)
self.pattern.build()
self.pattern.outputs = ["x1"]
def insert_new_layer(self, graph, parameters, matches):
def replace_value(layer_connect, match_name, match_value):
for k, v in layer_connect.inputs.items():
if v == match_name:
layer_connect.inputs.pop(k)
layer_connect.attrs[k] = match_value
break
for k, v in layer_connect.attrs.items():
if v == match_name:
layer_connect.attrs[k] = match_value
break
if layer_connect.kernel == "prim.loop" or \
layer_connect.kernel == "prim.if":
for block in layer_connect.blocks:
for b_layer_id, b_layer in block.layers.items():
if block.edges_in.get(b_layer_id, 0) != 0 and \
-1 in block.edges_in[b_layer_id]:
replace_value(b_layer, match_name, match_value)
layer_id = list(matches.keys())[0]
layer = list(matches.values())[0]
layer_output_name = layer.outputs[0]
layer_value = layer.attrs["value"]
if graph.edges_out.get(layer_id, 0) != 0:
for layer_id_out in graph.edges_out[layer_id]:
layer_connect = graph.layers[layer_id_out]
replace_value(layer_connect, layer_output_name, layer_value)
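        # Illustrative effect (editor's sketch): a matched pair such as
        #     x1 = 2
        #     x2 = x0[x1]
        # is folded so the consumer reads the literal directly: x2 = x0[2].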
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion import DropoutFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class DropoutFusePass(Pass):
name = "dropout_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = DropoutFuser()
fuser.operate(graph, match_kind="topo")
# Register the pass
dropout_fuse_pass = DropoutFusePass()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class DropoutFuser(FuseBase):
def __init__(self):
super(DropoutFuser, self).__init__(graph_type="dygraph")
def build_pattern(self):
""" 描述需要替换的constant图结构。
constant层模式python实现代码示例:
x3 = 10
for _x70 in range(x3):
...
"""
self.pattern.add_layer(
"paddle.nn.Dropout",
inputs={"input": "dropout-input-0"},
outputs=["dropout0", "x1"])
self.pattern.build(inputs={"input-0": "dropout-input-0"})
self.pattern.outputs = ["dropout0", "x1"]
def insert_new_layer(self, graph, parameters, matches):
def replace_value(layer_connect, match_name, match_input):
for k, v in layer_connect.inputs.items():
if v == match_name:
layer_connect.inputs[k] = match_input
break
if layer_connect.kernel == "prim.loop" or \
layer_connect.kernel == "prim.if":
for block in layer_connect.blocks:
for b_layer_id, b_layer in block.layers.items():
if block.edges_in.get(b_layer_id, 0) != 0 and \
-1 in block.edges_in[b_layer_id]:
replace_value(b_layer, match_name, match_input)
layer_id = list(matches.keys())[0]
layer = list(matches.values())[0]
layer_output_name = layer.outputs[1]
layer_input = layer.inputs["input"]
if graph.edges_out.get(layer_id, 0) != 0:
for layer_id_out in graph.edges_out[layer_id]:
layer_connect = graph.layers[layer_id_out]
replace_value(layer_connect, layer_output_name, layer_input)
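        # Illustrative effect (editor's sketch): consumers of the dropout
        # output are rewired to its input, removing the inference-time no-op:
        #     x1 = self.dropout0(x0); y = f(x1)  ->  y = f(x0)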
......@@ -26,47 +26,38 @@ class FcFuser(FuseBase):
def build_pattern(self):
""" 描述需要替换的fc图结构。
fc层模式python实现代码示例:
x149 = 2
x151 = x146.shape
x151 = len(x151)
x152 = x151 == x149
if x152 :
x147 = self.x147
x154 = fluid.layers.transpose(x=x147, perm=[1, 0])
x148 = self.x148
x155 = fluid.layers.addmm(input=x148, x=x146, y=x154, beta=1, alpha=1)
x153 = x155
x133 = x128.shape
x133 = len(x133)
x134 = x133 == 2
if x134 :
classifier_6_weight = self.classifier_6_weight
x136 = fluid.layers.transpose(x=classifier_6_weight, perm=[1, 0])
classifier_6_bias = self.classifier_6_bias
x137 = paddle.addmm(input=classifier_6_bias, x=x128, y=x136, beta=1, alpha=1)
x135 = x137
else:
x147 = self.x147
x157 = fluid.layers.transpose(x=x147, perm=[1, 0])
x158 = fluid.layers.matmul(x=x146, y=x157)
x159 = True
if x159 :
x148 = self.x148
x161 = x158 + 1 * x148
x160 = x161
else:
x160 = x158
x153 = x160
classifier_6_weight = self.classifier_6_weight
x138 = fluid.layers.transpose(x=classifier_6_weight, perm=[1, 0])
x139 = fluid.layers.matmul(x=x128, y=x138)
classifier_6_bias = self.classifier_6_bias
x140 = x139 + 1 * classifier_6_bias
x135 = x140
"""
def gen_name(id):
return "x" + str(id)
        self.pattern.add_layer(
            "fluid.layers.shape",
            inputs={'input': "fc-input-0"},
            outputs=[gen_name(2)])
        self.pattern.add_layer(
            "prim.len", inputs={'input': gen_name(2)}, outputs=[gen_name(2)])
        self.pattern.add_layer(
            "prim.eq",
            inputs={"eq0": gen_name(2)},
            outputs=[gen_name(3)],
            eq1=2)
self.pattern.add_layer("prim.if", {'input': gen_name(3)}, [gen_name(4)])
self.pattern.outputs.append(gen_name(4))
if_layer1 = self.pattern.layers[list(self.pattern.layers.keys())[-1]]
......@@ -87,7 +78,7 @@ class FcFuser(FuseBase):
outputs=[gen_name(7)],
value="params[{}]".format(string(gen_name(7))))
pattern_block0.add_layer(
"fluid.layers.addmm",
"paddle.addmm",
inputs={"input": gen_name(7),
"x": "fc-input-0",
"y": gen_name(6)},
......@@ -111,44 +102,25 @@ class FcFuser(FuseBase):
outputs=[gen_name(6)],
perm=[1, 0])
pattern_block1.add_layer(
"fluid.layers.matmul",
"paddle.matmul",
inputs={"x": "fc-input-0",
"y": gen_name(6)},
outputs=[gen_name(9)])
if_layer1.inputs["input-1"] = "fc-input-0"
        pattern_block1.add_layer(
            "fluid.dygraph.base.to_variable",
            inputs={},
            outputs=[gen_name(12)],
            value="params[{}]".format(string(gen_name(12))))
        pattern_block1.add_layer(
            "prim.add_",
            inputs={"x": gen_name(9),
                    "y": gen_name(12)},
            outputs=[gen_name(13)],
            alpha=1)
        pattern_block1.add_layer(
            "prim.equal", inputs={'input': gen_name(13)},
            outputs=[gen_name(4)])
if_layer1.add_block(pattern_block1)
self.pattern.build(inputs={"input-0": "fc-input-0"})
......@@ -160,17 +132,17 @@ class FcFuser(FuseBase):
def gen_new_layer(self, parameters, matches):
layers_id = list(matches.keys())
        layer = matches[layers_id[0]]
        input_name = layer.inputs["input"]
        layer = matches[layers_id[3]]
        output_name = layer.outputs[0]
        layer = matches[layers_id[4]]
        weight_name = layer.attrs["value"][8:-2]
        layer = matches[layers_id[6]]
        bias_name = layer.attrs["value"][8:-2]
        attrs = dict()
        attrs["in_features"] = parameters[weight_name].shape[1]
        attrs["out_features"] = parameters[weight_name].shape[0]
linear_name = "linear{}".format(self.linear_index)
self.linear_index += 1
parameters["{}.weight".format(linear_name)] = parameters[
......@@ -179,7 +151,7 @@ class FcFuser(FuseBase):
bias_name])
new_layer = PaddleLayer(
layers_id[0],
"fluid.dygraph.Linear",
"paddle.nn.Linear",
inputs={"input": input_name},
outputs=[linear_name, output_name],
**attrs)
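        # Sketch of the rewrite (editor's illustration): the matched if/else
        # subgraph is replaced by this single paddle.nn.Linear layer, reusing
        # the matched weight and bias parameters.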
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion import InterpolateBilinearFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class InterpolateBilinearFusePass(Pass):
name = "interpolate_bilinear_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = InterpolateBilinearFuser()
fuser.operate(graph, match_kind="topo")
# Register the pass
interpolate_bilinear_fuse_pass = InterpolateBilinearFusePass()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class InterpolateBilinearFuser(FuseBase):
def __init__(self):
super(InterpolateBilinearFuser, self).__init__(graph_type="dygraph")
import torch
torch_version = torch.__version__
torch_version_part = torch_version.split(".")
if int(torch_version_part[0]) == 1 and int(torch_version_part[1]) > 5:
self.version_gt_150 = True
else:
self.version_gt_150 = False
def build_pattern(self):
""" 描述需要替换的双线性插值图结构。
interpolate_bilinear层模式python实现代码示例:
x3016 = fluid.layers.shape(input=x3005)
x3016 = len(x3016)
x3017 = x3016 - 2
x3018 = []
for _x3020 in range(x3017):
x3018.append(None)
x3021 = (x3005, x8, None, None)
x3022 = fluid.layers.shape(input=x3005)
x3022 = len(x3022)
x3023 = x3022 == 3
if x3023 :
raise RaiseException('Exception')
x3024 = None
else:
x3026 = fluid.layers.shape(input=x3005)
x3026 = len(x3026)
x3027 = x3026 == 4
if x3027 :
x3044, x3045, x3046, x3047 = x3021
x3048 = x3045 is None
if x3048 :
x3051 = x3046 is None
x3049 = x3051
x3050 = x3045
else:
x3052 = x3045
x3049 = False
x3050 = x3052
if x3049 :
raise RaiseException('Exception')
x3055 = x3050 is not None
if x3055 :
x3058 = x3050
x3059 = x3046 is not None
x3056 = x3059
x3057 = x3058
else:
x3056 = False
x3057 = x3050
if x3056 :
raise RaiseException('Exception')
x3060 = None
x3061 = None
else:
x3060 = x3046
x3061 = x3057
x3063 = x3060 is not None
if x3063 :
x3065 = x3060
x3066 = len(x3065)
x3067 = x3066 != 2
if x3067 :
raise RaiseException('Exception')
x3064 = x3065
else:
x3064 = x3060
x3070 = x3061 is not None
if x3070 :
x3072 = x3061
x3071 = x3072
else:
x3071 = None
if x3070 :
x3073 = x3071
else:
x3074 = x3064 is not None
if x3074 :
x3076 = x3064
x3075 = x3076
else:
raise RaiseException('Exception')
x3075 = None
x3078 = x3047 is None
if x3078 :
x3080 = len(x3075)
x3081 = x3080 > 0
x3086 = 0
for x3083 in range(2147483647):
x3087 = x3075[x3086]
x3088 = math.floor(x3087)
x3089 = x3088 != x3087
if x3089 :
x3090 = False
x3091 = x3089
else:
x3090 = None
x3091 = None
if x3089 :
x3092 = x3090
x3093 = x3091
else:
x3092 = True
x3093 = x3089
x3094 = x3086 + 1
x3095 = x3094 < x3080
x3096 = x3095 and x3092
x3082 = x3093
x3083 = x3094
if x3082 :
import warnings
warnings.warn('The default behavior for interpolate/upsample with float scale_factor will change in 1.6.0 to align with other frameworks/libraries, and use scale_factor directly, instead of relying on the computed output size. If you wish to keep the old behavior, please set recompute_scale_factor=True. See the documentation of nn.Upsample for details. ', stacklevel=2)
x3099 = []
for _x3101 in range(2):
x3102 = _x3101 + 2
x3103 = fluid.layers.shape(x3044)[x3102]
x3104 = float(x3103)
x3105 = x3075[_x3101]
x3106 = x3104 * x3105
x3107 = math.floor(x3106)
x3099.append(x3107)
x3073 = x3099
x3108 = x3018[0]
x3109 = x3018[1]
x3073_isinstance = isinstance(x3073, paddle.fluid.Variable)
if x3073_isinstance :
x3073 = x3073.numpy().tolist()
assert x3108 == x3109, 'The x3108 must be x3109!'
x3110 = paddle.nn.functional.interpolate(x=x3005, size=x3073, scale_factor=x3108, align_corners=False, align_mode=0)
x3028 = x3110
else:
x3111 = fluid.layers.shape(input=x3005)
x3111 = len(x3111)
x3112 = x3111 == 5
if x3112 :
raise RaiseException('Exception')
else:
raise RaiseException('Exception')
x3028 = None
x3024 = x3028
"""
def gen_name(id):
return "x" + str(id)
if self.version_gt_150:
self.pattern.add_layer(
"fluid.layers.shape",
inputs={"input": "interpolate-input-0"},
outputs=[gen_name(9)])
self.pattern.add_layer(
"prim.len",
inputs={"input": gen_name(9)},
outputs=[gen_name(9)])
self.pattern.add_layer(
"prim.sub",
inputs={"x": gen_name(9)},
outputs=[gen_name(10)],
y=2)
self.pattern.add_layer(
"prim.list", inputs={}, outputs=[gen_name(11)])
self.pattern.add_layer(
"prim.loop",
inputs={"input": gen_name(10)},
outputs=[gen_name(12.1), gen_name(12.2)])
loop_layer = self.pattern.layers[list(self.pattern.layers.keys())[
-1]]
pattern_block = PaddleGraph(loop_layer, graph_type="dygraph")
pattern_block.add_layer(
"prim.append",
inputs={"list": gen_name(11)},
outputs=[],
element=None)
loop_layer.inputs["input-0"] = gen_name(11)
loop_layer.add_block(pattern_block)
self.pattern.add_layer(
"prim.tuple",
inputs={
"input0": "interpolate-input-0",
"input1": "interpolate-input-1",
},
outputs=[gen_name(13)],
input2=None,
input3=None)
self.pattern.add_layer(
"fluid.layers.shape",
inputs={"input": "interpolate-input-0"},
outputs=[gen_name(14)])
self.pattern.add_layer(
"prim.len",
inputs={"input": gen_name(14)},
outputs=[gen_name(14)])
self.pattern.add_layer(
"prim.eq",
inputs={"x": gen_name(14)},
outputs=[gen_name(15)],
y=3)
self.pattern.add_layer(
"prim.if",
inputs={"input": gen_name(15)},
outputs=[gen_name(16)])
if_layer1 = self.pattern.layers[list(self.pattern.layers.keys())[
-1]]
pattern_block = PaddleGraph(if_layer1, graph_type="dygraph")
pattern_block.add_layer(
"prim.exception",
inputs={},
outputs=[gen_name(17)],
input="Exception")
pattern_block.add_layer(
"prim.equal", inputs={}, outputs=[gen_name(16)], input=None)
if_layer1.add_block(pattern_block)
pattern_block = PaddleGraph(if_layer1, graph_type="dygraph")
pattern_block.add_layer(
"fluid.layers.shape",
inputs={"input": "interpolate-input-0"},
outputs=[gen_name(18)])
pattern_block.add_layer(
"prim.len",
inputs={"input": gen_name(18)},
outputs=[gen_name(18)])
pattern_block.add_layer(
"prim.eq",
inputs={"x": gen_name(18)},
outputs=[gen_name(19)],
y=4)
pattern_block.add_layer(
"prim.if",
inputs={"input": gen_name(19)},
outputs=[gen_name(20)])
if_layer2 = pattern_block.layers[list(pattern_block.layers.keys())[
-1]]
pattern_block_block = PaddleGraph(if_layer2, graph_type="dygraph")
pattern_block_block.add_layer(
"prim.tuple_unpack",
inputs={"input": gen_name(13)},
outputs=[
gen_name(34), gen_name(35), gen_name(36), gen_name(37)
])
pattern_block_block.add_layer(
"prim.is",
inputs={"x": gen_name(35)},
outputs=[gen_name(38)],
y=None)
pattern_block_block.add_layer(
"prim.if",
inputs={"input": gen_name(38)},
outputs=[gen_name(39), gen_name(40)])
if_layer3 = pattern_block_block.layers[list(
pattern_block_block.layers.keys())[-1]]
pattern_block_block_block = PaddleGraph(
if_layer3, graph_type="dygraph")
pattern_block_block_block.add_layer(
"prim.is",
inputs={"x": gen_name(36)},
outputs=[gen_name(41)],
y=None)
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(41)},
outputs=[gen_name(39)])
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(35)},
outputs=[gen_name(40)])
if_layer3.add_block(pattern_block_block_block)
pattern_block_block_block = PaddleGraph(
if_layer3, graph_type="dygraph")
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(35)},
outputs=[gen_name(42)])
pattern_block_block_block.add_layer(
"prim.equal", inputs={}, outputs=[gen_name(39)], input=False)
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(35)},
outputs=[gen_name(40)])
if_layer3.add_block(pattern_block_block_block)
if_layer3.inputs.update({
"input-0": gen_name(36),
'input-1': gen_name(35),
'input-2': gen_name(35),
})
pattern_block_block.add_layer(
"prim.if",
inputs={"input": gen_name(39)},
outputs=[gen_name(43)])
if_layer4 = pattern_block_block.layers[list(
pattern_block_block.layers.keys())[-1]]
pattern_block_block_block = PaddleGraph(
if_layer4, graph_type="dygraph")
pattern_block_block_block.add_layer(
"prim.exception",
inputs={},
outputs=[gen_name(44)],
input="Exception")
if_layer4.add_block(pattern_block_block_block)
pattern_block_block_block = PaddleGraph(
if_layer4, graph_type="dygraph")
if_layer4.add_block(pattern_block_block_block)
pattern_block_block.add_layer(
"prim.isnot",
inputs={"x": gen_name(40)},
outputs=[gen_name(45)],
y=None)
pattern_block_block.add_layer(
"prim.if",
inputs={"input": gen_name(45)},
outputs=[gen_name(46), gen_name(47)])
if_layer5 = pattern_block_block.layers[list(
pattern_block_block.layers.keys())[-1]]
pattern_block_block_block = PaddleGraph(
if_layer5, graph_type="dygraph")
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(40)},
outputs=[gen_name(48)])
pattern_block_block_block.add_layer(
"prim.isnot",
inputs={"x": gen_name(36)},
outputs=[gen_name(49)],
y=None)
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(49)},
outputs=[gen_name(46)])
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(48)},
outputs=[gen_name(47)])
if_layer5.add_block(pattern_block_block_block)
pattern_block_block_block = PaddleGraph(
if_layer5, graph_type="dygraph")
pattern_block_block_block.add_layer(
"prim.equal", inputs={}, outputs=[gen_name(46)], input=False)
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(40)},
outputs=[gen_name(47)])
if_layer5.add_block(pattern_block_block_block)
if_layer5.inputs.update({
"input-0": gen_name(40),
"input-1": gen_name(36),
"input-3": gen_name(40)
})
pattern_block_block.add_layer(
"prim.if",
inputs={"input": gen_name(46)},
outputs=[gen_name(50), gen_name(51)])
if_layer6 = pattern_block_block.layers[list(
pattern_block_block.layers.keys())[-1]]
pattern_block_block_block = PaddleGraph(
if_layer6, graph_type="dygraph")
pattern_block_block_block.add_layer(
"prim.exception",
inputs={},
outputs=[gen_name(52)],
input="Exception")
pattern_block_block_block.add_layer(
"prim.equal", inputs={}, outputs=[gen_name(50)], input=None)
pattern_block_block_block.add_layer(
"prim.equal", inputs={}, outputs=[gen_name(51)], input=None)
if_layer6.add_block(pattern_block_block_block)
pattern_block_block_block = PaddleGraph(
if_layer6, graph_type="dygraph")
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(36)},
outputs=[gen_name(50)])
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(47)},
outputs=[gen_name(51)])
if_layer6.add_block(pattern_block_block_block)
if_layer6.inputs.update({
"input-0": gen_name(36),
"input-1": gen_name(47)
})
pattern_block_block.add_layer(
"prim.isnot",
inputs={"x": gen_name(50)},
outputs=[gen_name(53)],
y=None)
pattern_block_block.add_layer(
"prim.if",
inputs={"input": gen_name(53)},
outputs=[gen_name(54)])
if_layer7 = pattern_block_block.layers[list(
pattern_block_block.layers.keys())[-1]]
pattern_block_block_block = PaddleGraph(
if_layer7, graph_type="dygraph")
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(50)},
outputs=[gen_name(55)])
pattern_block_block_block.add_layer(
"prim.len",
inputs={"input": gen_name(55)},
outputs=[gen_name(56)])
pattern_block_block_block.add_layer(
"prim.ne",
inputs={"x": gen_name(56)},
outputs=[gen_name(57)],
y=2)
pattern_block_block_block.add_layer(
"prim.if",
inputs={"input": gen_name(57)},
outputs=[gen_name(58)])
if_layer8 = pattern_block_block_block.layers[list(
pattern_block_block_block.layers.keys())[-1]]
pattern_block_block_block_block = PaddleGraph(
if_layer8, graph_type="dygraph")
pattern_block_block_block_block.add_layer(
"prim.exception",
inputs={},
outputs=[gen_name(59)],
input="Exception")
if_layer8.add_block(pattern_block_block_block_block)
pattern_block_block_block_block = PaddleGraph(
if_layer8, graph_type="dygraph")
if_layer8.add_block(pattern_block_block_block_block)
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(55)},
outputs=[gen_name(54)])
if_layer7.add_block(pattern_block_block_block)
pattern_block_block_block = PaddleGraph(
if_layer7, graph_type="dygraph")
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(50)},
outputs=[gen_name(54)])
if_layer7.add_block(pattern_block_block_block)
if_layer7.inputs.update({
"input-0": gen_name(50),
"input-1": gen_name(50)
})
pattern_block_block.add_layer(
"prim.isnot",
inputs={"x": gen_name(51)},
outputs=[gen_name(60)],
y=None)
pattern_block_block.add_layer(
"prim.if",
inputs={"input": gen_name(60)},
outputs=[gen_name(61)])
if_layer9 = pattern_block_block.layers[list(
pattern_block_block.layers.keys())[-1]]
pattern_block_block_block = PaddleGraph(
if_layer9, graph_type="dygraph")
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(51)},
outputs=[gen_name(62)])
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(62)},
outputs=[gen_name(61)])
if_layer9.add_block(pattern_block_block_block)
pattern_block_block_block = PaddleGraph(
if_layer9, graph_type="dygraph")
pattern_block_block_block.add_layer(
"prim.isnot",
inputs={"x": gen_name(54)},
outputs=[gen_name(64)],
y=None)
pattern_block_block_block.add_layer(
"prim.if",
inputs={"input": gen_name(64)},
outputs=[gen_name(65)])
if_layer11 = pattern_block_block_block.layers[list(
pattern_block_block_block.layers.keys())[-1]]
pattern_block_block_block_block = PaddleGraph(
if_layer11, graph_type="dygraph")
pattern_block_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(54)},
outputs=[gen_name(66)])
pattern_block_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(66)},
outputs=[gen_name(65)])
if_layer11.add_block(pattern_block_block_block_block)
pattern_block_block_block_block = PaddleGraph(
if_layer11, graph_type="dygraph")
pattern_block_block_block_block.add_layer(
"prim.exception",
inputs={},
outputs=[gen_name(67)],
input="Exception")
pattern_block_block_block_block.add_layer(
"prim.equal", inputs={}, outputs=[gen_name(65)], input=None)
if_layer11.add_block(pattern_block_block_block_block)
if_layer11.inputs.update({"input-0": gen_name(54), })
pattern_block_block_block.add_layer(
"prim.is",
inputs={"x": gen_name(37)},
outputs=[gen_name(68)],
y=None)
pattern_block_block_block.add_layer(
"prim.if",
inputs={"input": gen_name(68)},
outputs=[gen_name(69)])
if_layer12 = pattern_block_block_block.layers[list(
pattern_block_block_block.layers.keys())[-1]]
pattern_block_block_block_block = PaddleGraph(
if_layer12, graph_type="dygraph")
pattern_block_block_block_block.add_layer(
"prim.len",
inputs={"input": gen_name(65)},
outputs=[gen_name(70)])
pattern_block_block_block_block.add_layer(
"prim.gt",
inputs={"x": gen_name(70)},
outputs=[gen_name(71)],
y=0)
pattern_block_block_block_block.add_layer(
"prim.equal", inputs={}, outputs=[gen_name(72)], input=0)
pattern_block_block_block_block.add_layer(
"prim.loop",
inputs={},
outputs=[gen_name(74), gen_name(75), gen_name(76.1)],
input=2147483647)
loop_layer = pattern_block_block_block_block.layers[list(
pattern_block_block_block_block.layers.keys())[-1]]
pattern_loop_block = PaddleGraph(loop_layer, graph_type="dygraph")
pattern_loop_block.add_layer(
"prim.getitem",
inputs={"list": gen_name(65),
"element": gen_name(72)},
outputs=[gen_name(74.1)])
pattern_loop_block.add_layer(
"prim.floor",
inputs={"input": gen_name(74.1)},
outputs=[gen_name(75.1)])
pattern_loop_block.add_layer(
"prim.ne",
inputs={"x": gen_name(75.1),
"y": gen_name(74.1)},
outputs=[gen_name(76)])
pattern_loop_block.add_layer(
"prim.if",
inputs={"input": gen_name(76)},
outputs=[gen_name(77)])
if_layer13 = pattern_loop_block.layers[list(
pattern_loop_block.layers.keys())[-1]]
pattern_loop_block_block = PaddleGraph(
if_layer13, graph_type="dygraph")
pattern_loop_block_block.add_layer(
"prim.equal", inputs={}, outputs=[gen_name(77)], input=False)
if_layer13.add_block(pattern_loop_block_block)
pattern_loop_block_block = PaddleGraph(
if_layer13, graph_type="dygraph")
pattern_loop_block_block.add_layer(
"prim.equal", inputs={}, outputs=[gen_name(77)], input=True)
if_layer13.add_block(pattern_loop_block_block)
pattern_loop_block.add_layer(
"prim.add",
inputs={"x": gen_name(72)},
outputs=[gen_name(81)],
y=1)
pattern_loop_block.add_layer(
"prim.lt",
inputs={"x": gen_name(81),
"y": gen_name(70)},
outputs=[gen_name(82)])
pattern_loop_block.add_layer(
"prim.and",
inputs={"x": gen_name(82),
"y": gen_name(77)},
outputs=[gen_name(83)])
pattern_loop_block.add_layer(
"prim.equal",
inputs={"input": gen_name(76)},
outputs=[gen_name(74)])
pattern_loop_block.add_layer(
"prim.equal",
inputs={"input": gen_name(81)},
outputs=[gen_name(75)])
loop_layer.add_block(pattern_loop_block)
loop_layer.inputs.update({
"input-0": gen_name(65),
"input-1": gen_name(72),
"input-2": gen_name(72),
"input-3": gen_name(70)
})
pattern_block_block_block_block.add_layer(
"prim.if",
inputs={"input": gen_name(74)},
outputs=[gen_name(84)])
if_layer15 = pattern_block_block_block_block.layers[list(
pattern_block_block_block_block.layers.keys())[-1]]
pattern_block_block_block_block_block = PaddleGraph(
if_layer15, graph_type="dygraph")
pattern_block_block_block_block_block.add_layer(
"prim.warnings",
inputs={},
outputs=[gen_name(85)],
stacklevel=2,
input="...")
if_layer15.add_block(pattern_block_block_block_block_block)
pattern_block_block_block_block_block = PaddleGraph(
if_layer15, graph_type="dygraph")
if_layer15.add_block(pattern_block_block_block_block_block)
if_layer12.add_block(pattern_block_block_block_block)
pattern_block_block_block_block = PaddleGraph(
if_layer12, graph_type="dygraph")
if_layer12.add_block(pattern_block_block_block_block)
if_layer12.inputs.update({
"input-0": gen_name(65),
"input-1": gen_name(65),
})
pattern_block_block_block.add_layer(
"prim.list", inputs={}, outputs=[gen_name(86)])
pattern_block_block_block.add_layer(
"prim.loop",
inputs={},
outputs=[gen_name(87), gen_name(88)],
input=2)
loop_layer = pattern_block_block_block.layers[list(
pattern_block_block_block.layers.keys())[-1]]
pattern_loop_block = PaddleGraph(loop_layer, graph_type="dygraph")
pattern_loop_block.add_layer(
"prim.add",
inputs={"x": gen_name(88)},
outputs=[gen_name(89)],
y=2)
pattern_loop_block.add_layer(
"prim.shape_dim",
inputs={"input": gen_name(34),
"dim": gen_name(89)},
outputs=[gen_name(90)])
pattern_loop_block.add_layer(
"prim.float",
inputs={"input": gen_name(90)},
outputs=[gen_name(91)])
pattern_loop_block.add_layer(
"prim.getitem",
inputs={"list": gen_name(65),
"element": gen_name(88)},
outputs=[gen_name(92)])
pattern_loop_block.add_layer(
"prim.mul",
inputs={"x": gen_name(91),
"y": gen_name(92)},
outputs=[gen_name(93)])
pattern_loop_block.add_layer(
"prim.floor",
inputs={"input": gen_name(93)},
outputs=[gen_name(94)])
pattern_loop_block.add_layer(
"prim.append",
inputs={"list": gen_name(86),
"element": gen_name(94)},
outputs=[])
loop_layer.add_block(pattern_loop_block)
loop_layer.inputs.update({
"input-0": gen_name(34),
"input-1": gen_name(65),
"input-2": gen_name(86)
})
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(86)},
outputs=[gen_name(61)])
if_layer9.add_block(pattern_block_block_block)
if_layer9.inputs.update({
"input-0": gen_name(51),
"input-1": gen_name(54),
"input-2": gen_name(54),
"input-3": gen_name(37),
"input-4": gen_name(34)
})
pattern_block_block.add_layer(
"prim.getitem",
inputs={"list": gen_name(11)},
outputs=[gen_name(95)],
element=0)
pattern_block_block.add_layer(
"prim.getitem",
inputs={"list": gen_name(11)},
outputs=[gen_name(96)],
element=1)
pattern_block_block.add_layer(
"prim.isinstance",
inputs={"input": gen_name(61)},
outputs=["interpolate-input-0_isinstance"],
cls="paddle.fluid.Variable")
pattern_block_block.add_layer(
"prim.if", {"input": "interpolate-input-0_isinstance"},
outputs=["interpolate-input-0_if1"])
if_layer_isinstance = pattern_block_block.layers[list(
pattern_block_block.layers.keys())[-1]]
pattern_block_block_block = PaddleGraph(
if_layer_isinstance, graph_type="dygraph")
pattern_block_block_block.add_layer(
"prim.var2list",
inputs={"input": gen_name(61)},
outputs=[gen_name(61)])
if_layer_isinstance.add_block(pattern_block_block_block)
pattern_block_block_block = PaddleGraph(
if_layer_isinstance, graph_type="dygraph")
if_layer_isinstance.add_block(pattern_block_block_block)
if_layer_isinstance.inputs["input-0"] = gen_name(61)
pattern_block_block.add_layer(
"prim.assert",
inputs={"key": gen_name(95),
"value": gen_name(96)},
outputs=[gen_name(97) + "_assert"],
type="eq")
pattern_block_block.add_layer(
"paddle.nn.functional.interpolate",
inputs={
"input": "interpolate-input-0",
"size": gen_name(61),
"scale_factor": gen_name(95)
},
outputs=[gen_name(97)],
align_corners=False,
align_mode=0)
pattern_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(97)},
outputs=[gen_name(20)])
if_layer2.add_block(pattern_block_block)
pattern_block_block = PaddleGraph(if_layer2, graph_type="dygraph")
pattern_block_block.add_layer(
"fluid.layers.shape",
inputs={"input": "interpolate-input-0"},
outputs=[gen_name(98)])
pattern_block_block.add_layer(
"prim.len",
inputs={"input": gen_name(98)},
outputs=[gen_name(98)])
pattern_block_block.add_layer(
"prim.eq",
inputs={"x": gen_name(98)},
outputs=[gen_name(99)],
y=5)
pattern_block_block.add_layer(
"prim.if",
inputs={"input": gen_name(99)},
outputs=[gen_name(100)])
if_layer16 = pattern_block_block.layers[list(
pattern_block_block.layers.keys())[-1]]
pattern_block_block_block = PaddleGraph(
if_layer16, graph_type="dygraph")
pattern_block_block_block.add_layer(
"prim.exception",
inputs={},
outputs=[gen_name(101)],
input="Exception")
if_layer16.add_block(pattern_block_block_block)
pattern_block_block_block = PaddleGraph(
if_layer16, graph_type="dygraph")
pattern_block_block_block.add_layer(
"prim.exception",
inputs={},
outputs=[gen_name(102)],
input="Exception")
if_layer16.add_block(pattern_block_block_block)
pattern_block_block.add_layer(
"prim.equal", inputs={}, outputs=[gen_name(20)], input=None)
if_layer2.add_block(pattern_block_block)
if_layer2.inputs.update({
"input-0": gen_name(13),
"input-1": gen_name(13),
"input-2": "interpolate-input-0",
"input-3": gen_name(11),
"input-5": gen_name(11),
})
pattern_block.add_layer(
"prim.equal",
inputs={"input": gen_name(20)},
outputs=[gen_name(16)])
if_layer1.add_block(pattern_block)
if_layer1.inputs.update({
"input-2": "interpolate-input-0",
"input-4": gen_name(13),
"input-7": gen_name(11),
"input-9": gen_name(11),
"input-11": "interpolate-input-0",
"input-12": "interpolate-input-0",
})
self.pattern.build(inputs={
"input-0": "interpolate-input-0",
"input-1": "interpolate-input-1"
})
else:
self.pattern.add_layer(
"fluid.layers.shape",
inputs={"input": "interpolate-input-0"},
outputs=[gen_name(9)])
self.pattern.add_layer(
"prim.len",
inputs={"input": gen_name(9)},
outputs=[gen_name(9)])
self.pattern.add_layer(
"prim.sub",
inputs={"x": gen_name(9)},
outputs=[gen_name(10)],
y=2)
self.pattern.add_layer(
"prim.list", inputs={}, outputs=[gen_name(11)])
self.pattern.add_layer(
"prim.loop",
inputs={"input": gen_name(10)},
outputs=[gen_name(12.1), gen_name(12.2)])
loop_layer = self.pattern.layers[list(self.pattern.layers.keys())[
-1]]
pattern_block = PaddleGraph(loop_layer, graph_type="dygraph")
pattern_block.add_layer(
"prim.append",
inputs={"list": gen_name(11)},
outputs=[],
element=None)
loop_layer.inputs["input-0"] = gen_name(11)
loop_layer.add_block(pattern_block)
self.pattern.add_layer(
"prim.tuple",
inputs={
"input0": "interpolate-input-0",
"input1": "interpolate-input-1",
},
outputs=[gen_name(13)],
input2=None,
input3=None)
self.pattern.add_layer(
"fluid.layers.shape",
inputs={"input": "interpolate-input-0"},
outputs=[gen_name(14)])
self.pattern.add_layer(
"prim.len",
inputs={"input": gen_name(14)},
outputs=[gen_name(14)])
self.pattern.add_layer(
"prim.eq",
inputs={"x": gen_name(14)},
outputs=[gen_name(15)],
y=3)
self.pattern.add_layer(
"prim.if",
inputs={"input": gen_name(15)},
outputs=[gen_name(16)])
if_layer1 = self.pattern.layers[list(self.pattern.layers.keys())[
-1]]
pattern_block = PaddleGraph(if_layer1, graph_type="dygraph")
pattern_block.add_layer(
"prim.exception",
inputs={},
outputs=[gen_name(17)],
input="Exception")
pattern_block.add_layer(
"prim.equal", inputs={}, outputs=[gen_name(16)], input=None)
if_layer1.add_block(pattern_block)
pattern_block = PaddleGraph(if_layer1, graph_type="dygraph")
pattern_block.add_layer(
"fluid.layers.shape",
inputs={"input": "interpolate-input-0"},
outputs=[gen_name(18)])
pattern_block.add_layer(
"prim.len",
inputs={"input": gen_name(18)},
outputs=[gen_name(18)])
pattern_block.add_layer(
"prim.eq",
inputs={"x": gen_name(18)},
outputs=[gen_name(19)],
y=4)
pattern_block.add_layer(
"prim.if",
inputs={"input": gen_name(19)},
outputs=[gen_name(20)])
if_layer2 = pattern_block.layers[list(pattern_block.layers.keys())[
-1]]
pattern_block_block = PaddleGraph(if_layer2, graph_type="dygraph")
pattern_block_block.add_layer(
"prim.tuple_unpack",
inputs={"input": gen_name(13)},
outputs=[
gen_name(34), gen_name(35), gen_name(36), gen_name(37)
])
pattern_block_block.add_layer(
"prim.is",
inputs={"x": gen_name(35)},
outputs=[gen_name(38)],
y=None)
pattern_block_block.add_layer(
"prim.if",
inputs={"input": gen_name(38)},
outputs=[gen_name(39), gen_name(40)])
if_layer3 = pattern_block_block.layers[list(
pattern_block_block.layers.keys())[-1]]
pattern_block_block_block = PaddleGraph(
if_layer3, graph_type="dygraph")
pattern_block_block_block.add_layer(
"prim.is",
inputs={"x": gen_name(36)},
outputs=[gen_name(41)],
y=None)
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(41)},
outputs=[gen_name(39)])
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(35)},
outputs=[gen_name(40)])
if_layer3.add_block(pattern_block_block_block)
pattern_block_block_block = PaddleGraph(
if_layer3, graph_type="dygraph")
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(35)},
outputs=[gen_name(42)])
pattern_block_block_block.add_layer(
"prim.equal", inputs={}, outputs=[gen_name(39)], input=False)
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(35)},
outputs=[gen_name(40)])
if_layer3.add_block(pattern_block_block_block)
if_layer3.inputs.update({
"input-0": gen_name(36),
'input-1': gen_name(35),
'input-2': gen_name(35),
})
pattern_block_block.add_layer(
"prim.if",
inputs={"input": gen_name(39)},
outputs=[gen_name(43)])
if_layer4 = pattern_block_block.layers[list(
pattern_block_block.layers.keys())[-1]]
pattern_block_block_block = PaddleGraph(
if_layer4, graph_type="dygraph")
pattern_block_block_block.add_layer(
"prim.exception",
inputs={},
outputs=[gen_name(44)],
input="Exception")
if_layer4.add_block(pattern_block_block_block)
pattern_block_block_block = PaddleGraph(
if_layer4, graph_type="dygraph")
if_layer4.add_block(pattern_block_block_block)
pattern_block_block.add_layer(
"prim.isnot",
inputs={"x": gen_name(40)},
outputs=[gen_name(45)],
y=None)
pattern_block_block.add_layer(
"prim.if",
inputs={"input": gen_name(45)},
outputs=[gen_name(46), gen_name(47)])
if_layer5 = pattern_block_block.layers[list(
pattern_block_block.layers.keys())[-1]]
pattern_block_block_block = PaddleGraph(
if_layer5, graph_type="dygraph")
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(40)},
outputs=[gen_name(48)])
pattern_block_block_block.add_layer(
"prim.isnot",
inputs={"x": gen_name(36)},
outputs=[gen_name(49)],
y=None)
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(49)},
outputs=[gen_name(46)])
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(48)},
outputs=[gen_name(47)])
if_layer5.add_block(pattern_block_block_block)
pattern_block_block_block = PaddleGraph(
if_layer5, graph_type="dygraph")
pattern_block_block_block.add_layer(
"prim.equal", inputs={}, outputs=[gen_name(46)], input=False)
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(40)},
outputs=[gen_name(47)])
if_layer5.add_block(pattern_block_block_block)
if_layer5.inputs.update({
"input-0": gen_name(40),
"input-1": gen_name(36),
"input-3": gen_name(40)
})
pattern_block_block.add_layer(
"prim.if",
inputs={"input": gen_name(46)},
outputs=[gen_name(50), gen_name(51)])
if_layer6 = pattern_block_block.layers[list(
pattern_block_block.layers.keys())[-1]]
pattern_block_block_block = PaddleGraph(
if_layer6, graph_type="dygraph")
pattern_block_block_block.add_layer(
"prim.exception",
inputs={},
outputs=[gen_name(52)],
input="Exception")
pattern_block_block_block.add_layer(
"prim.equal", inputs={}, outputs=[gen_name(50)], input=None)
pattern_block_block_block.add_layer(
"prim.equal", inputs={}, outputs=[gen_name(51)], input=None)
if_layer6.add_block(pattern_block_block_block)
pattern_block_block_block = PaddleGraph(
if_layer6, graph_type="dygraph")
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(36)},
outputs=[gen_name(50)])
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(47)},
outputs=[gen_name(51)])
if_layer6.add_block(pattern_block_block_block)
if_layer6.inputs.update({
"input-0": gen_name(36),
"input-1": gen_name(47)
})
pattern_block_block.add_layer(
"prim.isnot",
inputs={"x": gen_name(50)},
outputs=[gen_name(53)],
y=None)
pattern_block_block.add_layer(
"prim.if",
inputs={"input": gen_name(53)},
outputs=[gen_name(54)])
if_layer7 = pattern_block_block.layers[list(
pattern_block_block.layers.keys())[-1]]
pattern_block_block_block = PaddleGraph(
if_layer7, graph_type="dygraph")
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(50)},
outputs=[gen_name(55)])
pattern_block_block_block.add_layer(
"prim.len",
inputs={"input": gen_name(55)},
outputs=[gen_name(56)])
pattern_block_block_block.add_layer(
"prim.ne",
inputs={"x": gen_name(56)},
outputs=[gen_name(57)],
y=2)
pattern_block_block_block.add_layer(
"prim.if",
inputs={"input": gen_name(57)},
outputs=[gen_name(58)])
if_layer8 = pattern_block_block_block.layers[list(
pattern_block_block_block.layers.keys())[-1]]
pattern_block_block_block_block = PaddleGraph(
if_layer8, graph_type="dygraph")
pattern_block_block_block_block.add_layer(
"prim.exception",
inputs={},
outputs=[gen_name(59)],
input="Exception")
if_layer8.add_block(pattern_block_block_block_block)
pattern_block_block_block_block = PaddleGraph(
if_layer8, graph_type="dygraph")
if_layer8.add_block(pattern_block_block_block_block)
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(55)},
outputs=[gen_name(54)])
if_layer7.add_block(pattern_block_block_block)
pattern_block_block_block = PaddleGraph(
if_layer7, graph_type="dygraph")
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(50)},
outputs=[gen_name(54)])
if_layer7.add_block(pattern_block_block_block)
if_layer7.inputs.update({
"input-0": gen_name(50),
"input-1": gen_name(50)
})
pattern_block_block.add_layer(
"prim.isnot",
inputs={"x": gen_name(51)},
outputs=[gen_name(60)],
y=None)
pattern_block_block.add_layer(
"prim.if",
inputs={"input": gen_name(60)},
outputs=[gen_name(61)])
if_layer9 = pattern_block_block.layers[list(
pattern_block_block.layers.keys())[-1]]
pattern_block_block_block = PaddleGraph(
if_layer9, graph_type="dygraph")
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(51)},
outputs=[gen_name(62)])
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(62)},
outputs=[gen_name(61)])
if_layer9.add_block(pattern_block_block_block)
pattern_block_block_block = PaddleGraph(
if_layer9, graph_type="dygraph")
pattern_block_block_block.add_layer(
"prim.equal", inputs={}, outputs=[gen_name(61)], input=None)
if_layer9.add_block(pattern_block_block_block)
if_layer9.inputs.update({"input-0": gen_name(51)})
pattern_block_block.add_layer(
"prim.if",
inputs={"input": gen_name(60)},
outputs=[gen_name(63)])
if_layer10 = pattern_block_block.layers[list(
pattern_block_block.layers.keys())[-1]]
pattern_block_block_block = PaddleGraph(
if_layer10, graph_type="dygraph")
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(61)},
outputs=[gen_name(63)])
if_layer10.add_block(pattern_block_block_block)
pattern_block_block_block = PaddleGraph(
if_layer10, graph_type="dygraph")
pattern_block_block_block.add_layer(
"prim.isnot",
inputs={"x": gen_name(54)},
outputs=[gen_name(64)],
y=None)
pattern_block_block_block.add_layer(
"prim.if",
inputs={"input": gen_name(64)},
outputs=[gen_name(65)])
if_layer11 = pattern_block_block_block.layers[list(
pattern_block_block_block.layers.keys())[-1]]
pattern_block_block_block_block = PaddleGraph(
if_layer11, graph_type="dygraph")
pattern_block_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(54)},
outputs=[gen_name(66)])
pattern_block_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(66)},
outputs=[gen_name(65)])
if_layer11.add_block(pattern_block_block_block_block)
pattern_block_block_block_block = PaddleGraph(
if_layer11, graph_type="dygraph")
pattern_block_block_block_block.add_layer(
"prim.exception",
inputs={},
outputs=[gen_name(67)],
input="Exception")
pattern_block_block_block_block.add_layer(
"prim.equal", inputs={}, outputs=[gen_name(65)], input=None)
if_layer11.add_block(pattern_block_block_block_block)
if_layer11.inputs.update({"input-0": gen_name(54), })
pattern_block_block_block.add_layer(
"prim.is",
inputs={"x": gen_name(37)},
outputs=[gen_name(68)],
y=None)
pattern_block_block_block.add_layer(
"prim.if",
inputs={"input": gen_name(68)},
outputs=[gen_name(69)])
if_layer12 = pattern_block_block_block.layers[list(
pattern_block_block_block.layers.keys())[-1]]
pattern_block_block_block_block = PaddleGraph(
if_layer12, graph_type="dygraph")
pattern_block_block_block_block.add_layer(
"prim.len",
inputs={"input": gen_name(65)},
outputs=[gen_name(70)])
pattern_block_block_block_block.add_layer(
"prim.gt",
inputs={"x": gen_name(70)},
outputs=[gen_name(71)],
y=0)
pattern_block_block_block_block.add_layer(
"prim.equal", inputs={}, outputs=[gen_name(72)], input=0)
pattern_block_block_block_block.add_layer(
"prim.loop",
inputs={},
outputs=[gen_name(74), gen_name(75), gen_name(76.1)],
input=2147483647)
loop_layer = pattern_block_block_block_block.layers[list(
pattern_block_block_block_block.layers.keys())[-1]]
pattern_loop_block = PaddleGraph(loop_layer, graph_type="dygraph")
pattern_loop_block.add_layer(
"prim.getitem",
inputs={"list": gen_name(65),
"element": gen_name(72)},
outputs=[gen_name(74.1)])
pattern_loop_block.add_layer(
"prim.floor",
inputs={"input": gen_name(74.1)},
outputs=[gen_name(75.1)])
pattern_loop_block.add_layer(
"prim.ne",
inputs={"x": gen_name(75.1),
"y": gen_name(74.1)},
outputs=[gen_name(76)])
pattern_loop_block.add_layer(
"prim.if",
inputs={"input": gen_name(76)},
outputs=[gen_name(77), gen_name(78)])
if_layer13 = pattern_loop_block.layers[list(
pattern_loop_block.layers.keys())[-1]]
pattern_loop_block_block = PaddleGraph(
if_layer13, graph_type="dygraph")
pattern_loop_block_block.add_layer(
"prim.equal", inputs={}, outputs=[gen_name(77)], input=False)
pattern_loop_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(76)},
outputs=[gen_name(78)])
if_layer13.add_block(pattern_loop_block_block)
pattern_loop_block_block = PaddleGraph(
if_layer13, graph_type="dygraph")
pattern_loop_block_block.add_layer(
"prim.equal", inputs={}, outputs=[gen_name(77)], input=None)
pattern_loop_block_block.add_layer(
"prim.equal", inputs={}, outputs=[gen_name(78)], input=None)
if_layer13.add_block(pattern_loop_block_block)
if_layer13.inputs.update({"input-0": gen_name(76), })
pattern_loop_block.add_layer(
"prim.if",
inputs={"input": gen_name(76)},
outputs=[gen_name(79), gen_name(80)])
if_layer14 = pattern_loop_block.layers[list(
pattern_loop_block.layers.keys())[-1]]
pattern_loop_block_block = PaddleGraph(
if_layer14, graph_type="dygraph")
pattern_loop_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(77)},
outputs=[gen_name(79)])
pattern_loop_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(78)},
outputs=[gen_name(80)])
if_layer14.add_block(pattern_loop_block_block)
pattern_loop_block_block = PaddleGraph(
if_layer14, graph_type="dygraph")
pattern_loop_block_block.add_layer(
"prim.equal", inputs={}, outputs=[gen_name(79)], input=True)
pattern_loop_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(76)},
outputs=[gen_name(80)])
if_layer14.add_block(pattern_loop_block_block)
if_layer14.inputs.update({
"input-0": gen_name(77),
"input-1": gen_name(78),
"input-2": gen_name(76)
})
pattern_loop_block.add_layer(
"prim.add",
inputs={"x": gen_name(72)},
outputs=[gen_name(81)],
y=1)
pattern_loop_block.add_layer(
"prim.lt",
inputs={"x": gen_name(81),
"y": gen_name(70)},
outputs=[gen_name(82)])
pattern_loop_block.add_layer(
"prim.and",
inputs={"x": gen_name(82),
"y": gen_name(79)},
outputs=[gen_name(83)])
pattern_loop_block.add_layer(
"prim.equal",
inputs={"input": gen_name(80)},
outputs=[gen_name(74)])
pattern_loop_block.add_layer(
"prim.equal",
inputs={"input": gen_name(81)},
outputs=[gen_name(75)])
loop_layer.add_block(pattern_loop_block)
loop_layer.inputs.update({
"input-0": gen_name(65),
"input-1": gen_name(72),
"input-2": gen_name(72),
"input-3": gen_name(70)
})
pattern_block_block_block_block.add_layer(
"prim.if",
inputs={"input": gen_name(74)},
outputs=[gen_name(84)])
if_layer15 = pattern_block_block_block_block.layers[list(
pattern_block_block_block_block.layers.keys())[-1]]
pattern_block_block_block_block_block = PaddleGraph(
if_layer15, graph_type="dygraph")
pattern_block_block_block_block_block.add_layer(
"prim.warnings",
inputs={},
outputs=[gen_name(85)],
stacklevel=2,
input="...")
if_layer15.add_block(pattern_block_block_block_block_block)
pattern_block_block_block_block_block = PaddleGraph(
if_layer15, graph_type="dygraph")
if_layer15.add_block(pattern_block_block_block_block_block)
if_layer12.add_block(pattern_block_block_block_block)
pattern_block_block_block_block = PaddleGraph(
if_layer12, graph_type="dygraph")
if_layer12.add_block(pattern_block_block_block_block)
if_layer12.inputs.update({
"input-0": gen_name(65),
"input-1": gen_name(65),
})
pattern_block_block_block.add_layer(
"prim.list", inputs={}, outputs=[gen_name(86)])
pattern_block_block_block.add_layer(
"prim.loop",
inputs={},
outputs=[gen_name(87), gen_name(88)],
input=2)
loop_layer = pattern_block_block_block.layers[list(
pattern_block_block_block.layers.keys())[-1]]
pattern_loop_block = PaddleGraph(loop_layer, graph_type="dygraph")
pattern_loop_block.add_layer(
"prim.add",
inputs={"x": gen_name(88)},
outputs=[gen_name(89)],
y=2)
pattern_loop_block.add_layer(
"prim.shape_dim",
inputs={"input": gen_name(34),
"dim": gen_name(89)},
outputs=[gen_name(90)])
pattern_loop_block.add_layer(
"prim.float",
inputs={"input": gen_name(90)},
outputs=[gen_name(91)])
pattern_loop_block.add_layer(
"prim.getitem",
inputs={"list": gen_name(65),
"element": gen_name(88)},
outputs=[gen_name(92)])
pattern_loop_block.add_layer(
"prim.mul",
inputs={"x": gen_name(91),
"y": gen_name(92)},
outputs=[gen_name(93)])
pattern_loop_block.add_layer(
"prim.floor",
inputs={"input": gen_name(93)},
outputs=[gen_name(94)])
pattern_loop_block.add_layer(
"prim.append",
inputs={"list": gen_name(86),
"element": gen_name(94)},
outputs=[])
loop_layer.add_block(pattern_loop_block)
loop_layer.inputs.update({
"input-0": gen_name(34),
"input-1": gen_name(65),
"input-2": gen_name(86)
})
pattern_block_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(86)},
outputs=[gen_name(63)])
if_layer10.add_block(pattern_block_block_block)
if_layer10.inputs.update({
"input-0": gen_name(61),
"input-1": gen_name(54),
"input-2": gen_name(54),
"input-3": gen_name(37),
"input-4": gen_name(34)
})
pattern_block_block.add_layer(
"prim.getitem",
inputs={"list": gen_name(11)},
outputs=[gen_name(95)],
element=0)
pattern_block_block.add_layer(
"prim.getitem",
inputs={"list": gen_name(11)},
outputs=[gen_name(96)],
element=1)
pattern_block_block.add_layer(
"prim.isinstance",
inputs={"input": gen_name(63)},
outputs=["interpolate-input-0_isinstance"],
cls="paddle.fluid.Variable")
pattern_block_block.add_layer(
"prim.if", {"input": "interpolate-input-0_isinstance"},
outputs=["interpolate-input-0_if1"])
if_layer_isinstance = pattern_block_block.layers[list(
pattern_block_block.layers.keys())[-1]]
pattern_block_block_block = PaddleGraph(
if_layer_isinstance, graph_type="dygraph")
pattern_block_block_block.add_layer(
"prim.var2list",
inputs={"input": gen_name(63)},
outputs=[gen_name(63)])
if_layer_isinstance.add_block(pattern_block_block_block)
pattern_block_block_block = PaddleGraph(
if_layer_isinstance, graph_type="dygraph")
if_layer_isinstance.add_block(pattern_block_block_block)
if_layer_isinstance.inputs["input-0"] = gen_name(63)
pattern_block_block.add_layer(
"prim.assert",
inputs={"key": gen_name(95),
"value": gen_name(96)},
outputs=[gen_name(97) + "_assert"],
type="eq")
pattern_block_block.add_layer(
"paddle.nn.functional.interpolate",
inputs={
"input": "interpolate-input-0",
"size": gen_name(63),
"scale_factor": gen_name(95)
},
outputs=[gen_name(97)],
align_corners=False,
align_mode=0)
pattern_block_block.add_layer(
"prim.equal",
inputs={"input": gen_name(97)},
outputs=[gen_name(20)])
if_layer2.add_block(pattern_block_block)
pattern_block_block = PaddleGraph(if_layer2, graph_type="dygraph")
pattern_block_block.add_layer(
"fluid.layers.shape",
inputs={"input": "interpolate-input-0"},
outputs=[gen_name(98)])
pattern_block_block.add_layer(
"prim.len",
inputs={"input": gen_name(98)},
outputs=[gen_name(98)])
pattern_block_block.add_layer(
"prim.eq",
inputs={"x": gen_name(98)},
outputs=[gen_name(99)],
y=5)
pattern_block_block.add_layer(
"prim.if",
inputs={"input": gen_name(99)},
outputs=[gen_name(100)])
if_layer16 = pattern_block_block.layers[list(
pattern_block_block.layers.keys())[-1]]
pattern_block_block_block = PaddleGraph(
if_layer16, graph_type="dygraph")
pattern_block_block_block.add_layer(
"prim.exception",
inputs={},
outputs=[gen_name(101)],
input="Exception")
if_layer16.add_block(pattern_block_block_block)
pattern_block_block_block = PaddleGraph(
if_layer16, graph_type="dygraph")
pattern_block_block_block.add_layer(
"prim.exception",
inputs={},
outputs=[gen_name(102)],
input="Exception")
if_layer16.add_block(pattern_block_block_block)
pattern_block_block.add_layer(
"prim.equal", inputs={}, outputs=[gen_name(20)], input=None)
if_layer2.add_block(pattern_block_block)
if_layer2.inputs.update({
"input-0": gen_name(13),
"input-1": gen_name(13),
"input-2": "interpolate-input-0",
"input-3": gen_name(11),
"input-5": gen_name(11),
})
pattern_block.add_layer(
"prim.equal",
inputs={"input": gen_name(20)},
outputs=[gen_name(16)])
if_layer1.add_block(pattern_block)
if_layer1.inputs.update({
"input-2": "interpolate-input-0",
"input-4": gen_name(13),
"input-7": gen_name(11),
"input-9": gen_name(11),
"input-11": "interpolate-input-0",
"input-12": "interpolate-input-0",
})
self.pattern.build(inputs={
"input-0": "interpolate-input-0",
"input-1": "interpolate-input-1"
})
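For orientation, both pattern branches above trace the size/scale_factor argument checking that TorchScript inlines around a bilinear interpolate call. A rough paraphrase of the logic being matched (illustrative only, not the exact torch source):
```
import math

def infer_output_size(input_shape, size, scale_factor):
    # Illustrative paraphrase of the checks encoded by the pattern above.
    dim = len(input_shape) - 2
    if size is None and scale_factor is None:
        raise ValueError("either size or scale_factor should be defined")
    if size is not None and scale_factor is not None:
        raise ValueError("only one of size or scale_factor should be defined")
    if size is not None:
        return size
    if len(scale_factor) != dim:
        raise ValueError("scale_factor length must match the spatial rank")
    # one floor(shape[i + 2] * scale) per spatial dimension
    return [math.floor(input_shape[i + 2] * scale_factor[i])
            for i in range(dim)]
```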
    def insert_new_layer(self, graph, parameters, matches):
        new_layers = self.gen_new_layer(parameters, matches)
        # Overwrite the first three matched graph positions with the rewired
        # layers, popping each from `matches` so they are not deleted with
        # the rest of the match.
        new_layer_id = list(matches.keys())[0]
        graph.layers[new_layer_id] = new_layers[0]
        matches.pop(new_layer_id)
        new_layer_id = list(matches.keys())[0]
        graph.layers[new_layer_id] = new_layers[1]
        # Re-key the single layer inside the kept if-layer's first block so
        # its id reflects the layer's new position in the graph.
        block_layer = new_layers[1].blocks[0].layers.pop(
            list(new_layers[1].blocks[0].layers.keys())[-1])
        new_layers[1].blocks[0].layers[new_layer_id + ".0.0"] = block_layer
        matches.pop(new_layer_id)
        new_layer_id = list(matches.keys())[0]
        graph.layers[new_layer_id] = new_layers[2]
        matches.pop(new_layer_id)
    def gen_new_layer(self, parameters, matches):
        layers = list()
        layers_id = list(matches.keys())
        # The 7th matched layer is the prim.tuple packing the call arguments;
        # its second input is the requested output size.
        layer = matches[layers_id[6]]
        size = layer.inputs["input1"]
        # Rewire the prim.isinstance check to test the size input directly.
        layer = matches[layers_id[92]]
        layer.inputs["input"] = size
        layers.append(layer)
        # Rewire the matching prim.if layer: the var2list layer in its first
        # block now reads and writes the size name.
        layer = matches[layers_id[93]]
        block_layer = layer.blocks[0].layers[list(layer.blocks[0].layers.keys())
                                             [0]]
        block_layer.inputs["input"] = size
        block_layer.outputs[0] = size
        layer.inputs["input-0"] = size
        layers.append(layer)
        # The fused interpolate call keeps the outputs of the last matched
        # layer, takes the size directly, and drops scale_factor.
        layer = matches[layers_id[-1]]
        outputs = layer.outputs
        layer = matches[layers_id[96]]
        layer.inputs.pop("scale_factor")
        layer.inputs["size"] = size
        layer.outputs = outputs
        layers.append(layer)
        return layers
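Once `insert_new_layer` has rewired the three kept layers, the matched control-flow thicket reduces to a short sequence ending in a single interpolate call. A sketch of the emitted code (names are illustrative, not taken from a real conversion):
```
# x_is_var = isinstance(size, paddle.fluid.Variable)   # kept prim.isinstance
# if x_is_var:                                         # kept prim.if
#     size = <var2list>(size)   # its block converts the Variable to a list
# out = paddle.nn.functional.interpolate(
#     input=x, size=size, align_corners=False, align_mode=0)
```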
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion import ReshapeFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class ReshapeFusePass(Pass):
name = "reshape_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = ReshapeFuser()
fuser.operate(graph, match_kind="edge")
# Used for registration.
reshape_fuse_pass = ReshapeFusePass()
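A minimal driving sketch (hypothetical: `graph` stands for a dygraph PaddleGraph produced by the PyTorch front end; in normal conversion the pass runs via GraphOptimizer rather than by hand):
```
# pass_ = ReshapeFusePass()
# pass_.apply(graph)   # runs ReshapeFuser().operate(graph, match_kind="edge")
```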
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class ReshapeFuser(FuseBase):
def __init__(self):
super(ReshapeFuser, self).__init__(graph_type="dygraph")
def build_pattern(self):
""" 描述需要替换的reshape图结构。
reshape层模式python实现代码示例:
x165 = int(x164)
x166 = [x158, x159, x165]
x167 = fluid.layers.reshape(x=x157, shape=x166)
"""
def gen_name(id):
return "x" + str(id)
self.pattern.add_layer(
"prim.int",
inputs={"input": "reshape-input-0"},
outputs=[gen_name(0)])
self.pattern.add_layer(
"prim.list",
inputs={
"input0": "reshape-input-1",
"input1": "reshape-input-2",
"input2": gen_name(0)
},
outputs=[gen_name(1)])
self.pattern.add_layer(
"fluid.layers.reshape",
inputs={"x": "reshape-input-3",
"shape": gen_name(1)},
outputs=[gen_name(2)])
self.pattern.build(inputs={
"input-0": "reshape-input-0",
"input-1": "reshape-input-1",
"input-2": "reshape-input-2",
"input-3": "reshape-input-3",
})
    def insert_new_layer(self, graph, parameters, matches):
        self.update_layer(matches)
        # Pop the prim.list and fluid.layers.reshape layers so that only the
        # prim.int layer stays in `matches` and gets removed from the graph.
        matches.pop(list(matches.keys())[1])
        matches.pop(list(matches.keys())[1])
def update_layer(self, matches):
layers_id = list(matches.keys())
layer = matches[layers_id[0]]
int_input_name = layer.inputs["input"]
output_name = layer.outputs[0]
layer = matches[layers_id[1]]
for key, input_name in layer.inputs.items():
if input_name == output_name:
layer.inputs[key] = int_input_name
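On the docstring's example the effect is: `update_layer` rewires the `prim.list` layer to read the int cast's input directly, and `insert_new_layer` leaves only the `prim.int` layer in `matches`, so that cast is what the fuser framework then drops. Sketch (names from the docstring):
```
# before fusion:
#     x165 = int(x164)
#     x166 = [x158, x159, x165]
#     x167 = fluid.layers.reshape(x=x157, shape=x166)
# after fusion (the int cast disappears; reshape takes the raw value):
#     x166 = [x158, x159, x164]
#     x167 = fluid.layers.reshape(x=x157, shape=x166)
```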
......@@ -19,11 +19,10 @@ from x2paddle.optimizer.pass_manager import PassManager
class GraphOptimizer(object):
def __init__(self):
self.passes = [
"fc_fuse_pass",
# "nn_adaptive_pool2d_fuse_pass",
# "functional_adaptive_pool2d_fuse_pass",
# "batchnorm2d_fuse_pass",
"constant_fuse_pass"
"constant_fuse_pass", "batchnorm2d_fuse_pass",
"interpolate_bilinear_fuse_pass", "fc_fuse_pass",
"adaptive_pool2d_fuse_pass", "reshape_fuse_pass",
"dropout_fuse_pass"
]
def optimize(self, graph):
......
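A minimal usage sketch (hypothetical driver; the `optimize` body is collapsed in this view, but its signature above takes the decoded graph):
```
# optimizer = GraphOptimizer()
# optimizer.optimize(graph)   # graph: the dygraph PaddleGraph to be fused
```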
......@@ -34,7 +34,7 @@ class PatternMatcher(object):
        and store the subgraph's layer ids into subgraph_id2layers in topological order.
"""
        def get_subgraph(pattern, graph, start_index, is_subblock=False):
pattern_index = 0
pattern_id2layers = pattern.get_global_layers()
pattern_ids = list(pattern_id2layers.keys())
......@@ -49,11 +49,19 @@ class PatternMatcher(object):
                # Check whether the input connections are consistent.
if layer_id in graph.edges_in:
if pattern_layer_id not in pattern.edges_in:
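                    # Fail the whole match only when the mismatch hits the
                    # pattern head (pattern_index == 0) or occurs inside a
                    # sub-block; otherwise drop this layer from the candidate
                    # subgraph and keep scanning.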
if pattern_index == 0 or is_subblock:
return False
else:
subgraph_id2layers.pop(layer_id)
continue
else:
if len(graph.edges_in[layer_id]) != len(
pattern.edges_in[pattern_layer_id]):
if pattern_index == 0 or is_subblock:
return False
else:
subgraph_id2layers.pop(layer_id)
continue
layer_in = graph.edges_in[layer_id]
pattern_layer_in = pattern.edges_in[pattern_layer_id]
for i in range(len(layer_in)):
......@@ -61,19 +69,29 @@ class PatternMatcher(object):
pattern_layer_id_in = pattern_layer_in[i]
if pattern_layer_id_in != -1:
subgraph_ids = list(subgraph_id2layers.keys())
if layer_id_in not in subgraph_ids:
return False
if pattern_ids.index(pattern_layer_id_in) == \
subgraph_ids.index(layer_id_in):
                                # The pattern input's index within pattern_ids
                                # matches the graph input's index within
                                # subgraph_ids.
continue
if pattern_index == 0 or is_subblock:
return False
else:
subgraph_id2layers.pop(layer_id)
continue
                # Check whether nodes inside the subgraph are consumed by the
                # outer graph (the match is invalid if they are).
if layer_id in graph.edges_out:
if pattern_layer_id not in pattern.edges_out:
if not set(pattern_layer.outputs).issubset(
pattern.outputs):
                        # If the current pattern layer's outputs are outputs
                        # of the pattern itself, the match is still correct.
if pattern_index == 0 or is_subblock:
return False
else:
subgraph_id2layers.pop(layer_id)
continue
else:
if len(graph.edges_out[layer_id]) != len(
pattern.edges_out[pattern_layer_id]):
......@@ -81,24 +99,52 @@ class PatternMatcher(object):
if not set(pattern_layer.outputs).issubset(
pattern.outputs):
                            # If the current pattern layer's outputs are
                            # outputs of the pattern itself, the match is
                            # still correct.
if pattern_index == 0 or is_subblock:
return False
else:
subgraph_id2layers.pop(layer_id)
continue
                # Handling for control-flow layers (prim.if / prim.loop).
if layer.kernel == "prim.if" or layer.kernel == "prim.loop":
if len(pattern_layer.blocks) != len(layer.blocks):
if pattern_index == 0 or is_subblock:
return False
else:
subgraph_id2layers.pop(layer_id)
continue
is_subblock_match = True
for i, b in enumerate(pattern_layer.blocks):
                        match_info = get_subgraph(
                            pattern_layer.blocks[i],
                            layer.blocks[i],
                            0,
                            is_subblock=True)
if match_info is not False:
subgraph_id2layers.update(match_info)
else:
is_subblock_match = False
break
if not is_subblock_match:
if pattern_index == 0 or is_subblock:
return False
else:
index = list(subgraph_id2layers.keys()).index(
layer_id)
for key in list(subgraph_id2layers.keys())[
index:]:
subgraph_id2layers.pop(key)
continue
pattern_index += 1
if pattern_index == len(pattern.layers):
return subgraph_id2layers
else:
if pattern_index == 0 or is_subblock:
return False
else:
continue
if pattern_index == len(pattern.layers):
return subgraph_id2layers
return False
for i, (layer_id, layer) in enumerate(graph.layers.items()):
match_info = get_subgraph(self.pattern, graph, i)
......@@ -108,10 +154,70 @@ class PatternMatcher(object):
if len(block.layers) > 0:
self.detect_patterns_by_topo(layer.blocks[j])
    def detect_patterns_by_edge(self, graph, ignore_list_inputs=True):
"""当遇见顺序没有强制规定的pattern时使用该方式
"""
pass
def get_subgraph(pattern, graph, start_index):
pattern_id2layers = pattern.get_global_layers()
pattern_ids = list(pattern_id2layers.keys())
pattern_layer_id = pattern_ids[0]
subgraph_id2layers = dict()
graph_layers = dict(list(graph.layers.items())[start_index:])
layer_id = list(graph_layers.keys())[0]
def update(layer_id, pattern_layer_id):
layer = graph_layers[layer_id]
pattern_layer = pattern_id2layers[pattern_layer_id]
if layer.kernel != pattern_layer.kernel:
return False
subgraph_id2layers[layer_id] = layer
for i, pattern_layer_id_in in enumerate(pattern.edges_in[
pattern_layer_id]):
if pattern_layer_id_in == -1 or ignore_list_inputs:
continue
layer_id_in = graph.edges_in[layer_id][i]
subgraph_ids = list(subgraph_id2layers.keys())
if layer_id_in not in subgraph_ids:
return False
if pattern.edges_out.get(pattern_layer_id, 0) != 0:
if len(pattern.edges_out[pattern_layer_id]) != \
len(graph.edges_out[layer_id]):
return False
for i, pattern_layer_id_out in enumerate(pattern.edges_out[
pattern_layer_id]):
if pattern_layer_id_out in pattern_ids:
new_layer_id_out = graph.edges_out[layer_id][i]
for j, new_new_layer_id_in in enumerate(
graph.edges_in[new_layer_id_out]):
if new_new_layer_id_in not in subgraph_id2layers:
if ignore_list_inputs:
continue
new_new_pattern_layer_id_in = pattern.edges_in[
pattern_layer_id_out][j]
if new_new_pattern_layer_id_in == -1:
continue
update(new_new_layer_id_in,
new_new_pattern_layer_id_in)
update(new_layer_id_out, pattern_layer_id_out)
while len(subgraph_id2layers) != len(pattern_id2layers):
out = update(layer_id, pattern_layer_id)
if out == False:
return False
else:
if len(subgraph_id2layers) == len(pattern_id2layers):
return subgraph_id2layers
else:
return False
for i, (layer_id, layer) in enumerate(graph.layers.items()):
match_info = get_subgraph(self.pattern, graph, i)
if match_info:
self.matches.append(match_info)
for j, block in enumerate(layer.blocks):
if len(block.layers) > 0:
self.detect_patterns_by_edge(layer.blocks[j])
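The `match_kind="edge"` argument seen in `ReshapeFusePass` selects this matcher; fusers whose pattern has a fixed layer order go through `detect_patterns_by_topo` instead (the `"topo"` literal below is inferred from that method's name, not shown in this excerpt):
```
# fuser.operate(graph, match_kind="edge")   # order-insensitive, edge-driven
# fuser.operate(graph, match_kind="topo")   # strict topological order
```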
def remove_overlapped_match(self):
""" 如果2个子图有重叠,只取前一个子图。
......
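A hypothetical sketch of the rule the docstring states, since the body itself is collapsed in this view (earlier matches win; this is not the actual implementation):
```
# kept, seen = [], set()
# for match in self.matches:        # matches are kept in discovery order
#     if seen.isdisjoint(match.keys()):
#         kept.append(match)
#         seen.update(match.keys())
# self.matches = kept
```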