From a33bf0f09dd532ab76cb5b65a0dc22fe8bf0e1ca Mon Sep 17 00:00:00 2001 From: SunAhong1993 Date: Thu, 17 Sep 2020 14:11:23 +0800 Subject: [PATCH] add pytorch code and docs --- README.md | 7 +- pytorch_to_script.md | 57 + x2paddle/__init__.py | 4 +- x2paddle/convert.py | 102 +- x2paddle/core/program.py | 442 +- x2paddle/decoder/pytorch_decoder.py | 34 + x2paddle/op_mapper/pytorch2paddle/__init__.py | 0 .../__pycache__/__init__.cpython-37.pyc | Bin 0 -> 167 bytes .../__pycache__/aten.cpython-37.pyc | Bin 0 -> 89800 bytes .../__pycache__/prim.cpython-37.pyc | Bin 0 -> 13490 bytes .../__pycache__/prim2code.cpython-37.pyc | Bin 0 -> 14460 bytes .../pytorch_op_mapper.cpython-37.pyc | Bin 0 -> 5896 bytes x2paddle/op_mapper/pytorch2paddle/aten.py | 4153 +++++++++++++++++ x2paddle/op_mapper/pytorch2paddle/prim.py | 541 +++ .../op_mapper/pytorch2paddle/prim2code.py | 392 ++ .../pytorch2paddle/pytorch_op_mapper.py | 249 + .../__pycache__/optimizer.cpython-37.pyc | Bin 0 -> 1041 bytes .../__pycache__/pass_.cpython-37.pyc | Bin 0 -> 758 bytes .../__pycache__/pass_manager.cpython-37.pyc | Bin 0 -> 1118 bytes .../pattern_matcher.cpython-37.pyc | Bin 0 -> 7351 bytes .../pytorch_optimizer/fusion/__init__.py | 28 + .../__pycache__/__init__.cpython-37.pyc | Bin 0 -> 1006 bytes .../adaptive_pool2d_fuse_pass.cpython-37.pyc | Bin 0 -> 999 bytes .../adaptive_pool2d_fuser.cpython-37.pyc | Bin 0 -> 3896 bytes .../batchnorm2d_fuse_pass.cpython-37.pyc | Bin 0 -> 979 bytes .../batchnorm2d_fuser.cpython-37.pyc | Bin 0 -> 4213 bytes .../constant_fuse_pass.cpython-37.pyc | Bin 0 -> 961 bytes .../__pycache__/constant_fuser.cpython-37.pyc | Bin 0 -> 2174 bytes .../dropout_fuse_pass.cpython-37.pyc | Bin 0 -> 955 bytes .../__pycache__/dropout_fuser.cpython-37.pyc | Bin 0 -> 2155 bytes .../__pycache__/fc_fuse_pass.cpython-37.pyc | Bin 0 -> 925 bytes .../__pycache__/fc_fuser.cpython-37.pyc | Bin 0 -> 4356 bytes .../interpolate_bilinear_fuse.cpython-37.pyc | Bin 0 -> 18420 bytes 
...erpolate_bilinear_fuse_pass.cpython-37.pyc | Bin 0 -> 1029 bytes .../interpolate_bilinear_fuser.cpython-37.pyc | Bin 0 -> 23130 bytes .../reshape_fuse_pass.cpython-37.pyc | Bin 0 -> 955 bytes .../__pycache__/reshape_fuser.cpython-37.pyc | Bin 0 -> 2292 bytes .../fusion/adaptive_pool2d_fuse_pass.py | 33 + .../fusion/adaptive_pool2d_fuser.py | 133 + .../fusion/batchnorm2d_fuse_pass.py | 33 + .../fusion/batchnorm2d_fuser.py | 158 + .../fusion/constant_fuse_pass.py | 33 + .../fusion/constant_fuser.py | 63 + .../fusion/dropout_fuse_pass.py | 33 + .../pytorch_optimizer/fusion/dropout_fuser.py | 60 + .../pytorch_optimizer/fusion/fc_fuse_pass.py | 33 + .../pytorch_optimizer/fusion/fc_fuser.py | 158 + .../fusion/interpolate_bilinear_fuse_pass.py | 33 + .../fusion/interpolate_bilinear_fuser.py | 1552 ++++++ .../fusion/reshape_fuse_pass.py | 33 + .../pytorch_optimizer/fusion/reshape_fuser.py | 73 + .../optimizer/pytorch_optimizer/optimizer.py | 33 + x2paddle/optimizer/pytorch_optimizer/pass_.py | 27 + .../pytorch_optimizer/pass_manager.py | 42 + .../pytorch_optimizer/pattern_matcher.py | 293 ++ 55 files changed, 8752 insertions(+), 80 deletions(-) create mode 100644 pytorch_to_script.md create mode 100644 x2paddle/decoder/pytorch_decoder.py create mode 100644 x2paddle/op_mapper/pytorch2paddle/__init__.py create mode 100644 x2paddle/op_mapper/pytorch2paddle/__pycache__/__init__.cpython-37.pyc create mode 100644 x2paddle/op_mapper/pytorch2paddle/__pycache__/aten.cpython-37.pyc create mode 100644 x2paddle/op_mapper/pytorch2paddle/__pycache__/prim.cpython-37.pyc create mode 100644 x2paddle/op_mapper/pytorch2paddle/__pycache__/prim2code.cpython-37.pyc create mode 100644 x2paddle/op_mapper/pytorch2paddle/__pycache__/pytorch_op_mapper.cpython-37.pyc create mode 100644 x2paddle/op_mapper/pytorch2paddle/aten.py create mode 100644 x2paddle/op_mapper/pytorch2paddle/prim.py create mode 100644 x2paddle/op_mapper/pytorch2paddle/prim2code.py create mode 100644 
x2paddle/op_mapper/pytorch2paddle/pytorch_op_mapper.py create mode 100644 x2paddle/optimizer/pytorch_optimizer/__pycache__/optimizer.cpython-37.pyc create mode 100644 x2paddle/optimizer/pytorch_optimizer/__pycache__/pass_.cpython-37.pyc create mode 100644 x2paddle/optimizer/pytorch_optimizer/__pycache__/pass_manager.cpython-37.pyc create mode 100644 x2paddle/optimizer/pytorch_optimizer/__pycache__/pattern_matcher.cpython-37.pyc create mode 100644 x2paddle/optimizer/pytorch_optimizer/fusion/__init__.py create mode 100644 x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/__init__.cpython-37.pyc create mode 100644 x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/adaptive_pool2d_fuse_pass.cpython-37.pyc create mode 100644 x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/adaptive_pool2d_fuser.cpython-37.pyc create mode 100644 x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/batchnorm2d_fuse_pass.cpython-37.pyc create mode 100644 x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/batchnorm2d_fuser.cpython-37.pyc create mode 100644 x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/constant_fuse_pass.cpython-37.pyc create mode 100644 x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/constant_fuser.cpython-37.pyc create mode 100644 x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/dropout_fuse_pass.cpython-37.pyc create mode 100644 x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/dropout_fuser.cpython-37.pyc create mode 100644 x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/fc_fuse_pass.cpython-37.pyc create mode 100644 x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/fc_fuser.cpython-37.pyc create mode 100644 x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/interpolate_bilinear_fuse.cpython-37.pyc create mode 100644 x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/interpolate_bilinear_fuse_pass.cpython-37.pyc create mode 100644 
x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/interpolate_bilinear_fuser.cpython-37.pyc create mode 100644 x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/reshape_fuse_pass.cpython-37.pyc create mode 100644 x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/reshape_fuser.cpython-37.pyc create mode 100644 x2paddle/optimizer/pytorch_optimizer/fusion/adaptive_pool2d_fuse_pass.py create mode 100644 x2paddle/optimizer/pytorch_optimizer/fusion/adaptive_pool2d_fuser.py create mode 100644 x2paddle/optimizer/pytorch_optimizer/fusion/batchnorm2d_fuse_pass.py create mode 100644 x2paddle/optimizer/pytorch_optimizer/fusion/batchnorm2d_fuser.py create mode 100644 x2paddle/optimizer/pytorch_optimizer/fusion/constant_fuse_pass.py create mode 100644 x2paddle/optimizer/pytorch_optimizer/fusion/constant_fuser.py create mode 100644 x2paddle/optimizer/pytorch_optimizer/fusion/dropout_fuse_pass.py create mode 100644 x2paddle/optimizer/pytorch_optimizer/fusion/dropout_fuser.py create mode 100644 x2paddle/optimizer/pytorch_optimizer/fusion/fc_fuse_pass.py create mode 100644 x2paddle/optimizer/pytorch_optimizer/fusion/fc_fuser.py create mode 100644 x2paddle/optimizer/pytorch_optimizer/fusion/interpolate_bilinear_fuse_pass.py create mode 100644 x2paddle/optimizer/pytorch_optimizer/fusion/interpolate_bilinear_fuser.py create mode 100644 x2paddle/optimizer/pytorch_optimizer/fusion/reshape_fuse_pass.py create mode 100644 x2paddle/optimizer/pytorch_optimizer/fusion/reshape_fuser.py create mode 100644 x2paddle/optimizer/pytorch_optimizer/optimizer.py create mode 100644 x2paddle/optimizer/pytorch_optimizer/pass_.py create mode 100644 x2paddle/optimizer/pytorch_optimizer/pass_manager.py create mode 100644 x2paddle/optimizer/pytorch_optimizer/pattern_matcher.py diff --git a/README.md b/README.md index 05bd30e..854d79f 100644 --- a/README.md +++ b/README.md @@ -44,6 +44,10 @@ x2paddle --framework=caffe --prototxt=deploy.prototxt --weight=deploy.caffemodel ``` x2paddle 
--framework=onnx --model=onnx_model.onnx --save_dir=pd_model ``` +### PyTorch +``` +x2paddle --framework=pytorch --model=resnet50.pt --save_dir=pd_model --input_shapes [-1,3,224,224] +``` ### Paddle2ONNX ``` # 注意:paddle_infer_model_dir下需包含__model__和__params__两个文件 @@ -52,7 +56,7 @@ x2paddle --framework=paddle2onnx --model=paddle_infer_model_dir --save_dir=onnx_ ### 参数选项 | 参数 | | |----------|--------------| -|--framework | 源模型类型 (tensorflow、caffe、onnx、paddle2onnx) | +|--framework | 源模型类型 (tensorflow、caffe、onnx、pytorch、paddle2onnx) | |--prototxt | 当framework为caffe时,该参数指定caffe模型的proto文件路径 | |--weight | 当framework为caffe时,该参数指定caffe模型的参数文件路径 | |--save_dir | 指定转换后的模型保存目录路径 | @@ -62,6 +66,7 @@ x2paddle --framework=paddle2onnx --model=paddle_infer_model_dir --save_dir=onnx_ |--define_input_shape | **[可选]** For TensorFlow, 当指定该参数时,强制用户输入每个Placeholder的shape,见[文档Q2](FAQ.md) | |--params_merge | **[可选]** 当指定该参数时,转换完成后,inference_model中的所有模型参数将合并保存为一个文件__params__ | |--onnx_opset | **[可选]** 当framework为paddle2onnx时,该参数可设置转换为ONNX的OpSet版本,目前支持9、10、11,默认为10 | +|--input_shapes |**[可选]** 当framework为pytorch时,该参数若设置,则根据输入的shape导出inference model(用于预测的静态模型)| diff --git a/pytorch_to_script.md b/pytorch_to_script.md new file mode 100644 index 0000000..229c916 --- /dev/null +++ b/pytorch_to_script.md @@ -0,0 +1,57 @@ +## PyTorch模型导出为ONNX模型 + +目前pytorch2paddle主要支持pytorch ScriptModule。 用户可通过如下示例代码,将torchvison或者自己开发写的模型转换成ScriptModule model: +``` +#coding: utf-8 +import torch +import torch.nn as nn +from torchvision.models.utils import load_state_dict_from_url +# 定义模型 +class AlexNet(nn.Module): + def __init__(self, num_classes=1000): + super(AlexNet, self).__init__() + self.features = nn.Sequential( + nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2), + nn.Conv2d(64, 192, kernel_size=5, padding=2), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2), + nn.Conv2d(192, 384, kernel_size=3, padding=1), + 
nn.ReLU(inplace=True), + nn.Conv2d(384, 256, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(256, 256, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2), + ) + self.avgpool = nn.AdaptiveAvgPool2d((6, 6)) + self.classifier = nn.Sequential( + nn.Dropout(0.0), + nn.Linear(256 * 6 * 6, 4096), + nn.ReLU(inplace=True), + nn.Dropout(0.0), + nn.Linear(4096, 4096), + nn.ReLU(inplace=True), + nn.Linear(4096, num_classes), + ) + + def forward(self, x): + x = self.features(x) + for i in range(1): + x = self.avgpool(x) + x = torch.flatten(x, 1) + x = self.classifier(x) + return x +# 初始化模型 +model = AlexNet() +# 加载参数 +state_dict = load_state_dict_from_url('https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth', + progress=True) +model.load_state_dict(state_dict) +# 设置模式 +model.eval() +# 生成ScriptModule并保存 +script = torch.jit.script(model) +torch.jit.save(script, "alexnet.pt") +``` diff --git a/x2paddle/__init__.py b/x2paddle/__init__.py index 5b80bf1..9a41e3b 100644 --- a/x2paddle/__init__.py +++ b/x2paddle/__init__.py @@ -1,8 +1,8 @@ __version__ = "0.8.4" -from .core.program import PaddleProgram +from .core.program import PaddleGraph -program = PaddleProgram() +program = PaddleGraph() name_counter = dict() diff --git a/x2paddle/convert.py b/x2paddle/convert.py index c3ba722..1503913 100644 --- a/x2paddle/convert.py +++ b/x2paddle/convert.py @@ -13,6 +13,7 @@ # limitations under the License. 
from six import text_type as _text_type +from x2paddle import program import argparse import sys @@ -66,8 +67,8 @@ def arg_parser(): parser.add_argument( "--without_data_format_optimization", "-wo", - type=_text_type, - default="True", + action="store_true", + default=False, help="tf model conversion without data format optimization") parser.add_argument( "--define_input_shape", @@ -87,13 +88,19 @@ def arg_parser(): action="store_true", default=False, help="define whether merge the params") + parser.add_argument( + "--input_shapes", + "-is", + action='append', + default=None, + help="define the inputs' shape") return parser def tf2paddle(model_path, save_dir, - without_data_format_optimization, + without_data_format_optimization=False, define_input_shape=False, params_merge=False): # check tensorflow installation and version @@ -120,29 +127,10 @@ def tf2paddle(model_path, print("Now translating model from tensorflow to paddle.") model = TFDecoder(model_path, define_input_shape=define_input_shape) - if not without_data_format_optimization: - mapper = TFOpMapper(model) - optimizer = TFOptimizer(mapper) - # neccesary optimization - optimizer.delete_redundance_code() - # optimizer below is experimental - optimizer.optimize_elementwise_op() - optimizer.merge_activation() - optimizer.merge_bias() - optimizer.optimize_sub_graph() - -# optimizer.merge_batch_norm() -# optimizer.merge_prelu() - else: - mapper = TFOpMapperNHWC(model) - optimizer = TFOptimizer(mapper) - optimizer.delete_redundance_code() - optimizer.strip_graph() - optimizer.merge_activation() - optimizer.merge_bias() - optimizer.make_nchw_input_output() - optimizer.remove_transpose() - mapper.save_inference_model(save_dir, params_merge) + + mapper = TFOpMapperNHWC(model) + program.build() + program.gen_model(save_dir) def caffe2paddle(proto, weight, save_dir, caffe_proto, params_merge=False): @@ -170,8 +158,8 @@ def onnx2paddle(model_path, save_dir, params_merge=False): try: import onnx version = 
onnx.version.version - if version < '1.6.0': - print("[ERROR] onnx>=1.6.0 is required") + if version != '1.6.0': + print("[ERROR] onnx==1.6.0 is required") return except: print("[ERROR] onnx is not installed, use \"pip install onnx==1.6.0\".") @@ -192,17 +180,51 @@ def onnx2paddle(model_path, save_dir, params_merge=False): print("Paddle model and code generated.") +def pytorch2paddle(model_path, save_dir, input_shapes): + # check pytorch installation and version + try: + import torch + version = torch.__version__ + ver_part = version.split('.') + print(ver_part) + if int(ver_part[1]) < 5: + print("[ERROR] pytorch>=1.5.0 is required") + return + except: + print( + "[ERROR] Pytorch is not installed, use \"pip install torch==1.5.0 torchvision\"." + ) + return + print("Now translating model from pytorch to paddle.") + + from x2paddle.decoder.pytorch_decoder import PyTorchDecoder + from x2paddle.op_mapper.pytorch2paddle import pytorch_op_mapper + model = PyTorchDecoder(model_path) + mapper = pytorch_op_mapper.PyTorchOpMapper(model) + mapper.graph.build() + print("Model optimizing ...") + from x2paddle.optimizer.pytorch_optimizer.optimizer import GraphOptimizer + graph_opt = GraphOptimizer() + graph_opt.optimize(mapper.graph) + print("Model optimized.") + if input_shapes is not None: + real_input_shapes = list() + for shape in input_shapes: + sp = shape[1:-1].split(",") + for i, s in enumerate(sp): + sp[i] = int(s) + real_input_shapes.append(sp) + else: + real_input_shapes = None + mapper.graph.gen_model(save_dir, real_input_shapes) + + def paddle2onnx(model_path, save_dir, opset_version=10): from x2paddle.decoder.paddle_decoder import PaddleDecoder from x2paddle.op_mapper.paddle2onnx.paddle_op_mapper import PaddleOpMapper - import paddle.fluid as fluid model = PaddleDecoder(model_path, '__model__', '__params__') mapper = PaddleOpMapper() - mapper.convert( - model.program, - save_dir, - scope=fluid.global_scope(), - opset_version=opset_version) + 
mapper.convert(model.program, save_dir, opset_number=opset_version) def main(): @@ -240,12 +262,11 @@ def main(): if args.framework == "tensorflow": assert args.model is not None, "--model should be defined while translating tensorflow model" - assert args.without_data_format_optimization in [ - "True", "False" - ], "--the param without_data_format_optimization should be defined True or False" + without_data_format_optimization = False define_input_shape = False params_merge = False - without_data_format_optimization = True if args.without_data_format_optimization == "True" else False + if args.without_data_format_optimization: + without_data_format_optimization = True if args.define_input_shape: define_input_shape = True if args.params_merge: @@ -267,10 +288,13 @@ def main(): if args.params_merge: params_merge = True onnx2paddle(args.model, args.save_dir, params_merge) + elif args.framework == "pytorch": + assert args.model is not None, "--model should be defined while translating pytorch model" + pytorch2paddle(args.model, args.save_dir, args.input_shapes) elif args.framework == "paddle2onnx": assert args.model is not None, "--model should be defined while translating paddle model to onnx" - paddle2onnx(args.model, args.save_dir, opset_version=args.onnx_opset) + paddle2onnx(args.model, args.save_dir, args.onnx_opset) else: raise Exception( diff --git a/x2paddle/core/program.py b/x2paddle/core/program.py index 08e4bf8..9f35a8b 100644 --- a/x2paddle/core/program.py +++ b/x2paddle/core/program.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License" # you may not use this file except in compliance with the License. 
@@ -14,62 +14,161 @@ from __future__ import print_function from __future__ import division +import paddle.fluid as fluid +import os.path as osp +import paddle +from paddle.fluid.proto import framework_pb2 +from collections import OrderedDict +import numpy import collections +import sys import os +import six +import pickle class PaddleLayer(object): - def __init__(self, kernel, inputs, outputs, **kwargs): + def __init__(self, id, kernel, inputs, outputs, **kwargs): assert isinstance( inputs, dict), "parameter 'inputs' for PaddleLayer should be type of dict" assert isinstance( outputs, - list), "parameter, 'outputs' for PaddleLayer should be type of list" + list), "parameter 'outputs' for PaddleLayer should be type of list" + for k, v in inputs.items(): + if isinstance(v, list): + for i in v: + assert isinstance( + i, six.string_types + ), "value in inputs should be type of string or list of string" + else: + assert isinstance(v, six.string_types) or isinstance( + v, list + ), "value in inputs should be type of string or list of string" + for v in outputs: + assert isinstance( + v, six. 
+ string_types), "elements in outputs should be type of string" self.kernel = kernel self.inputs = inputs self.outputs = outputs self.attrs = kwargs + self.id = id + self.blocks = list() + + def add_block(self, block): + self.blocks.append(block) -class PaddleProgram(object): - def __init__(self): - self.layers = list() +class PaddleGraph(object): + def __init__(self, parent_layer=None, graph_type="static"): + self.layers = OrderedDict() self.edges_out = dict() self.edges_in = dict() self.inputs = list() self.outputs = list() self.parameters = dict() + self.parent_layer = parent_layer + self.graph_type = graph_type + + def set_name(self, name): + self.name = name + + def set_parameters(self, parameters): + self.parameters = parameters + + def clear(self): + self.layers = OrderedDict() + self.edges_out = dict() + self.edges_in = dict() + self.inputs = list() + self.outputs = list() + self.parameters = dict() + + def clear_edges(self): + self.edges_out = dict() + self.edges_in = dict() def add_layer(self, kernel, inputs, outputs, **kwargs): - layer = PaddleLayer(kernel, inputs, outputs, **kwargs) - self.layers.append(layer) + layer_id = str(len(self.layers)) + if self.parent_layer is not None: + layer_id = "{}.{}.{}".format(self.parent_layer.id, + len(self.parent_layer.blocks), + layer_id) + layer = PaddleLayer(layer_id, kernel, inputs, outputs, **kwargs) + self.layers[layer_id] = layer + return layer_id - def build(self): - outputs = dict() - for i in range(len(self.layers)): - layer = self.layers[i] + def build(self, inputs=None, outputs=None): + self.clear_edges() + outputs_from_nodes = dict() + for layer_id, layer in self.layers.items(): + for input_key, input_var in layer.inputs.items(): + vs = input_var + if not isinstance(vs, list): + vs = [vs] + for v in vs: + assert v in outputs_from_nodes or ( + inputs is not None and v in list(inputs.values()) + ) or ( + outputs is not None and v in outputs + ), "Couldn't find {} in previous layers, the layers should be 
make by topological sort".format( + v) + if v in outputs_from_nodes: + in_layer_id = outputs_from_nodes[v] + else: + in_layer_id = -1 + if in_layer_id not in self.edges_out: + self.edges_out[in_layer_id] = list() + self.edges_out[in_layer_id].append(layer_id) + + if layer_id not in self.edges_in: + self.edges_in[layer_id] = list() + self.edges_in[layer_id].append(in_layer_id) for output in layer.outputs: - outputs[output] = i + outputs_from_nodes[output] = layer_id - for k, v in layer.inputs.items(): - assert v in outputs, "Couldn't find {} in previous layers, the layers should be make by topological sort".format( - v) - in_layer_index = outputs[v] + # 将block的输出用于父图 + if inputs is not None and outputs is not None and set( + layer.outputs).issubset(outputs): + if layer_id not in self.edges_out: + self.edges_out[layer_id] = list() + self.edges_out[layer_id].append(-1) - if in_layer_index not in self.edges_out: - self.edges_out[in_layer_index] = list() - self.edges_out[in_layer_index].append(i) + # 处理子图 + if len(layer.blocks) > 0: + for block in layer.blocks: + block.build(layer.inputs, layer.outputs) - if i not in self.edges_in: - self.edges_in[i] = list() - self.edges_in[i].append(in_layer_index) + # 删除不必要的节点 + invalid_list = list() + for layer_id, layer in self.layers.items(): + if len(self.layers) > 1: + if self.edges_in.get(layer_id, 0) == 0 and self.edges_out.get( + layer_id, 0) == 0 and layer.kernel != "prim.assert" \ + and layer.kernel != "prim.exception" \ + and layer.kernel != "prim.warnings": + invalid_list.append(layer_id) + for layer_id in invalid_list: + self.layers.pop(layer_id) - def get_layer_outputs(self, i): - return self.edges_out[i] + if self.graph_type == "dygraph": + self.get_dygraph_inputs() + if len(self.outputs) == 0: + self.get_dygraph_outputs() - def get_layer_inputs(self, i): - return self.edges_in[i] + def get_global_layers(self): + # 该全局layers的信息是按照拓扑排序组成的 + def update(layers): + global_layers = dict() + for layer_id, layer in 
layers.items(): + global_layers[layer_id] = layer + for block in layer.blocks: + block_global_layers = update(block.layers) + global_layers.update(block_global_layers) + return global_layers + + return update(self.layers) def gen_code(self, code_dir): def write_code(f, code_list, indent=0): @@ -80,19 +179,22 @@ class PaddleProgram(object): else: f.write(indent_blank + code_line + '\n') - f = open(os.path.join(code_dir, 'model.py'), 'w') + if not os.path.exists(code_dir): + os.makedirs(code_dir) + f = open(os.path.join(code_dir, 'x2paddle_model.py'), 'w') write_code( f, [ "from paddle.fluid.initializer import Constant", "from paddle.fluid.param_attr import ParamAttr", - "import paddle.fluid as fluid" - "", "def x2paddle_net():" + "import paddle.fluid as fluid", "import math", "", + "def x2paddle_net():" ], indent=0) - - for i, layer in enumerate(self.layers): - if self.edges_in.get(i, 0) == 0 and self.edges_out.get(i, 0) == 0: + for layer_id, layer in self.layers.items(): + edges_in = self.edges_in.get(layer_id, []) + edges_out = self.edges_out.get(layer_id, []) + if len(edges_in) == 0 and len(edges_out) == 0: continue line = "" @@ -106,16 +208,280 @@ class PaddleProgram(object): line += " = {}(".format(layer.kernel) for k, v in layer.inputs.items(): - line += "{}={}, ".format(k, v) + if isinstance(v, list): + line += "{}=[{}], ".format(k, ", ".join(v)) + else: + line += "{}={}, ".format(k, v) for k, v in layer.attrs.items(): line += "{}={}, ".format(k, v) line = line.strip(", ") line += ")" write_code(f, [line], indent=1) + + write_code( + f, [ + "return [{}], [{}]".format(", ".join(self.inputs), + ", ".join(self.outputs)) + ], + indent=1) f.close() - def gen_parameters(self, code_dir): - pass + def gen_model(self, save_dir, input_shapes=None): + if not os.path.exists(save_dir): + os.makedirs(save_dir) + if self.graph_type == "static": + code_dir = os.path.join(save_dir, 'model_with_code') + infer_dir = os.path.join(save_dir, 'inference_model') + 
self.gen_code(code_dir) + sys.path.append(code_dir) + import x2paddle_model + scope = fluid.Scope() + startup_program = fluid.Program() + main_program = fluid.Program() + with fluid.scope_guard(scope): + with fluid.program_guard(main_program, startup_program): + inputs, outputs = x2paddle_model.x2paddle_net() + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(startup_program) + + param_dir = os.path.join(code_dir, 'weights') + for k, v in self.parameters.items(): + if scope.find_var(k): + self.dump_parameter(k, v, param_dir) + + def if_exist(var): + b = os.path.exists( + os.path.join(os.path.join(param_dir, var.name))) + return b + + fluid.io.load_vars( + exe, param_dir, main_program, predicate=if_exist) + fluid.io.save_inference_model( + dirname=infer_dir, + feeded_var_names=[i.name for i in inputs], + target_vars=outputs, + executor=exe) + else: + self.gen_dygraph_code(save_dir) + self.dump_dygraph_parameter(save_dir) + if input_shapes is not None: + # 如果input_shapes非空,则导出推理模型;其值类似[[None, 3, 224, 224]] + self.dygraph2static(save_dir, input_shapes) + + def dump_parameter(self, param_name, param, save_dir): + if not os.path.exists(save_dir): + os.makedirs(save_dir) + dtype_map = { + "int16": [framework_pb2.VarType.INT16, 'h'], + "int32": [framework_pb2.VarType.INT32, 'i'], + "int64": [framework_pb2.VarType.INT64, 'q'], + "float16": [framework_pb2.VarType.FP16, 'e'], + "float32": [framework_pb2.VarType.FP32, 'f'], + "float64": [framework_pb2.VarType.FP64, 'd'], + "bool": [framework_pb2.VarType.BOOL, None] + } + shape = param.shape + if str(param.dtype) in ['uint8', 'uint_8', 'bool']: + param = param.astype('int64') + if len(shape) == 0: + assert param.size == 1, "Unexpected situation happend!" 
+ shape = [1] + assert str( + param.dtype) in dtype_map, "Unknown dtype {} of params: {}.".format( + str(param.dtype), param_name) + fp = open(os.path.join(save_dir, param_name), 'wb') + numpy.array([0], dtype='int32').tofile(fp) + numpy.array([0], dtype='int64').tofile(fp) + numpy.array([0], dtype='int32').tofile(fp) + tensor_desc = framework_pb2.VarType.TensorDesc() + tensor_desc.data_type = dtype_map[str(param.dtype)][0] + tensor_desc.dims.extend(shape) + desc_size = tensor_desc.ByteSize() + numpy.array([desc_size], dtype='int32').tofile(fp) + fp.write(tensor_desc.SerializeToString()) + param.tofile(fp) + fp.close() + + def get_dygraph_inputs(self): + def update(layers): + for layer_id, layer in layers.items(): + if self.edges_in.get(layer_id, 0) == 0 and self.edges_out.get( + layer_id, 0) == 0: + continue + if layer.kernel == "fluid.dygraph.base.to_variable": + value = layer.attrs["value"] + if not value.startswith("params["): + self.inputs.append(value) + if len(layer.blocks) > 0: + for block in layer.blocks: + block.get_dygraph_inputs() + self.inputs.extend(block.inputs) + + update(self.layers) + self.inputs = list(set(self.inputs)) + if self.inputs is not None: + self.inputs.sort() + + def get_dygraph_outputs(self): + for layer_id, layer in self.layers.items(): + if self.edges_in.get(layer_id, 0) == 0 and self.edges_out.get( + layer_id, 0) == 0: + continue + if self.edges_out.get(layer_id, 0) == 0: + for output_name in layer.outputs: + if not output_name.startswith("x"): + continue + self.outputs.append(output_name) + self.outputs = list(set(self.outputs)) + + def gen_dygraph_code(self, code_dir=None, indent=2): + def gen_codes(code_list, indent=0): + indent_blank = " " * indent + codes = [] + for code_line in code_list: + if code_line.strip() == "": + codes.append('\n') + else: + codes.append(indent_blank + code_line + '\n') + return codes + + def gen_head(): + self.head = gen_codes( + [ + "from paddle.fluid.initializer import Constant", + "from 
paddle.fluid.param_attr import ParamAttr", + "import paddle", + "import paddle.fluid as fluid", + "", + "class {}(fluid.dygraph.Layer):".format(self.name), + ], + indent=0) + input_data_name = ', '.join(self.inputs) + self.init_func.extend( + gen_codes( + ["def __init__(self, params):"], indent=1)) + self.init_func.extend( + gen_codes( + ["super({}, self).__init__()".format(self.name)], indent=2)) + self.forward_func.extend( + gen_codes( + ["def forward(self, {}):".format(input_data_name)], + indent=1)) + + def write_code(code_dir): + f = open(os.path.join(code_dir, 'x2paddle_code.py'), 'w') + for code_line in self.head: + f.write(code_line) + init_writen_codes = [] + for code_line in self.init_func: + if code_line in init_writen_codes: + continue + f.write(code_line) + init_writen_codes.append(code_line) + f.write("\n") + return_code = "return {}".format(", ".join(self.outputs)) + self.forward_func.extend(gen_codes([return_code], indent=2)) + for code_line in self.forward_func: + f.write(code_line) + f.close() + + self.init_func = [] + self.forward_func = [] + if indent == 2 and code_dir is not None: + gen_head() + + for layer_id, layer in self.layers.items(): + if ("paddle.nn" in layer.kernel and "functional" not in layer.kernel + ) or layer.kernel == "fluid.dygraph.base.to_variable" or \ + "paddle.fluid.dygraph" in layer.kernel: + line = "{}".format( + layer.outputs[0] + ) if layer.kernel == "fluid.dygraph.base.to_variable" and not layer.attrs[ + "value"].startswith("params[") else "self.{}".format( + layer.outputs[0]) + line += " = {}(".format(layer.kernel) + for k, v in layer.attrs.items(): + line += "{}={}, ".format(k, v) + line = line.strip(", ") + line += ")" + + if layer.kernel == "fluid.dygraph.base.to_variable" and not layer.attrs[ + "value"].startswith("params["): + self.forward_func.extend(gen_codes([line], indent=indent)) + continue + else: + self.init_func.extend(gen_codes([line], indent=2)) + + if len(layer.outputs) == 1: + line = layer.outputs[0] + 
elif len(layer.outputs) == 2: + line = layer.outputs[1] + else: + line = ','.join(layer.outputs[1:]) + if layer.kernel == "fluid.dygraph.base.to_variable" and layer.attrs[ + "value"].startswith("params["): + line += " = self.{}".format(layer.outputs[0]) + else: + line += " = self.{}(".format(layer.outputs[0]) + for k, v in layer.inputs.items(): + line += "{}, ".format(v) + line = line.strip(", ") + line += ")" + self.forward_func.extend(gen_codes([line], indent=indent)) + elif "prim" in layer.kernel: + func_name = layer.kernel.replace(".", "_") + from x2paddle.op_mapper.pytorch2paddle import prim2code + if hasattr(prim2code, func_name): + func = getattr(prim2code, func_name) + func( + layer, + indent=indent, + init_func=self.init_func, + forward_func=self.forward_func) + else: + raise Exception( + "The kind {} in paddle model is not supported yet.". + format(layer.kernel)) + else: + if len(layer.outputs) == 1: + line = layer.outputs[0] + else: + line = ','.join(layer.outputs) + line += " = {}(".format(layer.kernel) + for k, v in layer.inputs.items(): + line += "{}={}, ".format(k, v) + for k, v in layer.attrs.items(): + line += "{}={}, ".format(k, v) + line = line.strip(", ") + line += ")" + self.forward_func.extend(gen_codes([line], indent=indent)) + if indent == 2: + write_code(code_dir) + else: + return self.init_func, self.forward_func + + def dump_dygraph_parameter(self, code_dir): + params_output = open(os.path.join(code_dir, 'model.pdparams'), 'wb') + pickle.dump(self.parameters, params_output) + params_output.close() - def gen_inference_model(self, model_dir): - pass + def dygraph2static(self, save_dir, input_shapes=[]): + from paddle.fluid.dygraph.jit import declarative + sepc_list = list() + for i, name in enumerate(self.inputs): + sepc_list.append( + paddle.static.InputSpec( + shape=input_shapes[i], name=name)) + import sys + path = osp.abspath(save_dir) + sys.path.insert(0, save_dir) + import x2paddle_code + place = fluid.CPUPlace() + with 
fluid.dygraph.guard(place): + restore, _ = fluid.load_dygraph(osp.join(save_dir, "model")) + model = getattr(x2paddle_code, self.name)(restore) + model.set_dict(restore) + model.eval() + model.forward = declarative(model.forward, sepc_list) + fluid.dygraph.jit.save( + layer=model, model_path=osp.join(save_dir, "inference")) diff --git a/x2paddle/decoder/pytorch_decoder.py b/x2paddle/decoder/pytorch_decoder.py new file mode 100644 index 0000000..c1a626d --- /dev/null +++ b/x2paddle/decoder/pytorch_decoder.py @@ -0,0 +1,34 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import torch + + +class PyTorchDecoder(object): + def __init__(self, script_path): + self.script = torch.jit.load(script_path) + self.graph = self._optimize_graph(self.script.inlined_graph) + + def _optimize_graph(self, graph): + torch._C._jit_pass_constant_propagation(graph) + torch._C._jit_pass_dce(graph) + torch._C._jit_pass_lint(graph) + torch._C._jit_pass_peephole(graph) + torch._C._jit_pass_lint(graph) + torch._C._jit_pass_dce(graph) + torch._C._jit_pass_lint(graph) + torch._C._jit_pass_canonicalize(graph) + torch._C._jit_pass_lint(graph) + torch._C._jit_pass_constant_propagation(graph) + return graph diff --git a/x2paddle/op_mapper/pytorch2paddle/__init__.py b/x2paddle/op_mapper/pytorch2paddle/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/x2paddle/op_mapper/pytorch2paddle/__pycache__/__init__.cpython-37.pyc b/x2paddle/op_mapper/pytorch2paddle/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e75cd059cf2fcde5a0c6c2a3addc9295b8dfc84 GIT binary patch literal 167 zcmZ?b<>g`k0^?sv@gVv!h=2h`Aj1KOi&=m~3PUi1CZpd1KIQ$h#3HTWGh(! literal 0 HcmV?d00001 diff --git a/x2paddle/op_mapper/pytorch2paddle/__pycache__/aten.cpython-37.pyc b/x2paddle/op_mapper/pytorch2paddle/__pycache__/aten.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c6188962cf21350cfd4ae4a10362f9e92e94330 GIT binary patch literal 89800 zcmeIb34C1DnJ20$l}e@3;ss-yB}61K*et@5Y{@2MBV=uwkU*AkmP%QtY?m#S#H}hL zYvgGMyAs3Cz(mV*E8wgbN8g* z|NFjk&po&5mP(d4NU(o8x9Z+=&pG$jw{Pdx=H_?}{&k*So+vI0*Zh>exc}(NNAZ{K zt*xn1H7e9wb2lBsy`jGFvT&#-RjX<@hR{a1t>ZSzZ9TU!ZX39bbKAHvq?-0LEQ_dS z)pCE$vN|x%qFI6|G zg*eVu?WzOEIjU1F!f~!zth#W#Om(XzI9{%ns+)1VLVZx(g5#CyL+ZmgzDIpTeH6#5 z)W_7VIKEeXT-}CatNMicB#u|BPpMDi_&)U+^;sO>uYOD2j^hW^Z>u|SoTomg?!@sL z^?CIL9IsV(sk?EUufC|hgyRDBW%U&tuT%d@EyMA8^;PvX9B)uxSKq+#MwL+C#Ia2! 
z)p8tfQawuHxKO3k3LM+jO0^2d4wY7`aqLuUR4+KS_?>H+m_96zoeR1e{Jo7$$f_&N1&)jk~WRL`mB zas0e`LA{9M7u5IEOE})8zOP=!@ou$W9l-I6>J@bm$1kZ@)ems|vU*Luj^kI<8|qCQ z|CRcoI)vjgbyyw2@vG{nI)>xd)N%C|j$c4fT8K-{F{0|6ct*j^9-OLHz-a zN%e>7k8oVB{#gA-9DCG%Qh$P@Qa@5Z#xbS-H}$7Du26rb{v5}Z>M!o9S@4&q2nQF0 z3Zd)JG~St1syFp1CpoaHP?t|ZplQQ%_AJnjX4J|A-vWrH|t;n?o~PGg31g-WyV( z%WBrw4%e;>t&gN@*4N>Dh|Yg4ygs_Vp3lc{E<79>uBX1kQEp>%zW>yQk@4+6Ebe%0?C4X) zR}L39929&g3JyZfD4DtXz9T2!dhljGO_%0n7PfV^ z-rRb3DwFMZT0hj9%%w6n-<({Yoj)(r*15oREbh2(^wERGo!>us_<_>f=gZv-Zf-Rz zDel~k#mISD2hBzQXwcT6{ts*`>(9SZAv8ENXMlhIwiUhkv})_6XUn!>i3`GoXgV{H z&t(hs{rMbySP(7DNUTid68d{0lk7_srt>N3DrcG!J*!eZYZ82}5a~^4bA{;IWN$u| zEi_=Z1kbV{UWoQ32L@73p>8EZUnJA7QiVntU50AruI_||Ci$w@)o4#@zR(!cdh(8w z%9xcFTHF(I%lB%W`S_tv-+ohHCU;XdpIMj8tVm{7E^NQ)D;@GHCsb-}s<(gOroj$* zLpSvgBxJoe4Xn%2ukK!;pS5jZT_Hvv5_os-B_N)Srlz4K6b(f}E#YW*Zm22T6pn_@TC&u1Nx&D^w+PCG4wZR=#Men3l20e^vmZ`=#K+WYJn#;tLxT8 zo%`r0XBc-Px71`h%!t%adb=ainL34D!|{=*T-zH8)#U0G7&LG%iYuYjG1b7>726XY zj_wL?M~~t7a0BC5JbxF6gT}Gs+LV)AnQBd{%5_Lo`$JJHa(PTYp;rnG`xpeqpyw>A9{@N@aT6(pWYLIikQUeKmOKt zi<_Pc=;mKV>wLl$p!)chmriZCuXyAUZz31r@oI_(j{kJSw@!V3cX8hf#e+Ls5+EoI zat&N{ngK|K26|+E!a^^U15OQv!LxVGyPF+UL z<I&c?cy(5&!GU}96K1y~pf zh4DWciiMm-=(4+-!lmive_F)8DhUDB~5s3ZLXZa*eacSGi^Cm}$k z`<&~6ZA_}1OX*V!nnGxuGmVZBy49IZAM1F!(@k1b;g?oIqX_*M#46G#XCbaT?bJ+$ zEIFOja}wkzuE`O8#UyUYeiThjvrT{^A!7XQUsKp3Md9flS+a@J%^1TFB@&DcQQCtq zm536~Pf0KaQj73RtUb!2K}1YZ;wn4 zhX;YP41X-sP9$1)EbRi40x&(XTB-T-(rxV>K*f$FORAzGjWGX4lB;EbjD#+zGLq0> z(nWw?o~PB1Nti02k3k}ad4X>Ll*e-npR+G#f3Q5Q+MPVKef+@Qu>-sOq@O5k zRqEc^)eb->dy*mVi9|A^5{dcqI=cwSI~P}{@HEPNHcE-uvgHV5M{il>1Rq1&w7dkY zF{SUSf#nhq_0jgs1*h#z40-x>{VjfvEGcfK^j)kGpza^WH3Mo5_+o4k5914{c&!U= zu((BFe~XZv0oJ&v`Bcw<7lN!$_BXyhT+aX`pKHnfX8I6FMTD~JK6D0Ve&{pd;o6Z} zw$a64MSu`8TuU|u2C~?2J=+iJ^VfS+7p6H}IS;;L<#Qanr`J z*GJqL1Uy+?XRAPcsyEdK&IYka@MZ5GhsWeP%WEg1A7&GmOOIf{YR&p+nWJi8%)Wz-$uB%+5vi9DJ2$ zfvId~ICF=>a0^%=~7cmCK=zJ~z7mxl@mxC_c5N_|S&p*3GA$cx3G8 z$0!_RwPqk*# 
zD>KPl-buCQl51duY#nNBO*$*{MDMa~r<+|10{8aOypZz=%&;KtDPM)!-c&{m2c#jf z^`H>v<-vH-lPffQVz4I#vvq%_Pz#&qxwR~4QppRw91Jh}Z?J}%s3k&%;7f?uqT@^u z(aS?EzH_#TVV1sbpR4ql`pyl9L$hte#AmSrLz0{DC5At?mVO#5R3RWl4an(LbX1`_ zB7@=Xn%Wv5MjZY&K6?wUjMQ}93*Ybt5PGmJ3J*zxx&$7QdNoVmF=sdm zaxfc6TE9y?B#pyymT>0g_rPAY8R2L-O)|wJG$Aj(p}wG;-Ay6GO{dWdo@JobZG*sg@bL2Z0CtcLFNqvcu}xq zBXCK-H7O^P>c#u=yGVHW#hpvN-a`L;eiZ4n$dloUd%s`Yv?*XDycnZkjP8D=xbyk3 zM_%knrF)B8j)Q`V`a`y(El0>(bL9Er!wKlRKX;Zhai*PaZ#kOZqo@=t%LIXUg7VAwrHKvD^alYs8RC-WRdq zHj3j3ejt?+Yi<*tA@WPEA-3u^(3?v1!C&hz6KF^c_Vnhn@B+G$X*Qbz`Da!TPTh0I zf;!JFRfrIqDAcY=tt-^AZ`rw#rf)-Ydb%d_nM~WAA7BSf3m=0@A|H0zN=-pMfZi6g&AZ-rtj;g4Nue8rt52Q%r-;wjG5e<&Rn9; z2>owV6Vd;F^63Aq$z0E>JNlgxFBoBZRWpkCsg{r}MH}ke7?kBSY9fPSVKXO(aoZB2 z^xfS=`@6e|_IG!aNwK@Tn_67u*@!ky?%%xB;6Q%{VplrZ3->#_`4cOWJ&2rW>mYv8 z-NoZCsm@Q#V+6612M&u=DVlWvH*p`&vR&GG^{6`lh};-HzxA!+tM~cnC|-(n@4cz? z%2l~+Ne^%odBY&K%hSoy%eJ$vxaq~>s~bq<-f*mBlx}Mj={EY<;o=Vu_`59uXVO+J zVLIan_KzLi=<77$%Ws|UI`fDH9MNVvSrIst%!$`A`pP4SXY!4UAI-ZPZ0F>W&7vxd zz4#h<3miSfy0&f}eP!FpL*FL8QTmX8M*r%IJ6|ELnOB*NJ5`~9;gi7A<1u#^>eJao zE(J-O+=fH-ZSqqTnlkx5L>whE;P_eQKX(k(wb8HeNl_J|ZHc~AGQ(VFDBhM>n{?8U zinNju>+2`&Am8V(icy~$$fkSyGp8@{n25o_5EtOI=Di#SZoI|?m2WGFF^bdr79 zf>uP_uXWi~Nhm#r!z`Fy!d#ZMm^Z@QYO)-rP_N+??o_;xLWEvKp%!nTG_XQ=k4F~{ z5%0MTPgXOtl=s9e?$ZK})B-ON3^<|b;TE_z<&U_M6=#vcIEMfDj5&XfE0E>`WBn=_ zbI`vm&Vr?XH^w4d78^m_;haW1LF$JONsQnDigqkdN}HD9$4@*xx?P9=04MH*6x5r# zebQlsgKZrh0U-%J$sDZVB$Rho_4mjS9*q*A6jGlotupas7pRU(a>*t=3?4%W-q_Lm zN4M_8*D|{#s2TjTkLNy6=4=hdMx3H!PAQ&q<+%dHH3LF zP7*_i@5b9YQQY9&cv%vfOXj2pexL#H%n_=)W04VwESrDNs(faRD-X4Ivq03rZTr%y z7)E0T;FqklGJdh_g}JPIT{I(?hTLwD&BY!P!RB>$;4Z;1)2;Z_F7RW2w{{38CEdo3 zzHTX@rMNha7;3+h$fkP8xFtNbP>X<_(=&YtnjPp(m*VG|i-VsMh{L~ND}?7P#aEO& zT>arUmhB80m5B-LwXyY`GGM&ki=apmMNWYzg3k+kq zVxHkOV=upa^3aBphh7r@lnAfIhc+Sboh}`LZ&qLO0Wbx@QJfC&0`5KtrNl`%3a#DO zPgYi`Z-?8r1lpaC;O8D{Vpo`cSRco^0MR%zw8z$a*PkO0p3rUEX9FpjVM(EfU#up( z6;N<)OtBBZ{kpYoW%vOkUOZ5Xzc5&pHpM=GbPQYzaY9C9t9I#b4Y3+tN5YtV-YSmW 
zx~;X^6wB&3uCXb0YuGi#;tZQ&aR#Q?tx)b2qR?cFbV!b>VGfyBil9*;2P6%2{Et_4T<1nwN~vD#~8BTfm@@DYg+=XPMiqt26lOGF+Xh zX7JT(tgDys)j?dHr7q>Gv0PluW-km(yKKsxlfT15N^Q4$@(5{4Q%Wd71VWiA52%5g z1i22-R}vYDfg_&M-qx`QlHsByti>;?9+^tP4Mv4ED65xTDdEISW!)?6OKuEiT(l@- zfz>mVTVNOI*gM_9(<7HP*^lfgh^!VC>Pc-Q8zTjiI(MNNDq&irY89dhR=I3c*Wq|@ z`V=Awv^_vV{ue<@=tcTr_6hu+06w6Y3Tv>8Sio0Dz&8xQ+D0fq{a};Q{cz0yDFBvG z%K&pI7e?=pL_bi}HEdPkTx2+`npE?)Fl9httQIvbU{2Vbc+MF#C-T-|>CN&E0p3htXzfuh{xURfosGezg zCM8jq__=DF0W^iW1mH$3sP&A<5~(Bo(<=miY7^-d2-2j~hg1K8e}#H&B}UwGT+GE} zdDg&^%`fLcmemo&Ec6XdA~ zPmYXULH9+-axru%0eEk!O?TBDhw2#@Z(QhpXkSpz4#`54Gc2;4uThA=UY>P`Pe>-# z^4|WQHT*&rL<&*ynmDWJaxA?<^0W$d+-X5nk~lahx)VJ(e?u}kuBNY?sKGfJEm$B^ zBbj*@F*7o`*RYg@3=Fnm!}Ca?(Y|wG*b!#(nW+9mp&^v1A@Uhn8REEy?t+IB*M@Fj z=4n#|5W%)v@LceWe3*i3t)-av49~#FHQj`i)UMc2BnBckAsWe zRkLh{{{@f_q9l)o+{gSIRsA9PH=GspT6^i2yp?{*MzIOS_&1`szZdx{=!oAm zJo|>3_|^1Z7@0S}gHeJlLW8(+-gV;A3{W>atuBAB#js(^+`Or{fA`5l`(eZwJGvjA zi}$@a1=)JAEg*CkKUyxmGT)y^Lg=D}i^=xBXyKx+s=6i(7R0P8FIPCbD+06nNE2G4 z6t_Q9eBr^sev81ZwF2ore)0IT@JP~l>=yXxhMl9Yy*PS5&U^>>1oDJ8Ho9wMbpJkb zCBF73EKQ>iY&b~)C;P}=f8yx)E8l_LX#9-_Eeo+Hnh(X9Zl>8 zD*0w-=!)}xpdS zC3cdV4dTou$nKDODL^8ogYL!EcDZ(pao9b(+G6Z#n{Hffadx%ER3q-E^fB`ZcaoEg z-EFwmV%%ozWW$;1=FG&dHCiLBPsU@X+bmk`rMN$|x>3#LunPz}WnmYW<$vL!K7RSN zQ_4dXcT+n>lnOGnx;{4<(zP!UvUe<|h8%1i{FU5p9KzpGod%eBf(XI3ACA3%a0E&d zuj~=Ckl$x=;+yHtKyO%wod+A_4C}CI)ACYZSO;j1&=MVbRxbE=c=oDFfft>|@-@Es z#qo_B#$G#0Zm58K9u;D3XLKrabB(KXoANIPnw`K zR*84uBv?c?kgwgzP{T0}F_rF3l2J{f8lo$m{`^3;faG;Eiygi2afU)40Sq}0V6;hj zot$sbya{T)NsZ@TYAKN)3XKAsXXIOQk~HXYYFPI31VD*hlOcabgp>V96T~pE7YR~= z>qm-*V{TkS=t|p<^dM$4Qp3As){SU*XOY?ed0xpZX?bh=d-H^4?-JX$6|r@`MZ|)V z5G+a0`0*Rb?XV)apM;n<*Sd>qL2{orxaml6E%Jc>ujcT6f)*)p=KGHGKyLn z54Ihh&M`%GzD=`VP0jnLc|SF*HnRd<5ddu6f z10xxV>hpG7#40?n6S=-~;_LgLmr%-{{_GS~uce-|&yvNUa%Iz*e6~NWs8G!$TpAcB zK-R*VP9A=+qO9eg!y3RAx!4qmC-ZAxf+K82iffw>Ci4jc)wH2=mMV;isZLvV76D#a zQ5)z#J8dxX1$e+FdH&V;^H9>pc51^<5Oe-iMUmwJ?*y`W5R~q@yrFIIc@6}Rb4W-- 
zKuAbq{%amsBMq_m0Ai#(ci7nLuaxD<8wGo!RCttfDAzUeec3!>`}&gEHEmR!Vi5(~ zEvlX~SymT<1GZ&t6@gr4v4C6Lb5QyP%FUzs0bI-wMnX5e{N8ElC2GS>cOaw?u0R=w zbd*<*7N0gDY97W2lOe0Aa1!TMd^G7FJ^Xfjm=topgB}x9xe@8Poksi=HRsgYpao`L z)+WL;64k4@uPNQbm!9!3?lGj?hl3Z{q_)k2`|%Tx6rX%r;C}S6gNEnUOfI? zV1*F-;*)Q|h=j<7vFDGTJoMn`QxtPV-HvaD=lJB&Z4}PF^E+g>+Ohl8mL0eX>)hxg zZ=F28ZS42~d=*R7r_{-jmWGjO8p0=g6g$+8&ZR-M6;K&1~m4M>9-;c zye6g%qNs5PLEsUbF+|8GPj1DPO1}}#PpNoKSy=+Rsw3I>QT=YXHXTC7xxdn5H2+sv zjvJq;D?L(B)D(X3_JyUjXb>uenj8zOhqlwLT17KHcaORGqO_P$V9YWSfff7ts-;{7 zxV~+m@Lb+i4eRGjl*GIFTY??RsVo;IsXk&P1UPyWSA*ebH?Ev(IKp^kMUeKAH{C6O zMAZpOD~JY{!1yJWr{Rint6qC_3J|q?FtPmJz~Z4Qy%v#Je4PuBb#!%C7wXJ#5=Zs; zzKW8Z9$448c=r2}9p=&(20@z9g<1dQQt_(`Z$d*DmTkk`g@87fRIN??F9J7m8&xcV z@ffbX3v8}jZLz3#fenl>$YN8%7bQF#P=0x&8m#TIVa+F{{{UCcho=z4%}t=DqOp`f zO+{m+iiD)iQR{|h)92Cz>Nux6ARP}W%g}W>(8zU}$MSPL`TjPie*iv{DbT!j#3`3U zpbR&Eo|DSvdvj8Av7J0-?d{d+9u4V9;(BR9`}`b}37b2Gb&Ke+0evyMiEouSR;F+Km5heoNs}llW)i%w;K+2ZcoV^&Zf~&41+JW>-uSmHS{Lowy{IIOWJW?Q=4D#9~X>hiZ zln828xd32$; zb>HY?`^oFK;h0EPsLwmL|A8rxiFU*)l~j>T-2=M}XGXMlRNsJu25=EZQjZA3%j-sB z#7b#L{uCulTB&16eiFI;1i@H~mKgQaJw9v*E_Xef?)*G8RGeS@6s7S#6LZW${U9RE zv0cZq=wO0^-Ivv`b5E?}6M>*#FQ9+V%LMbSn7c&XhE*p<0h4^B*4c+X2EKi^!i(s8 z{wFL-+iwuBik%q7?tc^D$VGOJJPM}+zKoyPIkm#3ivu>PK=Ka!3yT+3wFTqm0PZ7e zsmyw8ueC6gl4tO%H=Yq^Xu2=i6D1{l(09Rn&zSg62&4$GgAInxb2cFP3X?4lpL;@C zksyXvLj>d6eITGPI`dXUD$1!e_3Ut<9@zH5yHc?igtmZQh@V{<%0(3KbjDgZjo55P z#C06oX zH4AIaQw2QNfrKp8XLgDgTZ*?951$x)c>h#b*pejyRuNzXiXnrOE$JpvwtdMG3LnOv z4%G`mps#Hck%d(x9yEbPbA)`Dhw&2*mK$j)t`ute1^Dx6t%kLZ?s-XSj=4gnM}wFJ zYL6Dzg}fe^j!Z84Y*QmQ9V_SP^sm%C&OPWl)bypKb2mD--TJxD-EJFJ9E-1}S0J}4 zy`tT@7q{r-FBV-L;zKx*r=e!}$YDiN=kGsqw_VkY^A$N(0Wvef;E*?zT;7{xEipAnZ}q1ajT18A=`UOexd}b1GanBN7`ybxS>OJ!E^-z!`~IY$Tooq1S8# zIsp4$X(X@)!y@c;QI7ydqscLPB72m)aPg7&a3hmudwg?N)(W=OsAjtap1;k(H}V3{ zWs>c^vEb*sYzogutLG}`c)keUmG+2susMU}4FFCz0&cQvvA7b|PUpedWYCd#t zwj_oUQJxO6+$5wKWV^g46|;1EI4@&nL#}csxAq=gBecH$u2UQRu?8Qa)3fAC7Q@g$t!j-SnP@> 
zw+qFMph){JVkt&;+)8zfD1h{-r;8^xo!Wb}q^BJQ6Wqtlge-J;Bz^aj@CosaYLgv! ztGI8A@1iA`a!p?eM;v|jmC}+)g&1WlIQ{o#Ik}5v)dFWR^dSGFFOxm>&nO@up{FIf z^g5sP3KDY=@LL{z`bnI%Y?z;-uM`@3Zlh1dc@2=*Na#%zpr)lH$raUPdH+Q0VCoAw zeHDETLXriCB;t?bFZ=J%R6gOzMKp$nc%-T_Qcqe2N2EcE^^09qmR5Y`p~ze3KS8vC+^qR_%>Gh{FR?M zQ|L|rl21qaJWe=Ypyn=WjGuukWem;oX+nAeA7frY1J+wSmYFJ}mEUK8%>d3w^$AjV z#DT7tv7hj338r_q2qcY1a&dIk=8|u+OONyjgIPKhBI3h0qpw^>}@G+aw}{I+uFEjFW;FcjU8!nSRGsQ!X6L z@ay@<31Q~-iZDZA9%sbS-I!X~5wwOL+bc1(bcXjO*CMr~Z;2Je?E}7#K9i5TrBbQ^ za5d!^T(_ch0){v^%8{qf8)$$98p_146G4B-4zWs5w^2#n&aS1^cVP^iqH2t;f368b zbzP1>UVl$RcL+FhE|{1VtfLcdX{F3I*H|gJMTOs#|9+8Qh^{cZA?_bxdTRZmqu@W zvUv2!Nlt*JJvq6cYjM*Lv3D44$h`9*2n47yHTLZ{i-+GldFT;HVWZm)NWN#0OK75l zSPiEjI2nF!qWZ|G=wvVu>q>IS=;!ZSOSK#2N-~;^eVo@`lWt@ZCx!c)_h?=k}X)+*esOO*YGT zh8N!6%r{Jz(fa}nDT@)$B1)l3txflsFfksr%Zr|oNS+`|iu5#DP*QKrD=$WuOWc8l zII2RBCk+uQqWzKA%@lbszoL(ag`7j^RBmD6Z85LJrSqEKD^X!)WuGOAaRVl;nO0(F zfqliQO6|9`97ca5&HJbb8IR6K^eLpXbDB87o+CGHpgzCA1#n8j5c9cG*WWc}19ZvSFMH@UvC+*M9@`H<5xkdWds(3|W@jh}d& z3SaNI4Yz14`dqeTZ#B8!0L@`3!+(*~9` z&|HDRkRPC_=TUPFH9;Iw60c~4L4tEm7|``1-TkXvPcQ9J1uyJT3}qnO7YFt>*EyOz z2p;DgQmXVxS?jz3?-W3p_VV>>w21`V{OMe(k1DY97JN&pC%%|1feo|K3&WE9n*%fX zrTyo)BrO+Nr!9y1Va!v|H6Ks#f;`4vMunZYQ#D`A6MLbA^!5>9cq8#ikp zFu3~vz86r*V4~{(OLQ4z*3@RSLWWV3XN~s&`1tq z+#x=epWws!;QN)cPR_%wtKtrl{X^hA)*>q z@!y;0_A-}<%eB4zcSO{_4MX1tA;$bc%Ue*>FcO|Vsy~*E44@CxiLU#{IPS46yzFBEm@C-!?z7Q@6=X0Pj?3CY?pha2KeN6^{T-7 z$2PF~GLLTo(R#<}oH0~2;0Qsz!9|1FIUd{0K}^PWa}cM0MMw0+8RRh2PVo(E8k~RT z%`4~**}S3!n^)AVHsA4?c9(CWdmvIbvllF`rgIg9X#pDTjC_~n1$*_r;_-*!1w*;P zDaZ}1G%jolC~yp44Z;@e4c@5$EXvkU0fhE$iZ+BKK-L`D;Iyi7+)Nc9F=NMc^Ar`0 znpVifrDSLMJb{Oi#n>Bu7%h-z#lDVFPg-3sQ6uQp%TN|Nr z<2j&mqv)sXsE$5JC_c6O2c!F+ML@=>C*SlflJo%Wq?4fnHb!68z`v`1V|3ZKY&xAx z_JZFl+1ifv15=rS3#NPfqE82YJ6}U%Z*$K2*c}+gN1j5?e?q@A*sJOrG&b)#!OBEw z?)kD7^9*OEz9(ti<#|Sl#7|}R^^m~fQ`$2g%|z= z_XevQZ*!5d)e|Yt)&NX9*!aZCUiihkDbcjM`fkWefG`@EjY<-106w^b48Z#zsA2}L zY~J-(Sm=iMI(0KJ$jJqcrI>*eQeLzc9Y~%hL2N}HNINAsyiPfV>~@e-;+0tw1kQn6(voO{&^ 
z;^4+oLlgOi0b;Tizd#f0RW_@(y~<_gCalN@XBR+85_1zK{s)4Yisq(2e>=j*f6?$E zxp%$&EAf+@sgb>VNf3SPUrAiQdXO949Y6$;ej%#~pkWx+x~I3a4T&=%+`X z-FreqaMxO}zOPC}oxhrwr%GO+2gA&5Dn*x;(mYV5a zeup?3V~85$tV>-oC!)eipE0#0P^l8xn{^x!=#B#m4eq1X9tYpeIM{qtdhj^7qOgy; z87H(lqT*#exWa|3hw5XhCSB#i>Pw@T5u0hm!2T_AR#u?UPmz5`>SXX;CC@XRpNCp` z9$McFULVf!d^oC^`~;V%S!P}3eJ<({NkN9ny+ z>vxAc^{V9!Rvf|VA4mD7_rbcx&yDIHarJ(DL;VbfHa&uYln?0fu`Ya~N+#~_g_2YJ zyeLJ*dwh(#?g2%%W&8-C8zZxUWBuRc;h5;&P-y{XLs}< zCM)}-R@(2?ov@7A%G2UW{hXHlgt9d27EkQHQ&gV!1>pvGb^rL;2WLp()~Jy zn(S`6DpkKDRjH($V|s<-;Esv~Myx%s3*kh&u5pE>me+$Cy3&{SjF5=FR3>+CI-5!$ z)S|qi2z&iG?|G1(l>Pq!#YCJrw7$~Vo66NAucg7Rp=Owx_0*JTXc7}(W5maaZ#{;t zZcG4bGa)Z}E~+)@f+F^3eU9o!(p4%iBA=r8JQF?To+g;}e4oOo{)8F^w6};?h=hui zc=e*F;uQTM5<%bFLa_&bjg=XN@LjHHIPAMzlj~9V%PIcmaLxP$EA0FLW1)a8eC?jz>DCO8 zx&jD;HGO2C1K$5&uhbr(&)63Z6@ai4pcKJHjx0GO;Eq5oKQ8Q=fo7pgyTjImNUOT&ONS296i}M>l}9T2BkU1;QLf@(pn6>J zwIgVdhcSwS-vw8m?z-u@N_Un7c5%{BRP+l^5Fc{0-0*7BeR;ooy1^>s!(!}gfSc4m z{|tTNl0H|`r%8pj`t#cgan`yj*E|=SG1e7ofDrpWOk>4=L5y_EX6fQt5Gz}19WvG$ zR3U*9_ew&?J2JS2!e3wx09w`4?#MbIhJ$EurU7R-#RH|iVhWpuz+6KcjnG}ZbqI_@ z_`eeWQGk?6e95&IJ>oPVYJ-}-kq~}{9%~kR4lfDt9A29Lm4~c?rlYTpoJDX=P~9&4 z+=&5b!yPEWtTR8_DXfMg&pRlnrh~tdV&7gJZAGz%aZ=zOsI7p>R*>vd9l(`$$6_=gA#R!I=`J(f8h=F;b}We-ZpMCUoJGCxWIk7ghvyD9Jal z&VrUb)0rVci%H6!Kdp!$R6nf}sXm*GMQhvS^HPckav^0pVgo#caQ_t@aR!-;IL8~@ zSs9TNz|O>5F?QdR})^5 zL|BNeNu>r*%&YJ8T%Q-kNvV8KDnUPokL!h;B5o_sr}VNYS%@<}mUEU*DV0@dJx>wJ z6Je@pDybj@ZfIvx+Mg|nZ4`GJKIxO#_TWk-GJx||B!F0J&WpqrL5uTw7b;yc5W{Dt2QKRHo zm5US0aKt|@P6~7JrH5pY9@5@PGD>@A z^+NXT7{E32=uDO1tu;GgfC^6NC?SWP5CDQZPD~ml79(?!GCwk@2C(0WmD*Dk!S5nQ zr2i=v^z8zuWzx!Z92aK6_eWvl~ixw><<;X{5 z5*)e(4w%djCRA!gI+Idu)u{~+WylRfX)u`hhBXB0OdzKJg2(^L78Cl-d}KnX`I!wh zKJ(ZyT&X1XSJM6w3QQV3IbCa3hoOCciH>rHwePU5M}&>y;O(J)(%Y>^B<@=D()Eb& zodP)F?3Hn#*K0kVT)LsvQB}`6c{D&LkL91SnH?1u)}?j1i11H0d8Q+riemkl6gFC8cMyqalzz<{Pgk!)YQ_xE;<03Jy)ND2fKAI&BUOQY zR}J-(3d#XxeqHj2JlfLjk`^spio~DWxWX^Fr^@$Ck{&*{O;E94A+X;BtcebOD?Xj( 
z-Qlra1HCXGwGBBx$93OUEv#6xOP96lPtiz|^6O64MIwSBE#)hONff;3Zq~vXV^CCV zmD;x%(tkr=qg4B_i*km}6uY!yJt5wlciS2^c$^UD7Fjs=%Hgx6_#K^`nI?7{<3Pdw z3w`+gZeq z4AWMxlW#pWw&{h@{ries(^m={$ldNuf(XXZQi#w)p7!tJnsbtw>_C6kFPVm%UjUdc zg7Hb%jQ%k;5OQ3Hg}BZh-Gv7d-VPdLyyyVWY}C+reSMsf*XF*Df1Va$GTa^8CZ9gVMZyCM^_|!a{bO}|6X_l z{r8qo{r?wl?IHlUn5wXx7YuMWlo(e_%ClR*{SOs^`yj5IkL-N#hYM7koszz%47l2< z5Kz96lJ4DVtJQ@fGfn{f(gG{ebbVuaadx`P2eW5WoShyKi>loPfIPJkjf}b4Redrbe;5&K1e7 zBY^2Dg={y;0fBt{9(|(8)QR@8IL{VX+vJQOylWa1%gK=K|F0s1|Jo{}pYU*CbmTB> zJ)(cXy&z`Z;?DaeZa|lu+qbQ_mCECsp<%LnA??voB8oeyffVgh+O?GS&xWX6HBN(u z36N!qysC;)IL5c`5+Ph}@!)ZDnbSF$6keM4Rj$o~qPkOC<7pg1BTHP?S zEA!WS6m9(AM%u@i?xFz zFUKaN_WRTI<4DJE;)H!9^K&oRhoiJt*y`o{3iAi+ufK~c=ff&*EcX-bz%iLUH)@By zO*|mZ>M+d=N`cY0Rc)#IcGa%20BoP6uSoB1>g6)4nRE&yfo%dts2nIhXe zNd&4Y08zPjk)q&J(rYev+;`eWN0H;S=*4SgDTf4|cF+nc3hb8Ym9FwaTGzQ$f|svj zRNbEZ>~4n=nWMlSX};V^UQrHMrm8q^?Ly6GEKDxBkj0?K<4T;;*wOoGr@iAZk8XPwoy9wC9^j`N zc8qy1{9R0EXSSOWgMWj-*PYDd_8_@PG}gi zO3K&(JoK@s{feK zg~oGh%ou-FXjWJlO-q=VNi^hy0BOMt<8|DBei+8;TF9#$?tr8P$zP`&w-_Zd;0m>F z&;)&kWRJ91AP%7ykc#rxqnxl%X-lS0d+39`iV%}S71j?tA!;Cmopf?}Z)yt2glx?+ zR^aZ))J|~^^jDo1P|~MBu8m20rGfRS+Tm|CcW4K#JXm0N6aOHK!;=Q25C~(y3cOA) zznr0Jf;+!+YKuvXwUCPKqN@p5DU-lCh6ehvlkRJiA#5ar^cKdi;EQKq>5YRLJQ41k z#xsa&EQ|RWApu#N%8&pFo@HAu)!Jwf!%c>BJdZDk395Bb;cMabk@as zu|A^IME6qJ1^Ha7DAvdAL()-%b9dZ6bf#8=zl=k`j)HAy-9GfWmcc(fTq|WF*4G34 zQMdsK2{lTjqx#_}7k6vO*Li>sL(tjpMfM?06|wJFE|JE*tn=rsfaNC-nYWD+JRwuD zm5;v+wpI7ZdUqRThgRuh8{VM8JE#LdcumSL*kC%}b>#2m&6eB@wS@@XDn`#Q;Aa%D zy$hd)nl;$UQ~3b5u9T`XEoV55m8uY&kUR;KrrwgoeT&l}o9HN5BFzz#)+um_*F~f}wLG<+g=S4rurZZDu!%G9IPGTh504$Tw0xS~{?j|A8Vw^xA%A7p> zU_}`6&tVN9Y+0=BY-ySZtl|T_DP$W)yU@TSksp}&(*#HrW3KCMFaj_2D9|tQ#pQ;? 
z`@kEiL~aCJvaV9}Ec%OxhyL1F>56!CrjB@}9p8tj*m~BRBF;}lSVWX;6j8U^>F5_9 zt(@11Y!LC0dbv-RO{F~o-9a%klO&efM`LvmZ?v)W_7QUbBIpYd!{?<6Efsv$Pgz8n}DkGS=4&;tQ5H?q7 zZtf9t`wVd$*ftQ zv({{%iK=F^=CDJVyzYC(dEc7xJ>Wgl-7-e$nsJ5KY{v1e8E5sH^>wVd#jIJMv({{% ziK=PVnvYp)#(CeG@jYP8bhnIAx@KJAHJfpKYsOi&(M3F!33M3 zE%UO0RAL|%@WJ#cg=T^5IyDosyLg3P)dxKSNpj%JktYcM3BH_M3rrKq5%x{xt61X4?vc9+`*o>-|DljL0h? zL?)t2`ig>Q>br~|f88YvLTW&kfU^*oCHD+e?9|cS;|KP^TX6?6t=!JQYn(n|;o)C=-LT#K~@JHhox%`g#58d+n-%_xD{ zf_c0F+02UAMJ|tb7UBDOrm~4pjK(d~E6pf%W<&rKwF!FSm?DkW; zw<2ZLy;z75e9?21VoNtcB-b@+$gQGCjQ(DA+TA=uV-MX|f*peDM`5+7d zY`NX298&;M7}d&&?mhWbicEPd)F)^UpPcy1syjA(MmkIBq7{o?B&t^c==x_Xqsj8r zG6o(rU;voIg)j$M_DuS&NGO+T2_=`xWc!`AtUR(0;zQ0{BD1l|#=P@n$J6c-SyIkW z)Ocy^kgs^0RyJ5Dx$HL&Jz6dF7};FhF(U9W1&lyR#*QGQ$R&H$AZCuu5LLtFWDrlt z_b`DMSJ69@4zh9=`5puFh((f!+5i;rlz;agOThPty@(+!syAsw0QU8)DS>~g-FFe- zU#sA>peLpP{Z!k{D7-!c_8u^GD4{jOP)tibKR7?da*3#-LS}99-9)u&OWW4L1+KqK z0FW0S+5k1yA0O(P*BbIa*K)_H4I`D~LnkFpFIhx;d4eE%{uv7TT+&H7FH1VBZxTxb zaFMQx;hq>vdJuSTdQQi0`a?Z2Q$QaR3Q+=+cSAQ^Ofqp78aLqk4C6P?fe}di81u`2 zm*9_VPOc-x&9{w&OsvC|rSKL-cXpTz;lsIxH_OW?a0Ix57Z5Q!*ps1-lPz3misr|$ z0BsJD;^Bxq96Nqs3RLl|p{bope+k91qsC-GnjSv$y;Hm2aFr(3sM>u6(6x)&eFi`M z^B`%cLTL`BdbVlPp&D2VE`VekWhE*16M`mk0|tdYP=S7mg?HX#!;??AJ%~Hche-i2 zdOoWY{0)|^p^(B=jBQqqB4Gnk}5jS*uMP}@?AqeaRhufUM1D0s^rUp5b_`5NQ zkF!oaHP!j|G}YDAoRL*qR4ZEjKM@RaEP8|A#N|>pZ8pBjhC-j&`0p`$C8~Bd@e>$R zVk{<#f^hp#WT6#m8x+T`qSzuA(d&r1cuV&M*F}{x1nPn_;}g@;dWm#r({fx~URK32 zH>SNY-;Ecdp%`AT;sZaJvV_^*MVwh~x5Fn8&koA~%Jzdnw|A1OtG%OoNFt3EKv-ps zl?m%&n;g4dE_y5@XZi_or{#mv-N5gyKwE7-h6hcQurk5(dV*Sxyj@M70Eab0;jez( zN#n!ml4uHg6+V;+Iq${$_xXc7R}fhWtX-st!b+jBvJF{EFek`JTrpH#i>0)*%kdc5 z8!%0IaIPeDU^6Lriee~1W7&#HM=^paJ-!hXMselBqZE5&1m(!TT+i_qZden}ApOG= zTANsCwX;-feHp3^D9O)f*#AueF6FE88F*xwdLQs6og8ziodR~2LRoivKp3j2L`F7q-rK5s;UdtI?uFHho6vv(w4{oV zM$=fgO(>-al~`y@HAWy6n~lFrFqZ656D_z(zlv3YiM|AdW{I!6A6IR%qBV@(RK^*g z8zrn7Upv?p{z;r;e>k`~r3v(`O7*PKfna=%2pMf@ow|2uI`=?Ydv6jOU?J2Tf+nX4 
z9OFK8rjlG*E|S()wmEsu?kOaMQbc)QJGUKyrM4x|+knt*MCT}CJBZH35cUlUr~Te7 zI?0ogJ%c_|wvKqr%_n%%qoH`1_kOlNyMpjMn_l_^;X_!_`Jk5;y^EU=&aiVo1PStu zF@FA z!5o4xu#J5(hYz`Qe}=bE=R5}v&m`%B{;FGa{W+byP?0J`fbmHQ6{ig%5AwtTqmT{R zl_B(d5ofeJm#6AEGXV)ARNP9Rh^wCVdon1M+s1iV7-;vL3Zewd4vgEMqVd?mdrotF z>O~doe}qtr;!#}7f=P0N(9jf`8}?NzrP=~UbnyAeD2SsHD&YK-Y+4o?G+>M}`xXan z5G#b8OKe;mGzYm|g70KoR=GlF5GEJnFuACl^F#!ew|Ns+6Ji$6f5Jl#K@5Qrfq^O5 zE*5tOAPEvD6G`}$7gMcY+PZfV0rwavfIE&pa=5r-x2&!rW-PXAISPlg?+X^XWm)!| zjDE2Go;L=b3SJfnY4|oYUgzW2v@0vrv!UH9edxOL9(N#JTm`$OB6sR0``;?bFQcb#yQ4Fg^ zFw;l_moY*ZG^@99sCX@Enki#6-5trD4f`L@;1L=}8g&69`yO*v)`p@$O&pS4uP(te z@h*2r1bCP}p$E>fh=^coE9ZxLN`Lo#&>0TN*6#z~>3!h0YBq22Qof5?_b&R}tY(|H zi0}LsU5nWqzJpr#4*J}z=9)Y3-Msh9hMS?jUe4hB`^iUD`#5#fw-WrULkUa%puh7|#_z*39}V(lnadB8WoO%x%FO zpVnzIi%nmdB4GPvdLFa!#rT8IQAr6WpF+@PaqHv7gPX-KhfSBo|6Dva0)ri9UW7FH z@fRK*-+8>aamU!}BRGX^`-&$vB3Sg)@tt)4;WuR$KXe75ZCJK1nam(`bgKu?Uk!`X3zD83d1$Rk6^R@Xidm zp&b%&?&c-fN_L?Zwc~XWsq_lx1q=b&KZhpCQbQUqPULaWCV{m{hfM-DyD*iYB!I6u z*_&ROLCt&Y&+SMgyz>PbZ8|k~(O6P!Aeqgk5aLu15r;lFcjA)0I<{mMnzAT*l1fMw z?m}D!@9S5o)2*77!LzU;pXnjxF4@}#g(>9>^!Gx&z|t2IA8W2`MPR+gWS~_pI@MAo zpgzce%2Bq%^w8_kESo{5lms@aNI8_(nn#wve7eZZ0(>gWWtAuZeM8v!J4^WpZUDZm zBbD;OIHW)_B~b7^jIdflY;h}4Ask=A^vN*G-dmU>sGl3gALDveY>a>bg5zk~ZJz2g z*uGn8kooSrr3NQk9Fc2kK&m)5r3S}5u6mZG1|`FweQ`j<)7o?jI~KsW%R3fyR?p}4 z#tPt5WM*C%A5g^#xn{-9+hh&?Rby|H(Oqw1;pl=Vrd~WR|04Vsg?>hY-O ztxja8&6$5QtUZ5(SFSawdy`HkommMiIYUE_TMvl_KrAbCpz0}W&_^U8%$s#|$4-=) zEbiYm_VV*55B7vf%*)8d2Ecb6u!Kw@PI^43~!kNMxWYy^5~l`GBDcE%vO&H0%|*`rir{HZDuu* zw}uNdeia(bi)SozI0b@CgA!fqMt<`fuoc@25fLfDG(7c>38l#I<~o^OJHLd|z#B%} zjW4m0$Vc%fLZV9c&9~WZm?It%6}Mje-7tqzPXD{xVeT4z_Lbt1=Up@vp7l*aA2vsk zzxlL_L;S{kI23Z0;z#w>k7T0@N3F5tg&oZ2)VC~1#cTNmAr+Zee5R9$HpINSB7!}F zQ7Un%N(g2Q)srunWfjH(58`S7Y+jyd8`=L+ia6vLr>8)xGEW+7V z!p)X)70MU(L4iVdELc^TIoP3%zA!JR+Va@VrEsbL+W}O3i1f`7$*rYF1IxOHCg&zeCLiY96F!2Q@pXd6=5-QbY8?d6b&R zsM$r$lho{?=2>d?QS%%%FH!R{HLp_h18QEQ=5=b`pyo|#4pVcKn&Z^GMa}P1^9R)Y 
zAvL5CIe$zI1;IE!qUO)2`EzRif||di=C7&w8*2UwHGfOZ-%&$J-p(jBW7LdObBdb( zO3lxx`ES(xJvIM8%`d6>M`~!xCg-22`4u()LQM!imqVp^x&Em`l`I{qL&!DNxDbmo zlfF{MyF)q54rOXOl(6Pd77?d@I23vBv{FL>a1Mq3aCC!1E?$Rh(hjM=4yhF^7CA(^ z9fB3hvQD($Ve@+I7Pt-ay{Qj_CDdf;Jz!&oT8h21|2IWqm&fMEmd37%Er`7*c4cgN z?3UQAv1?*C;oEZjeKhv5*k!RBape;Hy$AnW@qaP?E{}aac6;o)SUav?##h>6SMzZ} z?0S8s3*WE7=gV`c*zeSY garIh!y9WR1p7&zTJ7XWfDA&Zk5c?w9t7G&3e^?qycK`qY literal 0 HcmV?d00001 diff --git a/x2paddle/op_mapper/pytorch2paddle/__pycache__/prim.cpython-37.pyc b/x2paddle/op_mapper/pytorch2paddle/__pycache__/prim.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c42b6b2b1bd8d4e7e926743fa4c4b6e423b02387 GIT binary patch literal 13490 zcmd5@TaX)7dhXVxbsf#c#x};wDrRSQL~MC(_81o|>@K;GA`StA$$+Dhr!}_58cA-o z%y?*34ZhUE5*u&9Lc%(ZM43xAU_)(|7~9~=dnylkOXnq3j5OmsBrmB-ikIa3|I^pG zc(7}WVq7}ieJ=kwefqzA|NkHVxT`BJ;cw)tgE{SwCFx6gu>S(Mc?7@OMHH%}$|Y%B zT;=fqo&%-eK1mI#p;OX$NDZqBu3S15q9)WNu2Hp1P2n0-yVV|C<7%(k zhigJzr>@5}sote-z_m-=sBXeFrQWS>#18e!S5&d)uOT_8B$X+0xemU@0J=tT{Z&E zKubC)on76+LW(b>(D0dqj^I;gfO$Plkftfg=I!88z698sQdd)e!2@ zenio(=!zbq`;&4rrlM6OpKrx^v;w&AH}8rCihVWqsl8Q%~eowN!XKf4rdm*Y&?xy!`Is&rU3y z_^f^D!ot~)+gGmK{ON0pzj?Jicm2z`6Y+Q&fBP!h#NnqVv|?3XxOjE`i&yt>H8u4Q zr}w0bWj*~+x~dhY_w4zJHPLg&@}+v=q3uHl1~N|X_G>3PzxcR)?(gP5d&Ng%K+L=F z$+h-{R~O#+L@Yo&wlBSn?qaE8`uQs#&0o2SSRdTLT`)tXVolc)w5q6HdF&b5DKn%WuNKUBu~y_SHBm5w=wvD=iseJ5GFj24^ST+X zRZB(PjN@x+dhLaxe%K75{xH3hZU(EB7tC;8*R`4%!*|Y==rdSqazN1zzAJ3O3^W{T>Lpz=w z7EF~>3&#qjN_EG~aFw}iN2Qt*Gw-M#*NKhn2HwExag%p4XLHSZOi&v@Aw^Sij~vCn zIDSbv9!Sa?0xmMZwMGMK@QUm1{~G(ts)KYj|@pd4EA z^MOXB5#_cR>H>yrL`iuV5iVmbh?R98Rs#=8de8`y@_;bLA%bBQ&qhcMoq@O+aU;gU z7p`w`g->vvU-Z6~aE*?P?1FrW@s5b8c|AYSkI$9ewWeq>g#keTq9O!CJlf(xA|j}z z-Hz?Ef75>dRktGx(wB4RI=}v)bLzD(=YB3eI3%(2RXj#vvFpG!qIxZlnH2Lu2?WKiq?gDvxlWe2$$f~ zWNzYcVd6-R>$DVgQdrh%+C6y4B(yC=le(7jm zNYcYc;@UGZ$jQqvv=N<|F+z_@2da&j z9y6jw43_eQ9v63wxSlZLdUC1@wev>2nQBSRZdld0-ebh4dMzvZPkLW-oe@_KeS(alfF{% 
zU|lcF-)MDSy8N}|Xn4$#qK6$Z`h$Z0BhY$|RPsYx4;J&atp^7B$JP|7F^`y#Je_{m z6%%4_`s>Gxt}w0C3o01%3Q>@=8fh&vT!#+dZ9+2ik>SiHQ?4<83#lMpCNonjXu28u zy_tzZRWDY`W@xHXEDK>#YHXv;$l-jAw#kgZ^w5pg%j8)BYe6QYqGjT+87|kSp@oB` zLK&vza2_ToP_CL0t-udj9}iU74k-B=UsIMU&sFnUe!8|y;>@Hi7i?v^CyRxW%EFr? z!v`Bjwlc?!FL2>l)SrkW1i6fi5jX z>8}f;Kgp6}L?BgBmNMB;AVFa^EOIld24F&>MHzAk69Ss@RIC|isv(St@hk~@7RZX! zOp>8#1z@@ojif3!x@HFSE+ctThOBNuZK~02bQ`g$pc-zbj8sd8T%{UaM=$M{j99bV zNVH_0DbN&j3BmR--5!f>Z=-J}3z~gQbL%piU8=GXG&f2_`<#3fdWI?YT9h%jcn2eF z$?*TU2C+EVIs1P5GUtV5u^3WN;wkqD>ah zp}_~d!}EHfyl0P2qlaDj=blw=te04)Z%V%6^b=hpz!Pc}|o<-oe?5I1@DzF<{l^!@)sgwW=ij(b^FLvJi zsPosY`D>s0pn<*Ju0HdBK7lD<`sc5lB^&|S&W^S(f7ZVGhE0dn+-!tReQ{FzJ|yhs z|KV>o%EW=bSpjBxgdPh`RI109slIx-coaA}DW<3a000sbR%E+xDs04SS_Uu^3RTY?zeDHyFMNm1FzLEAwSK1LKa2rVUmNKULS zS5(bzSG@_u(q*edUg5Y{+u(Ro@Mew?`FIy!1hLxScnAu4(3FdgvV^g)Mh;mchgKRn zv{Xa#rsp1;tYOKuZ78H(Lezwy2}AclLH42S0VJU-;ca^Alkdfy_UGvD02A^I9CWSY z!B&jI)ppS}>TP7LK=~2sj?Qd_8iQjE1+@XnB*rEmc)A&c!hs4ox@*5gZgmq16UaIN zC8+q2b;5{R$og(qk&r#P0}Zm9P&KUVa|ayV@i-{47oycCAZoiwig)i^Q-(lkfEED} zqKj~)g+$oQ&uV1%XnFt>?r5L>bpG0#R_}!mKb*fZH-F_Sivg8 zu0q;Y9A=3c^Wck(S;nAQUZEFVM_ACbAK)WuPg6lMAta9EMMSXksZYP%&X*u%p1lIn+C>M}C3BdS~%qvz}{{ z$*n^ztS`HC#CRdL)?{;oaO#9Rr?68OW!o$6g;VFUUTi7rAu<#?DH9%c8I-8NrUDaN zOu5r#N`X^p&*2`4PkRSEIy00Vb*uoGac0=_>|@)f%hmkEk%9ivk)3NReJ1Owaz}a7 zahPj*;Y`LNoEDJiMR!ugb$Kac;N6xniR~1%=us6jGctDYOFqqyVLIPM&$tWw%EGu@2l1y-AS@ zFfyE{XVm}#hSF5zq`VOzv7Aza-e&|3qvVRQV--6R(awQcrKy+^2ir;^j8j8nQb5W@ zEPDUTD`D+R%uV2}kyuhk6q8IPiM4j&O`xZZgwbULS)jTQ2ML3|0)Psw4hJ-7-yPlM z(!z|PjneWtocguqk*5GZ)v(b8&>TA?s}YX9c;*5ar9M$3WGJE)v&&N{a!&!)V_1C( ztM5VNCdDwiyU{bV8~feE7HS*sbg$8i(SYDb0_SAyIza9_s7WE>6L0n!2=g-VeFt-; zntet$17Dfi#VkgURYRhlMBLB{!wq_yn@;=TC#LsdavLb;;in!e z^L7O2YAinE36kh^pX$ck5TZSaaPwqF=lSX8mEoex_GG26<8 zZ0Lvsayx%{xqa@v#T&nb&wg`m&J!Z0e%L*a&n~D@r1{3B&KsZMb1n1o`)54;eP7nm zssxF#Ei0g5Gmx7xIju3)C(jg8mC|sL_ zHP{5ZK;Z@o_67JZg+L!6>^_VMA(jnvT?AdnPWT{>juzWad>wR+z7)51(`EY}XB(Dk?mO}&z7~dX5;E8jXxEJl0%r*c*qM=(2fUXM7dIzjl(GM3ie|UyiV9%bt 
z_33>TK^8%pv5~cz&nC4L-}-ovD5VFsTRiA8sBpUC!s3xNV~sw}8a=lxx4|hk?NNO4 zeP#klxf%d({$QyfY{8FE|JA*drFv1#ijYQjLMwoD?og?6Fki|Y%WD}~E1_G)g2e2` zu__N8ftp|9$BFQ95A|QmquYi~OWitx#C8;d!oN!$I|l7J&xYSmGC-pwO9t#@M-Qbt zp!-sA@REo=i0mHeN79f8!&WfnTc-~%U{RJn?ELJPzzpr{Cy;TzeQvcgYni=e<~q8RxuU4x#OiX3dx#P{IG)4l)v$YLNciJcdKEUViGWcH2YF!Altvio z)lEpRDoC$B?@6z2LV9%*Ji`!@ssU>RSwRU0f1EQ1bh@e;qx36|4Z(A61vZlPOiU$J z%Kt5T4!em?sOf3Lm4Vu%+!!E_CMJZ=X8@4qmuE zXEPssGA1NC-8?_H_})w69l)p7@g(Xam~HO52Mp ze^Iqg@6>QChm)hmv*or7BXQA&v;f{_CQVz`@<7Z-&hO5;}x;Nmk z3WqWgJG}3xTBhQ+z|5*PS&v$^n3m5i*l)6obBX#-c6W(d*Sx;ZC?_^Z^QlSw6tFw6KN`XB?xwj|h9xL^Ih@#Krpa z^%*yMwHh-K?EzkyOpvxcMUSN^2&Ju##Jh^@QjB49q%oHezRuL?}wE z)vFV~`LvNhl>2 zw8h~rdpX*T7rrD5^~<4~W7tcN$Bu{tLp}r--;u|Lk3M{NFvd%(1ZLkhp7` zsbKUUUYbT>^q;2oCHP*ryS}&SK_;;=%Vu=sS^I>B?@yMrpv>c7dQ$gtnjbhXsh~vPFMh-N)3L0TPjf|^C+Ds$v(r9MS zviSx0=yUYJ>55t}6@J)_9yLO^6zq!zqv`0D=zY;lbTh6GNAHdf;I}!t6~E26+w%Wz C;Q@94 literal 0 HcmV?d00001 diff --git a/x2paddle/op_mapper/pytorch2paddle/__pycache__/prim2code.cpython-37.pyc b/x2paddle/op_mapper/pytorch2paddle/__pycache__/prim2code.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7d1196f4b1c5ba7d05d941f9bd38aecdf099bd8 GIT binary patch literal 14460 zcmd5@U2q%Mb>0Pb0T84lN}@$dwk61xWxPbp)u-~Qse=iGD7{qFUbHf_oq_qR82>;U9IfajR8hc_< zm?HItdCm}NVZC9TGle~8%o($3kr7!OPidSJL%?Z`^I{m-(s)E{0=6~YEVck=G#(XO zfwLNK6Wf7v8jp$FfQK~RA;y988c&Fwz{48v61M}7Xncpb6L^!xyTu;h%^DZPUf?Yn z-zDw_9@TiCxCeNv#{0#+z}qxFAd0}-HNH4~rwf;~F0o z4**YS{GfOUc&Ek>i${QWY5b^o4ET19kBKh;-=XnwaRT^GjZca%0`J!Nlz1F?kH%jT zPXHG*eo{OIyjSDX;%VT!G=4^W8Tf9EpA}yL-ly?b#dE;-Xgn>x2E1S6=fw-a_iFs2 zI0Jk@hNT_=z9CFtu3or@Uruxm=qq*X9l%nR@x?^JO8bPDwbIoNB!>wRE(>2d3(c z(jvW6PBoVOx~$BHCmPaSJW8*aY%B*ub55B}21!Ey2$LPCl?ljIAJ{?QZ{)%}94U0xeHGhJO3m^RI?GM)8syBRh z(QP^My&wMIy}$XF4_4k?|Ni&gTBGTE@Bj3TOXX_QdH?NyTmQi?-v7IQT>tUEzW4go zYrp)5^|ycZ(bYGfsMkEdT=P#iJx6|Y^?M&(eJf9mO@G&}{xp2f`jx+5fAfcB-^Vp? 
zt$*+JYrp)(hd+D$gLl5O{?9*u@8@seE$ctJa_!xp{r2_m^6k^bG}(fFL6C;M0-Nsj z$a>ZCvLl1^g0svvP@55up{Dao{A+Iq^fK5OR^H5~c-Lm*_yFH2> zVXhizB}Dd}hM#J~jaE|pZLOr%(r~==YTAeU&A}~&eI-RMCeXIU%yetya;I>jfXzW{ zB)umvmjZLSXi9nqTr4wNmy2aT$kv-ak>XzzWSk`*F31)w+HxzdC4;jeAukom@ms6*biGGBgcW{1Z~JDlAK)bqz$=sybC!=*@Hlsabq@bVOt}ZgdYBK))|*4d<(lZB+r!gN^1K;}UAxxA z1H_QsS=qIaWJl-Nu7|NhcBLLKQt)8*%bj8X zH$yPYI#p*8(J(ZgrAV>eZidgWG8u>6$M8eDlq_=9yW^C|O|`5327p4DK=|Uge>Ei4 zv4llu5bO@|6>!{+g-&bvpe+AZ*d2T6uC|5Dv8$75+aj}Sh4;2IYpF+!j(zdeSp&{( zub3jcV986k$`HA~MiA)OVrbQB+u*v~84j?13=HE0OSpIUu2~y5QxeaUsn}e97*sfv=2ba1maBTVTkDq+LHyA-moJd*R;di8RrkuoeJzahX2e?!G6?RZXaP5H3VRBeU=U6i- zh*L!&sCtNpKI%G@1SkY@g~3;4^!3^6uV3So04sB_M3TtGY&N&?+!?8<3t7IhQu2@&(VEQTj5or^N!%GIy&DcJlLa%Y@l&>pow>(vB&U{M{umCaj%OwXgl}~@gApsbHbEIlVk>#91uoTA>h_vSw-zGye+V~mg&dvrlaXd z$j~plwOJxaA=7}>I0HV2{r(m^fHU#jzh)+KLptWTD4vIzlyEElfg)21X>{|!XYs$j zeSL@-(1RE8le+d9HRVIepFS}A=t6Bun};fujY{3Zy(Eblbrv!V^4!DN>oLo~xM|*4W5{LMZqCJ}1h+?$+-?{oIJQP*$!81_R0f5<7Y`CQ-G%a9BF%D@azqp%9=#!x zu=8-P=N$6qJI{nHyPi{ck+wQdQjI0!Y;Lu(QgPe~^YBVU2HlBDtsa-VWwWRnc(z(E z`%HyKJUx5{XdQmbn-L+z?_&cew4Fm7259q>cwb$99<1u}3b6`f0|qT$Nb(wdY?#{U zz^C#qHzK0nkKzmw{f=)KLzSd1cV4;NIj>?LRjHJBg|jr!prP_Eqz2b_7Jotnd-5)% zZb#jA~mx}Lm{wk3R{S(%sdRzw*BCP|-e*wgYoi{2eP#j{+Mq{hCeZeLn zh4i{B=Buerrju=_R#O<4QB9u4UY7Uh4x8@qF@n30#W>BvIPE|C-H$8O`LujTWctVD ze(zuz2VzE6Rp`g!p-lPG82EK-@?tJldh4n?$jnshl?5*tCHYP)&&hIQo@G3)=YtHZ zRvvEFB2P3d;^HW<`3b?uOo_!GuGfPtGo?`f>07`nQ@OM6;0JoUu`$e1uGpz+r+=GK z$&d12iR<{&xI&wd#u3_tp>hz`@t-wCE3b5G8<`1|5Q2C72YzvsrJqic8CX}-wUq09 zE`81Q-X&&ajBcIyOp=>66xAm2@;x*Z49C)4SFDUAyM$Od&bclc&Lh_y0!dZcv+D=x zO4SQv=HL{&EnICyFaB>3>5ZMwCP@#>pjxMdL2D2^!ok_b6v(QsbUnQ3f3G9?l?@}Q zz4kciJsUp)s5<{g>|$Q~)g(9VwYL^tATw=XNv6UJ-tlXqqh*5Ul4P_uzKSz(L5VdF zNQS+F60}xBkYE_eOg76#DVk&G9*yLpgtD&mkylhP)7p+KP;d*76(pva9BPbXN-659 zNTGV`7Nd?E=|mIw!Aft}8S4g8JJ~6z5~-RF?Q@i5VbWu}ayyAc^iNLXSWjBfO?9Ja z#Zl`onOZsi`M9)yjp)T(`fG7&Bq>ti!?NNf?D#xqv|{piYQ-EU0Xn^ca%E{h%@(bg 
zq4>N?1q=o5ER{Iq6_~c%cNUWNYOZagV9G*-%)*m~s6ayTlj`J92Ic+XcCO!?QweaxK|MMmf0=u~M_vC|4GK zPyAFY*=5SJaaQC-n$;`-d5?fkU_ztIjTF?j1s|~C10;{w>;qk1@Di*UK~bD(XD(z= zMq|obSX@SD;Weq%bhmiMz3m)%T6dnXGy7gro@9!12mRJ_u zjZZ9iM;Z!Alvuu*Bs2JE**g=vQTpa$vzjAjv2b~A!?@|{X7A@y-PD}T32v_>x#>=P zvrd@fxP5h4fky4>RGK=iRSwggFx7ess)~`QuDbd@UFljO)IP(ZdH_4DN#C& zYPsUjgZB|VC6>{=Efl5Kk_2zLNA2Vur5th6^O1!h#RwOx7?c4A?v~p)@k{4o)0K|L zZ#x(_BXr$zlBF&oPvK0zv3yt^^LMliCz>Zo3>Nh8_I}FiQSje^=kr<|N!n0o*nxx= zHF+n!=FAbIn^=D{gV*T@Lq^qxr8E5~(|qyLZmQ^%M7Uz2avnkb1NjdNHLQyVu$+$; zcNikGk`mc*gN~v)qPkkG%1TVQtKbSpk1Q{z@b*^v7|KBM_m9oZDA+a#+>5^x$M!qt-HbSVdFuAB!4k0-irYrLMp|x{(GV#`R9B58T()K+wY~J6etR zI8jm+E+%E1B$?~t@E~GUV9|6juac4b@y5`6w;HP=W)!GS6SJN`HTS9P;I}umc5n{| ztl4VQ6_ZLNp7iF+4Ibj_0Ug)v?7Si2f%IWDZOE^qDre8Ns zMy{~KuG!pKqX?FbEi%YWx6}+AXQg9^Vf5+F!D}#)s+BlHA_j>w zJt7865?Fdl`jyUG@syR35l(mE7g#vY9~Toz8GgZ zkh~lYB_;(e^o;6}(DVa|=Bgs27cBMkcUpp^4L1-v6P&ZKWVqH`Y%CurPWp9HzPii9 zdK9*SFjLw2QdzP9-HDqE<+T^**W;Kc*)ivoOR6=|a7aYA6ltuiu31pXpuX~ybGUFb zF;?1w)vBIkG~SL=M)4tDW3N=bJL#FUN?sOdb2n`! 
zX>)`&B(=&1Y4Z?msDsN#XhU*7FDT+gIJ{zo7j;N#YF;29Pt)Ghw4oY7K1&;#^O7_< zBxzEGrwb$r>#X*&I4ViulcbDEl8_{6Es}H!NdtUIBVqX(ZOXKvK?Il2Ty{#T&Lq_; zk`lF~1ScsINQzn9oh5Yw_7q7?606iX5K_Vqa>p0zqFHrLl4U%4Tz)v0AI*&B@X3sh vj}DL8qgi|i+5C_8viNM{qgmd!`T9JL&?mfBU6;c-dPa71h|kf}am}9qXii}h literal 0 HcmV?d00001 diff --git a/x2paddle/op_mapper/pytorch2paddle/__pycache__/pytorch_op_mapper.cpython-37.pyc b/x2paddle/op_mapper/pytorch2paddle/__pycache__/pytorch_op_mapper.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d04315e2638604b5a5c1975004aa24888a1a42a3 GIT binary patch literal 5896 zcmbVQ+ix6K9iB6{ot?efaU3U2Q-@3GhHBHeRSG6Vy(WOblr|_~dKpdjjGeW|v+Fan zVZEKLP&bg|At*uu@wk?dfL9*)J9vt!;gP-+B>sTD!1p_6cfE0nDzWyQIdkskclo~G zIj>bJu7=->pWpQV_)Sgw2YpO`CNi(!N!~-kHO?ZNWitlZ;$#sCkTQo_<%`WSnvRj^i2J;N~68 zGx;%Y^THkGS$v5*yoj95kMj~QBUj)RUPUX1*LWT8B46eUd=WM7UCq?I5>|U6uf4T@ zts`2uri*RqlgVij(X676o9 zo4nmh^Ga7r)b9o&*iG_sr8c2JnVZlzQ0BK@Z(@N z^xHfyCfx`#MPZyfty^Jh$M1A=OSZ|KR0Ov}k%SE+HxB#FZ>%W`@OgMUj5^)({WZ0L^PR50D{bZcWYN=# z=^kbcn%#X-giKE2NtTgltjZkx*@u66nGIHs=(RZ&;Y`66`qD#6PCiC*SG&tLb!B7s zq3)4(pWTC^M^G0j{~mH9cAsr&L%o0T9(zN3zd1BAW2}=s9_T|eg*jyANKe*~vs5nA z$986r%J|7dBd?e)`d}XnDu*B$tu&zXhAo z4m3>r{1A&7vy71~($M`Cu_ca%{CflZpF}6lG`NfnCIpW ztRS~g2L z4K5s2!PK6bmoQ-vC6}8=#vbVF>*554kWMf-ckNcVN=sb*aJbrzS9h_nXtkZJ#+`H( z?$PatG~}!MVcKlCxhYp?Bq5e4!fq6_!n|-RNXYC3NmAI9oJNuxWZ%U%`>n8>wmWfd zZ+66PkTxuFoT|&p%Y13}-mzXhn`|Zp9d1$F?8PnVIZ_r4T?#^cg{F~mSw!v|c#;() zkc(}@P|M6@F485)s0^ch(p0B%+Gvpx&~E_`$yVCx*8oxTwmLLYD4cBY0c>zfyRYG4 z04tX2+r~ERQT(pYVUKKVA7~@8+iUOXLu+Vf_PB7M4GXD*T#@QvnTE(RbL<|LjZ54< zV7zcrn%drF31enfK%WZC&%jvDu()^my4L>{=BP?D91IItVO+!LZsv~bgwF7gQo4{X zW+klXpXo7`8dsrkI`dE_86mw%zar8c-cb6xI5AxGV%L< zBzJ!NK%4FU#$@;U_A)Q+P^%9zi;yqvrK@c$HOh%@$Opta@+bM_@i7^VgRxm8}(8U{vJtmAbC=-LPuPaai zWY}RJ1zfsX!xT?ry4-HZL`^^s#ZiD6Fb*0owR%E?aq1K7_|w|5VkOn-5NYzIDPqjm zU%~`Q6^Uj$a4i>3Mw~FXcocx9{PiRN%?}4wQ=|w=b5%vU-wkv7S{NrCa9#7W|KK+R ztK1b~+7s~xHZUksRMU()ovtEsBR6`W)iNlWHW>x`U`tDqZ*GNqy&%dJP&6-Wg{j0; 
z38&wtB6}wkaTtm3(npoXl@UpZsS{DA!n%)Mh&g0tu~w6w}Yq`CZa|) zgm{ew3CH3b6?})17b%e}#oC_TmL25A)D~$twdUuM%T2=PygmVQwJ(ZsN^|Qt{E)0C zmoi233pCssB@)e-kXy$i!%|JJ)(FW>y~>thY;|^;>FgA~2;s>H>o7v~t{Y``3aw1k zIY{-%Uzsf$b+o2&s?65wYz6g8U%&`c!<`WVg@vSz*J(X}MS`Hj=nFuKiV!x32t>vV z@N0n@v>iuW;h+P+hB0C%G(f+Fyt!kGx9Oc(Cs7uc5v*7oR0605IjEXT@O%^fxcRcC zBjQECgE|?|R4JeVQOH>-I0Aj>;xF8OY^MLwJ2Pa!le4~f85@wi{~1L;(s5wP{YFt5 zi1+~&zDY@)k_si#D2dAj5v=$sl14$9i!}5inW!{xZx$Al-Q^VpG5*AO-a*H_G%1sY zDZ`psY?CiY%bnno9Pk(DlB^=pq~1uUr0FukPM1~ns!q&KC4;j^0qBXbuC&OD=tpis zfvwc&pCAY50=gv31>pYyx?q_>T7qotRMGloW{GwAW#Pc4{Zil5sfE-Wf=+0TZ5OzX z(}%IexG{FpVg~(#%*_Wl>^)@hOL8dwRVqV|kA|hJn3X_UE~T)SfA2lod!)*^ zgYQSGh8Ja?#yIo;?-5>5SK&9Y)!rlU7|DcAWRMAV1j|>_h@YFUk#bp(Wc?dR%^tBB za#t-MAG1-~7*q)L*VnIx>8mii!5IbO6B=pW43e;!cKq9cXa_f=aNw!~+q)kQ-$&Ff z(ODv)r2y?B%)qP7GX_vgI!Vr6$k_!oU9$x$0QjekMTL39fZ`P-xgK}(Ld*jpf_-Gd zp9lT+1c-|`F)9`k4QgtG5ZdvUC{bs^*j&e46d_2*$}NGjTPUeqx?rQMoQyzNAunAi zc;$Ka_6jr!Z|~-o+^A=?`2|YWJO?=+_GADjQLmlzDvaf<*iPXPE0|mOabTqi# zBHT*>smYf0W$Aw8hy=Kc_^V4`b`f6BwC_1Ju_s~hJ%`Q$k+<} zB;*m=HJn3?6W356eu~69HsLrat_iex3y&3&g1qI&iVhjkRUN+o|HKBy{~Ml!4l{&! zB*dwxamrhLLTpEvzD44qABm38PKqu=rvJ9WI7FRS=0rx!r0>{?3yv}nqn_Jy!Ckm* z!BIwW7l{*r_=K5Eu1jVz>8vYj2qy`pEN;NhZTNXXdG3~$6}D7xY;B0^ScABMBJ{MsOzb6d##MU@p2i#i+No4Iq@HVL@c(HtyH}yDy+#Um+oy z5HwDH0DEVPfLx<~OxN>)aPm(a+oO-z|i6C_`60XUh8PBb}N z07||3l<#MaYSUB5gy@edgd*F2;YlbG)L307_@RRaK@7kK%CV6j96zcKxkb8sq5mrl zV`F2ZQKkz$S{p9(T;IpFMlYg#+4t!{plakTmmsGT6nKjFDUo6zkCYao;_b@h)~yvr zk?$Wayuk!rJE+)|;zjyOD1t(ST6K=OSKL$XMW^J{0Os@Opu4!n&hkH>u Zl6~%6Qu4Y?$ApA9kHDxbAB57ze*wCx#q|IH literal 0 HcmV?d00001 diff --git a/x2paddle/op_mapper/pytorch2paddle/aten.py b/x2paddle/op_mapper/pytorch2paddle/aten.py new file mode 100644 index 0000000..d49f268 --- /dev/null +++ b/x2paddle/op_mapper/pytorch2paddle/aten.py @@ -0,0 +1,4153 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from x2paddle.core.util import * +from x2paddle.core.program import PaddleGraph + +dtype_dict = { + 0: string("uint8"), + 1: string("int8"), + 2: string("int16"), + 3: string("int32"), + 4: string("int64"), + 5: string("float16"), + 6: string("float32"), + 7: string("float64"), + 11: string("bool") +} + + +def aten_abs(mapper, graph, node): + """ 构造获取绝对值的PaddleLayer。 + + TorchScript示例: + %n0.3 : Tensor = aten::abs(%n.3) + 参数含义: + %n0.3 (Tensor): 绝对值后的Tensor。 + %n.3 (Tensor): 绝对值前的Tensor。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%n.3 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer( + "paddle.fluid.layers.abs", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_adaptive_avg_pool2d(mapper, graph, node): + """ 构造average adaptive pool2d的PaddleLayer。 + + TorchScript示例: + %x.5 : Tensor = aten::adaptive_avg_pool2d(%x.3, %_output_size.1) + 参数含义: + %x.5 (Tensor): 池化后结果Tensor。 + %x.3 (Tensor): 输入Tensor。 + %_output_size.1 (list): 自适应池化后的Tensor的宽、高大小。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs 
= {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%x.3 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + # 处理输入1,即%_output_size.1 + if inputs_name[1] in mapper.attrs: + layer_attrs["pool_size"] = mapper.attrs[inputs_name[1]] + else: + mapper._check_input(graph, inputs_node[1], inputs_name[1], + current_outputs) + layer_inputs["pool_size"] = inputs_name[1] + current_inputs.append(inputs_name[1]) + layer_attrs["pool_type"] = string("avg") + + graph.add_layer( + "fluid.layers.adaptive_pool2d", + inputs=layer_inputs, + outputs=layer_outputs, + **layer_attrs) + return current_inputs, current_outputs + + +def aten_addmm(mapper, graph, node): + """ 构造addmm的PaddleLayer,该节点实现out = alpha ∗ x ∗ y + beta ∗ input。 + + TorchScript示例: + %ret.2 : Tensor = aten::addmm(%150, %input.3, %156, %151, %152) + 参数含义: + %ret.2 (Tensor): addmm结果Tensor。 + %150 (Tensor): 输入Tensor input。 + %input.3 (Tensor): 输入Tensor x。 + %156 (Tensor): 输入Tensor y。 + %151 (int/float): 输入alpha。 + %152 (int/float): 输入beta。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%150 + mapper._check_input( + graph, inputs_node[0], inputs_name[0], current_outputs, add_dim=True) + layer_inputs["input"] = inputs_name[0] + # 处理输入1,即%input.3 + mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs) + layer_inputs["x"] = inputs_name[1] + # 处理输入2,即%156 + mapper._check_input(graph, inputs_node[2], inputs_name[2], current_outputs) + layer_inputs["y"] = inputs_name[2] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + # 处理输入3,即%152 + if inputs_name[3] in mapper.attrs: + 
layer_attrs["beta"] = mapper.attrs[inputs_name[3]] + else: + mapper._check_input(graph, inputs_node[3], inputs_name[3], + current_outputs) + layer_inputs["beta"] = inputs_name[3] + current_inputs.append(inputs_name[3]) + # 处理输入4,即%151 + if inputs_name[4] in mapper.attrs: + layer_attrs["alpha"] = mapper.attrs[inputs_name[4]] + else: + mapper._check_input(graph, inputs_node[4], inputs_name[4], + current_outputs) + layer_inputs["alpha"] = inputs_name[4] + current_inputs.append(inputs_name[4]) + + graph.add_layer( + "paddle.addmm", + inputs=layer_inputs, + outputs=layer_outputs, + **layer_attrs) + return current_inputs, current_outputs + + +def aten_add(mapper, graph, node): + """ 构造数值相加的PaddleLayer,该节点实现out = x + y。 + + TorchScript示例: + %296 : int = aten::add(%i.12, %288) + 参数含义: + %296 (-): 相加结果。 + %i.12 (-): 输入数值 x。 + %288 (-): 输入数值 y。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%i.12 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 处理输入1,即%288 + mapper._check_input( + graph, inputs_node[1], inputs_name[1], current_outputs, add_dim=True) + layer_inputs["y"] = inputs_name[1] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer("prim.add", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_add_(mapper, graph, node): + """ 构造数值相加的PaddleLayer,该节点实现out = x + alpha * y。 + + TorchScript示例: + %137 : Tensor = aten::add(%136, %130, %130) + 参数含义: + %output.5 (Tensor): add结果Tensor。 + %output.2 (Tensor): 输入Tensor x。 + %150 (Tensor): 输入Tensor y。 + %151 (int/float): 输入alpha。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = 
mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%output.2 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 处理输入1,即%150 + mapper._check_input( + graph, inputs_node[1], inputs_name[1], current_outputs, add_dim=True) + layer_inputs["y"] = inputs_name[1] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + # 处理输入2,即%151 + if inputs_name[2] in mapper.attrs: + layer_attrs["alpha"] = mapper.attrs[inputs_name[2]] + else: + mapper._check_input(graph, inputs_node[2], inputs_name[2], + current_outputs) + layer_inputs["alpha"] = inputs_name[2] + current_inputs.append(inputs_name[2]) + + graph.add_layer( + "prim.add_", inputs=layer_inputs, outputs=layer_outputs, **layer_attrs) + return current_inputs, current_outputs + + +def aten___and__(mapper, graph, node): + """ 构造与计算的PaddleLayer。 + + TorchScript示例: + %361 : bool = aten::__and__(%360, %358) + 参数含义: + %361 (bool): 输出,与计算结果。 + %360 (-): 输入 x。 + %358 (-): 输入 y。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%i.12 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 处理输入1,即%288 + mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs) + layer_inputs["y"] = inputs_name[1] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer("prim.and", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_append(mapper, graph, node): + """ 构造对list进行append的PaddleLayer。 + + TorchScript示例: + %90 : int[] = aten::append(%_output_size.1, %v.1) + 参数含义: + %90 (list): 输出,append后的list。 + %_output_size.1 (list): 需要进行append的list。 + %v.1 (-): append的元素。 + """ + layer_inputs = {} + 
inputs_name, inputs_node = mapper._get_inputs_name(node) + layer_outputs = [inputs_name[0]] + # 获取当前节点输出的list + current_outputs = [inputs_name[0]] + # 处理输入0,即_output_size.1 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["list"] = inputs_name[0] + # 处理输入1,即v.1 + mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs) + layer_inputs["element"] = inputs_name[1] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer("prim.append", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_arange(mapper, graph, node): + """ 构造以步长均匀分隔给定数值区间的PaddleLayer。 + + TorchScript示例: + 有三种情况,分别处理。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + current_inputs = [] + if len(inputs_name) == 5: + # %position_ids.1 : Tensor = aten::arange(%52, %43, %45, %42, %46) + # 输入的后三者分别代表layout、device、是否使用梯度 + # 处理输入0,即%52,代表end + if inputs_name[0] in mapper.attrs: + layer_attrs["end"] = mapper.attrs[inputs_name[0]] + else: + mapper._check_input(graph, inputs_node[0], inputs_name[0], + current_outputs) + layer_inputs["end"] = inputs_name[0] + current_inputs.append(inputs_name[0]) + # 处理输入1,即%43,代表dtype + if mapper.attrs[inputs_name[1]] is None: + layer_attrs["dtype"] = None + else: + layer_attrs["dtype"] = dtype_dict[mapper.attrs[inputs_name[1]]] + elif len(inputs_name) == 6: + # %position_ids.1 : Tensor = aten::arange(%51, %52, %43, %45, %42, %46) + # 输入的后三者分别代表layout、device、是否使用梯度 + # 处理输入0,即%51,代表start + if inputs_name[0] in mapper.attrs: + layer_attrs["start"] = mapper.attrs[inputs_name[0]] + else: + mapper._check_input(graph, inputs_node[0], inputs_name[0], + current_outputs) + layer_inputs["start"] = inputs_name[0] + current_inputs.append(inputs_name[0]) + # 
处理输入1,即%52,代表end + if inputs_name[1] in mapper.attrs: + layer_attrs["end"] = mapper.attrs[inputs_name[1]] + else: + mapper._check_input(graph, inputs_node[1], inputs_name[1], + current_outputs) + layer_inputs["end"] = inputs_name[1] + current_inputs.append(inputs_name[1]) + # 处理输入2,即%43,代表dtype + if mapper.attrs[inputs_name[2]] is None: + layer_attrs["dtype"] = None + else: + layer_attrs["dtype"] = dtype_dict[mapper.attrs[inputs_name[2]]] + elif len(inputs_name) == 7: + # %position_ids.1 : Tensor = aten::arange(%51, %52, %53, %43, %45, %42, %46) + # 输入的后三者分别代表layout、device、是否使用梯度 + # 处理输入0,即%51,代表start + if inputs_name[0] in mapper.attrs: + layer_attrs["start"] = mapper.attrs[inputs_name[0]] + else: + mapper._check_input(graph, inputs_node[0], inputs_name[0], + current_outputs) + layer_inputs["start"] = inputs_name[0] + current_inputs.append(inputs_name[0]) + # 处理输入1,即%52,代表end + if inputs_name[1] in mapper.attrs: + layer_attrs["end"] = mapper.attrs[inputs_name[1]] + else: + mapper._check_input(graph, inputs_node[1], inputs_name[1], + current_outputs) + layer_inputs["end"] = inputs_name[1] + current_inputs.append(inputs_name[1]) + # 处理输入2,即%53,代表step + if inputs_name[2] in mapper.attrs: + layer_attrs["step"] = mapper.attrs[inputs_name[2]] + else: + mapper._check_input(graph, inputs_node[2], inputs_name[2], + current_outputs) + layer_inputs["step"] = inputs_name[2] + current_inputs.append(inputs_name[2]) + # 处理输入3,即%43,代表dtype + if mapper.attrs[inputs_name[3]] is None: + layer_attrs["dtype"] = None + else: + layer_attrs["dtype"] = dtype_dict[mapper.attrs[inputs_name[3]]] + else: + raise Exception("Unknown aten::arange signature taking " + str( + len(inputs_name)) + " arguments.") + + graph.add_layer( + "paddle.arange", + inputs=layer_inputs, + outputs=layer_outputs, + **layer_attrs) + return current_inputs, current_outputs + + +def aten_avg_pool2d(mapper, graph, node): + """ 构造最大池化的PaddleLayer。 + + TorchScript示例: + %branch_pool.2 : Tensor = aten::avg_pool2d(%x.43, 
%538, %539, %540, %273, %272, %271) + 参数含义: + %branch_pool.2 (Tensor): 输出,池化后的结果。 + %x.43 (Tensor): 需要池化的Tensor。 + %538 (list): 池化kernel的大小。 + %539 (list): 步长大小。 + %540 (list): 填充大小。 + %273 (bool): 是否用ceil函数计算输出高度和宽度。 + %272 (bool): 是否在平均池化模式不忽略填充值,False为忽略。 + %271 (int): 如果指定,它将用作除数,否则将使用池化区域的大小。 + """ + if "pool" in mapper.dygraph_name_id: + mapper.dygraph_name_id["pool"] += 1 + else: + mapper.dygraph_name_id["pool"] = 0 + pool_name = "pool" + str(mapper.dygraph_name_id["pool"]) + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [pool_name, output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%x.34 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + # 处理输入1,即%538 + layer_attrs["pool_size"] = mapper.attrs[inputs_name[1]] + # 处理输入2,即%539 + layer_attrs["pool_stride"] = mapper.attrs[inputs_name[2]] + # 处理输入3,即%540 + layer_attrs["pool_padding"] = mapper.attrs[inputs_name[3]] + # 处理输入4,即%273 + layer_attrs["ceil_mode"] = mapper.attrs[inputs_name[4]] + # 处理输入5,即%272 + layer_attrs["exclusive"] = not mapper.attrs[inputs_name[5]] + # 处理输入6,即%271 + graph.add_layer( + "prim.assert", + inputs={}, + outputs=[inputs_name[6]], + type="eq", + key=mapper.attrs[inputs_name[6]], + value=None) + layer_attrs["pool_type"] = string("avg") + + graph.add_layer( + "paddle.nn.Pool2D", + inputs=layer_inputs, + outputs=layer_outputs, + **layer_attrs) + return current_inputs, current_outputs + + +def aten_batch_norm(mapper, graph, node): + """ 构造BatchNorm的PaddleLayer。 + + TorchScript示例: + %input.81 : Tensor = aten::batch_norm(%input.80, %778, %779, %776, %777, %780, + %exponential_average_factor.23, %766, %781) + 参数含义: + %input.81 (Tensor): 输出,批处理后的结果。 + %input.80 (Tensor): 需要进行批处理的特征层。 + %778 (Tensor): weights。 + %779 
(Tensor): bias。 + %776 (Tensor): 全局均值。 + %777 (Tensor): 全局方差。 + %780 (bool): 是否训练。 + %exponential_average_factor.23 (float): 用于计算均值和方差的比例。 + %766 (float): 为了数值稳定加在分母上的值。 + %781 (bool): 是否启用cudnn。 + """ + if "batchnorm" in mapper.dygraph_name_id: + mapper.dygraph_name_id["batchnorm"] += 1 + else: + mapper.dygraph_name_id["batchnorm"] = 0 + batchnorm_name = "batchnorm" + str(mapper.dygraph_name_id["batchnorm"]) + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [batchnorm_name, output_name] + layer_inputs = {} + layer_attrs = {} + layer_attrs["is_test"] = True + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%input.80 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = inputs_name[0] + # 获取当前节点输入、输出的list + current_inputs = list(layer_inputs.values()) + # 处理输入1,即%778 + weights = mapper.pytorch_params[inputs_name[1]] + mapper.paddle_params[batchnorm_name + ".weight"] = weights + layer_attrs['num_channels'] = weights.shape[0] + # 处理输入2,即%779 + if inputs_name[2] in mapper.pytorch_params: + bias = mapper.pytorch_params[inputs_name[2]] + if bias is not None: + mapper.paddle_params[batchnorm_name + ".bias"] = bias + else: + mapper.paddle_params[batchnorm_name + ".bias"] = False + # 处理输入3,即%776 + mean = mapper.pytorch_params[inputs_name[3]] + mapper.paddle_params[batchnorm_name + "._mean"] = mean + # 处理输入4,即%777 + var = mapper.pytorch_params[inputs_name[4]] + mapper.paddle_params[batchnorm_name + "._variance"] = var + # 处理输入6,即%exponential_average_factor.23 + layer_attrs["momentum"] = mapper.attrs[inputs_name[6]] + # 处理输入7,即%766 + layer_attrs["epsilon"] = mapper.attrs[inputs_name[7]] + + graph.add_layer( + "paddle.nn.BatchNorm", + inputs=layer_inputs, + outputs=layer_outputs, + **layer_attrs) + return current_inputs, current_outputs + + +def aten_cat(mapper, graph, node): + """ 构造连接Tensor的PaddleLayer。 + + TorchScript示例: + %x.222 : 
Tensor = aten::cat(%32, %7) + 参数含义: + %x.222 (Tensor): 输出,连接后的结果。 + %i.12 (list): 需要连接的Tensor组成的list。 + %7 (int): 连接的轴。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%13 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + # 处理输入1,即%12 + if inputs_name[1] in mapper.attrs: + layer_attrs["axis"] = mapper.attrs[inputs_name[1]] + else: + mapper._check_input(graph, inputs_node[1], inputs_name[1], + current_outputs) + layer_inputs["axis"] = inputs_name[1] + current_inputs.append(inputs_name[1]) + graph.add_layer( + "fluid.layers.concat", + inputs=layer_inputs, + outputs=layer_outputs, + **layer_attrs) + return current_inputs, current_outputs + + +def aten_chunk(mapper, graph, node): + """构造分割Tensor的PaddleLayer。 + + TorchScript示例: + %724 : Tensor[] = aten::chunk(%input.170, %720, %719) + 参数含义: + %724 (Tensor): 输出,分割后的结果。 + %input.170 (Tensor): 需要进行分割的Tensor。 + %720 (int): 分割的块数。 + %719 (int): 分割的维度。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%input.170 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + # 处理输入1,即%720 + if inputs_name[1] in mapper.attrs: + layer_attrs["num_or_sections"] = mapper.attrs[inputs_name[1]] + else: + mapper._check_input(graph, inputs_node[1], inputs_name[1], + current_outputs) + layer_inputs["num_or_sections"] = inputs_name[1] + current_inputs.append(inputs_name[1]) + # 处理输入2,即%719 + 
if inputs_name[2] in mapper.attrs: + layer_attrs["dim"] = mapper.attrs[inputs_name[2]] + else: + mapper._check_input(graph, inputs_node[2], inputs_name[2], + current_outputs) + layer_inputs["dim"] = inputs_name[2] + current_inputs.append(inputs_name[2]) + graph.add_layer( + "fluid.layers.split", + inputs=layer_inputs, + outputs=layer_outputs, + **layer_attrs) + return current_inputs, current_outputs + + +def aten___contains__(mapper, graph, node): + """ 构造in的PaddleLayer。 + + TorchScript示例: + %51 : bool = aten::__contains__(%50, %name.1) + 参数含义: + %51 (bool): 输出,第一个元素是否包含第二个元素。 + %50 (-): 需对比的输入1。 + %name.1 (-): 需对比的输入2。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%50 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = inputs_name[0] + # 处理输入1,即%name.1 + mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs) + layer_inputs["element"] = inputs_name[1] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer("prim.contain", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_constant_pad_nd(mapper, graph, node): + """ 构造填充固定值的PaddleLayer。 + + TorchScript示例: + %58 : Tensor = aten::constant_pad_nd(%input1.24, %4876, %42) + 参数含义: + %58 (Tensor): 输出,填充后的Tensor。 + %input1.24 (Tensor): 需要填充的Tensor。 + %4876 (list): 填充大小。 + %42 (-): 填充值。 + """ + if "constant_pad" in mapper.dygraph_name_id: + mapper.dygraph_name_id["constant_pad"] += 1 + else: + mapper.dygraph_name_id["constant_pad"] = 0 + constant_pad_name = "constant_pad" + str(mapper.dygraph_name_id[ + "constant_pad"]) + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [constant_pad_name, output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = 
mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%input1.24 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + # 处理输入1,即%4876 + layer_attrs["padding"] = mapper.attrs[inputs_name[1]] + # 处理输入2,即%42 + layer_attrs["value"] = mapper.attrs[inputs_name[2]] + + graph.add_layer( + "fluid.layers.shape", + inputs={"input": inputs_name[0]}, + outputs=[inputs_name[0] + "_shape"]) + graph.add_layer( + "prim.len", + inputs={"input": inputs_name[0] + "_shape"}, + outputs=[inputs_name[0] + "_len"]) + + def add_pad_layers(kernel, dim): + graph.add_layer( + "prim.ne", + inputs={"x": inputs_name[0] + "_len"}, + outputs=[inputs_name[0] + "_cond"], + y=dim) + graph.add_layer( + "prim.if", {'input': inputs_name[0] + "_cond"}, + outputs=[inputs_name[0] + "_if", output_name]) + if_layer = graph.layers[list(graph.layers.keys())[-1]] + block = PaddleGraph(if_layer, graph_type="dygraph") + block.add_layer( + "prim.sub", + inputs={"y": inputs_name[0] + "_len"}, + outputs=[inputs_name[0] + "_len0"], + x=dim) + block.add_layer( + "prim.len2list", + inputs={"len": inputs_name[0] + "_len0"}, + outputs=[inputs_name[0] + "_list"]) + block.add_layer( + "paddle.tensor.unsqueeze", + inputs={"x": inputs_name[0], + "axis": inputs_name[0] + "_list"}, + outputs=[inputs_name[0] + "_var"]) + block.add_layer( + kernel, + inputs={"input": inputs_name[0] + "_var"}, + outputs=layer_outputs, + **layer_attrs) + block.add_layer( + "paddle.tensor.squeeze", + inputs={"x": output_name, + "axis": inputs_name[0] + "_list"}, + outputs=[output_name]) + if_layer.add_block(block) + block = PaddleGraph(if_layer, graph_type="dygraph") + layer_inputs["input"] = inputs_name[0] + block.add_layer( + kernel, inputs=layer_inputs, outputs=layer_outputs, **layer_attrs) + if_layer.add_block(block) + if_layer.inputs["input-0"] = inputs_name[0] + 
if_layer.inputs["input-1"] = inputs_name[0] + "_len" + + if len(layer_attrs["padding"]) == 2: + add_pad_layers("paddle.nn.ConstantPad1d", 3) + elif len(layer_attrs["padding"]) == 4: + add_pad_layers("paddle.nn.ConstantPad2d", 4) + elif len(layer_attrs["padding"]) == 6: + add_pad_layers("paddle.nn.ConstantPad3d", 5) + else: + raise Exception("The lenght of padding list must be 2, 4 or 6!") + return current_inputs, current_outputs + + +def aten_contiguous(mapper, graph, node): + """ 构造在内存中连续存储的PaddleLayer。 + + TorchScript示例: + %x.7 : Tensor = aten::contiguous(%4058, %4046) + 参数含义: + %x.7 (Tensor): 输出,在内存中连续存储的Tensor。 + %4058 (Tensor): 原始Tensor。 + %4046 (int): 存储的形式。 + + 【注意】Paddle中无此用法,所以此处翻译成赋值。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%4058 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer("prim.equal", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_conv2d(mapper, graph, node): + """ 构造conv2d的PaddleLayer。 + + TorchScript示例: + %input.10 : Tensor = aten::conv2d(%input.8, %25, %27, %28, %29, %30, %26) + 参数含义: + %input.10 (Tensor): 输出,卷积后的结果。 + %input.8 (Tensor): 需要进行卷积的特征层。 + %25 (Tensor): weights。 + %27 (Tensor): bias。 + %28 (int): 步长大小。 + %29 (int): 填充大小。 + %30 (int): 膨胀系数大小。 + %26 (int): 卷积的组数。 + """ + if "conv" in mapper.dygraph_name_id: + mapper.dygraph_name_id["conv"] += 1 + else: + mapper.dygraph_name_id["conv"] = 0 + conv2d_name = "conv" + str(mapper.dygraph_name_id["conv"]) + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [conv2d_name, output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = 
mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%input.8 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + # 处理输入1,即%25 + weights = mapper.pytorch_params[inputs_name[1]] + mapper.paddle_params[conv2d_name + ".weight"] = weights + layer_attrs["out_channels"] = weights.shape[0] + layer_attrs["kernel_size"] = weights.shape[2:] + # 处理输入2,即%27 + if inputs_name[2] in mapper.pytorch_params: + bias = mapper.pytorch_params[inputs_name[2]] + if bias is not None: + mapper.paddle_params[conv2d_name + ".bias"] = bias + else: + layer_attrs["bias_attr"] = False + else: + layer_attrs["bias_attr"] = False + # 处理输入3,即%28 + layer_attrs["stride"] = mapper.attrs[inputs_name[3]] + # 处理输入4,即%29 + layer_attrs["padding"] = mapper.attrs[inputs_name[4]] + # 处理输入5,即%30 + layer_attrs["dilation"] = mapper.attrs[inputs_name[5]] + # 处理输入6,即%26 + layer_attrs["groups"] = mapper.attrs[inputs_name[6]] + layer_attrs['in_channels'] = weights.shape[1] * mapper.attrs[inputs_name[6]] + + graph.add_layer( + "paddle.nn.Conv2d", + inputs=layer_inputs, + outputs=layer_outputs, + **layer_attrs) + return current_inputs, current_outputs + + +def aten__convolution(mapper, graph, node): + """ 构造conv2d的PaddleLayer。 + + TorchScript示例: + %input.10 : Tensor = aten::_convolution(%input.8, %25, %27, %28, %29, %30, %26) + 参数含义: + %input.10 (Tensor): 输出,卷积后的结果。 + %input.8 (Tensor): 需要进行卷积的特征层。 + %25 (Tensor): weights。 + %27 (Tensor): bias。 + %28 (int): 步长大小。 + %29 (int): 填充大小。 + %30 (int): 膨胀系数大小。 + %26 (int): 卷积的组数。 + """ + if "conv" in mapper.dygraph_name_id: + mapper.dygraph_name_id["conv"] += 1 + else: + mapper.dygraph_name_id["conv"] = 0 + conv2d_name = "conv" + str(mapper.dygraph_name_id["conv"]) + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [conv2d_name, output_name] + layer_inputs = {} + layer_attrs = {} + 
inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%input.8 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + # 处理输入1,即%25 + weights = mapper.pytorch_params[inputs_name[1]] + mapper.paddle_params[conv2d_name + ".weight"] = weights + layer_attrs["num_filters"] = weights.shape[0] + layer_attrs["filter_size"] = weights.shape[2:] + # 处理输入2,即%27 + if inputs_name[2] in mapper.pytorch_params: + bias = mapper.pytorch_params[inputs_name[2]] + if bias is not None: + mapper.paddle_params[conv2d_name + ".bias"] = bias + else: + layer_attrs["bias_attr"] = False + else: + layer_attrs["bias_attr"] = False + # 处理输入3,即%28 + layer_attrs["stride"] = mapper.attrs[inputs_name[3]] + # 处理输入4,即%29 + layer_attrs["padding"] = mapper.attrs[inputs_name[4]] + # 处理输入5,即%30 + layer_attrs["dilation"] = mapper.attrs[inputs_name[5]] + # 处理输入6,即%26 + layer_attrs["groups"] = mapper.attrs[inputs_name[6]] + layer_attrs['num_channels'] = weights.shape[1] * mapper.attrs[inputs_name[ + 6]] + + graph.add_layer( + "paddle.nn.Conv2D", + inputs=layer_inputs, + outputs=layer_outputs, + **layer_attrs) + return current_inputs, current_outputs + + +def aten_cos(mapper, graph, node): + """ 构造数学计算cos的PaddleLayer。 + + TorchScript示例: + %94 : Tensor = aten::cos(%sinusoid_inp.1) + 参数含义: + %94 (Tensor): 输出,cos之后的结果。 + %sinusoid_inp.1 (Tensor): 需要进行shape的Tensor。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%sinusoid_inp.1 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 获取当前节点输入、输出的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer("paddle.cos", 
inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_cumsum(mapper, graph, node): + """ 构造与前一个元素累加的PaddleLayer。 + + TorchScript示例: + %56 : Tensor = aten::cumsum(%mask.1, %46, %48) + 参数含义: + %56 (Tensor): 输出,累加后的结果。 + %mask.1 (Tensor): 输入,需要累加的Tensor。 + %46 (int): 累加的维度。 + %48 (int/None): Tensor的类型。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%mask.1 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 获取当前节点输入、输出的list + current_inputs = list(layer_inputs.values()) + # 处理输入1,即%46 + if inputs_name[1] in mapper.attrs: + layer_attrs["axis"] = mapper.attrs[inputs_name[1]] + else: + mapper._check_input(graph, inputs_node[1], inputs_name[1], + current_outputs) + layer_inputs["axis"] = inputs_name[1] + current_inputs.append(inputs_name[1]) + # 处理输入1,即%48,代表dtype + if mapper.attrs[inputs_name[2]] is None: + layer_attrs["dtype"] = None + else: + layer_attrs["dtype"] = dtype_dict[mapper.attrs[inputs_name[2]]] + + graph.add_layer( + "paddle.cumsum", + inputs=layer_inputs, + outputs=layer_outputs, + **layer_attrs) + return current_inputs, current_outputs + + +def aten_detach(mapper, graph, node): + """ 构造返回一个新的Tensor,从当前计算图中分离下来的,但是仍指向原变量的存放位置的PaddleLayer。 + + TorchScript示例: + %107 : Tensor = aten::detach(%new_mem.1) + 参数含义: + %107 (Tensor): 输出,得到的Scalar。 + %new_mem.1 (Tensor): 输入。 + + 【注意】由于Paddle无此操作,所以此处制转换为赋值。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%end.1 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = 
inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + graph.add_layer("prim.equal", inputs=layer_inputs, outputs=layer_outputs) + + return current_inputs, current_outputs + + +def aten_dict(mapper, graph, node): + """ 构造初始化dict的PaddleLayer。 + + TorchScript示例: + %features.1 : Dict(str, Tensor) = aten::dict() + 参数含义: + %features.1: 输出,初始化的dict。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + current_inputs = {} + # 获取当前节点输出的list + current_outputs = [output_name] + + graph.add_layer("prim.dict", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_dim(mapper, graph, node): + """ 构造获取维度的PaddleLayer。 + + TorchScript示例: + %106 : int = aten::dim(%101) + 参数含义: + %106 (int): 输出,Tensor的维度。 + %101 (Tensor): 输入的Tensor。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%input.8 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer( + "fluid.layers.shape", inputs=layer_inputs, outputs=layer_outputs) + graph.add_layer( + "prim.len", inputs={"input": output_name}, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_div_(mapper, graph, node): + """ 构造除法的PaddleLayer。 + + TorchScript示例: + %bx_bw0.3 : Tensor = aten::div_(%bx_bw.3, %2678) + 参数含义: + %bx_bw0.3 (-): 除后的结果。 + %bx_bw.3 (-): 被除数。 + %2678 (int): 除数。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%124 + mapper._check_input(graph, inputs_node[0], inputs_name[0], 
current_outputs) + layer_inputs["x"] = inputs_name[0] + # 处理输入1,即%123 + mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs) + layer_inputs["y"] = inputs_name[1] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer("prim.div", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_div(mapper, graph, node): + """ 构造除法的PaddleLayer。 + + TorchScript示例: + %bx_bw0.3 : Tensor = aten::div_(%bx_bw.3, %2678) + 参数含义: + %bx_bw0.3 (-): 除后的结果。 + %bx_bw.3 (-): 被除数。 + %2678 (int): 除数。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%124 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 处理输入1,即%123 + mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs) + layer_inputs["y"] = inputs_name[1] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer("prim.div", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_dropout(mapper, graph, node): + """ 构造Dropout的PaddleLayer。 + + TorchScript示例: + %119 : Tensor = aten::dropout(%result.3, %117, %118) + 参数含义: + %119 (Tensor): Dropout后的Tensor。 + %result.3 (Tensor): 输入Tensor。 + %118 (bool): 是否是训练阶段。 + """ + if "dropout" in mapper.dygraph_name_id: + mapper.dygraph_name_id["dropout"] += 1 + else: + mapper.dygraph_name_id["dropout"] = 0 + dropout_name = "dropout" + str(mapper.dygraph_name_id["dropout"]) + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [dropout_name, output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%119 + mapper._check_input(graph, inputs_node[0], inputs_name[0], 
current_outputs) + layer_inputs["input"] = inputs_name[0] + # 获取当前节点输入、输出的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer( + "paddle.nn.Dropout", inputs=layer_inputs, outputs=layer_outputs, p=0.0) + return current_inputs, current_outputs + + +def aten_dropout_(mapper, graph, node): + """ 构造Dropout的PaddleLayer。 + + TorchScript示例: + %119 : Tensor = aten::dropout_(%result.3, %117, %118) + 参数含义: + %119 (Tensor): Dropout后的Tensor。 + %result.3 (Tensor): 输入Tensor。 + %118 (bool): 是否是训练阶段。 + """ + if "dropout" in mapper.dygraph_name_id: + mapper.dygraph_name_id["dropout"] += 1 + else: + mapper.dygraph_name_id["dropout"] = 0 + dropout_name = "dropout" + str(mapper.dygraph_name_id["dropout"]) + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [dropout_name, output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%119 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = inputs_name[0] + # 获取当前节点输入、输出的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer( + "paddle.nn.Dropout", inputs=layer_inputs, outputs=layer_outputs, p=0.0) + return current_inputs, current_outputs + + +def aten_embedding(mapper, graph, node): + """ 构造embedding的PaddleLayer。 + + TorchScript示例: + %inputs_embeds.1 : Tensor = aten::embedding(%57, %input_ids.1, %45, %46, %46) + 参数含义: + %inputs_embeds.1 (Tensor): 输出,embedding后的结果。 + %57 (Tensor): weights。 + %input_ids.1 (Tensor): 需要进行embedding的特征层。 + %45 (int): padding_idx。 + %46 (bool): scale_grad_by_freq。 + %46 (bool): sparse。 + """ + if "embedding" in mapper.dygraph_name_id: + mapper.dygraph_name_id["embedding"] += 1 + else: + mapper.dygraph_name_id["embedding"] = 0 + embedding_name = "embedding" + str(mapper.dygraph_name_id["embedding"]) + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [embedding_name, output_name] + layer_inputs = {} + 
layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%57 + weights = mapper.pytorch_params[inputs_name[0]] + mapper.paddle_params[embedding_name + ".weight"] = weights + # layer_attrs["num_embeddings"] = weights.shape[0] + # layer_attrs["embedding_dim"] = weights.shape[1] + layer_attrs["size"] = weights.shape + # 处理输入1,即%input_ids.1 + mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs) + layer_inputs["input"] = inputs_name[1] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + # 处理输入2,即%45 + if mapper.attrs[inputs_name[2]] == -1: + layer_attrs["padding_idx"] = None + else: + layer_attrs["padding_idx"] = mapper.attrs[inputs_name[2]] + # 处理输入4,即%46 + # layer_attrs["sparse"] = mapper.attrs[inputs_name[4]] + layer_attrs["is_sparse"] = mapper.attrs[inputs_name[4]] + + graph.add_layer( + "paddle.fluid.dygraph.Embedding", + inputs=layer_inputs, + outputs=layer_outputs, + **layer_attrs) + return current_inputs, current_outputs + + +def aten_eq(mapper, graph, node): + """ 构造判断数值是否相等的PaddleLayer。 + + TorchScript示例: + %125 : bool = aten::eq(%124, %123) + 参数含义: + %125 (bool): 对比后结果。 + %124 (-): 需对比的输入1。 + %123 (-): 需对比的输入2。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%124 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + x_value = list(node.inputs())[0] + x_type = x_value.type() + # 处理输入1,即%123 + mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs) + layer_inputs["y"] = inputs_name[1] + y_value = list(node.inputs())[1] + y_type = y_value.type() + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + graph.add_layer("prim.eq", inputs=layer_inputs, outputs=layer_outputs) + 
return current_inputs, current_outputs + + +def aten_exp(mapper, graph, node): + """ 构造以自然数e为底指数运算的PaddleLayer。 + + TorchScript示例: + %55 : Tensor = aten::tanh(%54) + 参数含义: + %55 (Tensor): 输出,运算后的结果。 + %54 (Tensor): 需要指数运算的Tensor。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%result.5 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 获取当前节点输入、输出的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer( + "fluid.layers.exp", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_expand(mapper, graph, node): + """ 构造对某维度进行广播的PaddleLayer。 + + TorchScript示例: + %1889 : Tensor = aten::expand(%1875, %1888, %1567) + 参数含义: + %1889 (Tensor): 广播后的结果。 + %1875 (Tensor): 需要广播的Tensor。 + %1888 (int): 广播的维度。 + %1567 (bool): 未使用。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%1875 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 处理输入1,即%1888 + mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs) + + graph.add_layer( + "prim.type", + inputs={"input": inputs_name[0]}, + outputs=[inputs_name[0] + "_type"]) + graph.add_layer( + "prim.str", + inputs={"input": inputs_name[0] + "_type"}, + outputs=[inputs_name[0] + "_type"]) + graph.add_layer( + "prim.eq", + inputs={"x": inputs_name[0] + "_type"}, + outputs=[inputs_name[0] + "_cond"], + y=string("VarType.BOOL")) + graph.add_layer( + "prim.if", {'input': inputs_name[0] + "_cond"}, + outputs=[inputs_name[0] + "_if1", inputs_name[1] + "_var"]) + if_layer = 
graph.layers[list(graph.layers.keys())[-1]] + block = PaddleGraph(if_layer, graph_type="dygraph") + block.add_layer( + "fluid.layers.cast", + inputs={"x": inputs_name[0]}, + outputs=[inputs_name[0]], + dtype=string("int64")) + block.add_layer( + "fluid.layers.create_global_var", + inputs={"shape": inputs_name[1]}, + outputs=[inputs_name[1] + "_var"], + value=1.0, + dtype=string("int64"), + persistable=True) + if_layer.add_block(block) + block = PaddleGraph(if_layer, graph_type="dygraph") + block.add_layer( + "prim.type", + inputs={"input": inputs_name[0]}, + outputs=[inputs_name[0] + "_type"]) + block.add_layer( + "fluid.layers.create_global_var", + inputs={"shape": inputs_name[1]}, + outputs=[inputs_name[1] + "_var"], + value=1.0, + dtype=inputs_name[0] + "_type", + persistable=True) + if_layer.add_block(block) + if_layer.inputs["input-0"] = inputs_name[0] + if_layer.inputs["input-1"] = inputs_name[1] + + layer_inputs["target_tensor"] = inputs_name[1] + "_var" + current_outputs.append(inputs_name[1] + "_var") + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + current_inputs.append(inputs_name[1]) + + graph.add_layer( + "fluid.layers.expand_as", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_expand_as(mapper, graph, node): + """ 构造广播的PaddleLayer。 + + TorchScript示例: + %1889 : Tensor = aten::expand_as(%1875, %1888) + 参数含义: + %1889 (Tensor): 广播后的结果。 + %1875 (Tensor): 需要广播的Tensor。 + %1888 (Tensor): 广播的示例。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%1875 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 处理输入1,即%1888 + mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs) + layer_inputs["target_tensor"] = inputs_name[1] + # 
获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer( + "prim.type", + inputs={"input": inputs_name[0]}, + outputs=[inputs_name[0] + "_type"]) + graph.add_layer( + "prim.str", + inputs={"input": inputs_name[0] + "_type"}, + outputs=[inputs_name[0] + "_type"]) + graph.add_layer( + "prim.eq", + inputs={"x": inputs_name[0] + "_type"}, + outputs=[inputs_name[0] + "_cond"], + y=string("VarType.BOOL")) + graph.add_layer( + "prim.if", {'input': inputs_name[0] + "_cond"}, + outputs=[inputs_name[0] + "_if1"]) + if_layer = graph.layers[list(graph.layers.keys())[-1]] + block = PaddleGraph(if_layer, graph_type="dygraph") + block.add_layer( + "prim.type", + inputs={"input": inputs_name[1]}, + outputs=[inputs_name[1] + "_type"]) + block.add_layer( + "fluid.layers.cast", + inputs={"x": inputs_name[0]}, + outputs=[inputs_name[0]], + dtype=inputs_name[1] + "_type") + if_layer.add_block(block) + block = PaddleGraph(if_layer, graph_type="dygraph") + if_layer.add_block(block) + if_layer.inputs["input-0"] = inputs_name[0] + if_layer.inputs["input-1"] = inputs_name[1] + graph.add_layer( + "fluid.layers.expand_as", inputs=layer_inputs, outputs=layer_outputs) + graph.add_layer( + "prim.if", {'input': inputs_name[0] + "_cond"}, + outputs=[inputs_name[0] + "_if2"]) + if_layer = graph.layers[list(graph.layers.keys())[-1]] + block = PaddleGraph(if_layer, graph_type="dygraph") + block.add_layer( + "fluid.layers.cast", + inputs={"x": layer_outputs[0]}, + outputs=layer_outputs, + dtype=string("bool")) + if_layer.add_block(block) + block = PaddleGraph(if_layer, graph_type="dygraph") + if_layer.add_block(block) + if_layer.inputs["input-0"] = layer_outputs[0] + return current_inputs, current_outputs + + +def aten_eye(mapper, graph, node): + """ 构造批次二维矩阵的PaddleLayer。 + + TorchScript示例: + %68 : Tensor = aten::eye(%49, %_50, %_51, %15, %9, %67, %7) + 参数含义: + %68 (Tensor): 输出,构造的矩阵。 + %49 (int): 行数。 + %_50 (int): 列数,非必须。 + %_51 (Tensor): 非必须。 + %9 (int): layout。 + %67 (str): 
设备。 + %7 (bool): 是否计算梯度。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%49 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["num_rows"] = inputs_name[0] + if len(inputs_name) > 5: + # 处理输入1,即%_50 + mapper._check_input(graph, inputs_node[1], inputs_name[1], + current_outputs) + layer_inputs["num_columns"] = inputs_name[1] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + # 处理倒数第4个输入,即%15 + layer_attrs["dtype"] = dtype_dict[mapper.attrs[inputs_name[-4]]] + + graph.add_layer( + "fluid.layers.eye", + inputs=layer_inputs, + outputs=layer_outputs, + **layer_attrs) + return current_inputs, current_outputs + + +def aten_flatten(mapper, graph, node): + """ 构造flatten的PaddleLayer。 + + TorchScript示例: + %x.8 : Tensor = aten::flatten(%x, %4, %2) + 参数含义: + %x.8 (Tensor): flatten后结果。 + %x (Tensor): 输入Tensor。 + %4 (int): flatten的开始维度。 + %2 (int): flatten的结束维度。 + + 注意:目前flatten只支持第一维的flatten + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入1,即%4 + graph.add_layer( + "prim.assert", + inputs={}, + outputs=[inputs_name[1]], + type='eq', + key=mapper.attrs[inputs_name[1]], + value=1) + # 处理输入2,即%2 + graph.add_layer( + "prim.assert", + inputs={}, + outputs=[inputs_name[2]], + type='eq', + key=mapper.attrs[inputs_name[2]], + value=-1) + # 处理输入0,即%x + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer( + "fluid.layers.flatten", + inputs=layer_inputs, + outputs=layer_outputs, + axis=1) + return current_inputs, 
current_outputs + + +def aten_Float(mapper, graph, node): + """ 构造取浮点型的PaddleLayer。 + + TorchScript示例: + %3992 : float = aten::Float(%3991) + 参数含义: + %3992 (int): 向上取整后的整数。 + %3991 (float): 需要取整的浮点数。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%3991 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer("prim.float", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_floor(mapper, graph, node): + """ 构造向上取整的PaddleLayer。 + + TorchScript示例: + %3978 : int = aten::floor(%scale.18) + 参数含义: + %3978 (int): 向上取整后的整数。 + %scale.18 (float): 需要取整的浮点数。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%scale.18 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer("prim.floor", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_floordiv(mapper, graph, node): + """ 构造向上取整除法的PaddleLayer。 + + TorchScript示例: + %channels_per_group.2 : int = aten::floordiv(%num_channels.2, %3690) + 参数含义: + %channels_per_group.2 (-): 除后的结果。 + %num_channels.2 (-): 被除数。 + %2 (int): 除数。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%124 + mapper._check_input(graph, 
inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 处理输入1,即%123 + mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs) + layer_inputs["y"] = inputs_name[1] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer("prim.floordiv", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_floor_divide(mapper, graph, node): + """ 构造向上取整除法的PaddleLayer。 + + TorchScript示例: + %channels_per_group.2 : int = aten::floor_divide(%num_channels.2, %3690) + 参数含义: + %channels_per_group.2 (-): 除后的结果。 + %num_channels.2 (-): 被除数。 + %2 (int): 除数。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%124 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 处理输入1,即%123 + mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs) + layer_inputs["y"] = inputs_name[1] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer("prim.floordiv", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_full_like(mapper, graph, node): + """ 构造创建一个与输入具有相同的形状并且数据类型固定的Tensor的PaddleLayer。 + + TorchScript示例: + %159 : Tensor = aten::full_like(%val_if_large.3, %51, %50, %62, %53, %65, %66) + 参数含义: + %159 (Tensor): 输出,全为固定值的Tensor。 + %val_if_large.3 (Tensor): 类似形状的Tensor。 + %51 (int/float/bool): 填充值。 + %50 (int): dtype。 + %62 (int): layout。 + %53 (int): device。 + %65 (bool): 是否计算梯度。 + %66 (int): 内存形式。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 
处理输入0,即%val_if_large.3 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + # 处理输入1,即%51 + if inputs_name[1] in mapper.attrs: + layer_attrs["fill_value"] = mapper.attrs[inputs_name[1]] + else: + mapper._check_input(graph, inputs_node[1], inputs_name[1], + current_outputs) + layer_inputs["fill_value"] = inputs_name[1] + current_inputs.append(inputs_name[1]) + # 处理输入2,即%50,代表dtype + layer_attrs["dtype"] = dtype_dict[mapper.attrs[inputs_name[2]]] + + graph.add_layer( + "paddle.full_like", + inputs=layer_inputs, + outputs=layer_outputs, + **layer_attrs) + return current_inputs, current_outputs + + +def aten_gelu(mapper, graph, node): + """ 构造GeLU激活的PaddleLayer。 + + TorchScript示例: + %result.3 : Tensor = aten::gelu(%input.5) + 参数含义: + %result.3 (Tensor): 输出,GELU后的结果。 + %result.5 (Tensor): 需要GELU的Tensor。 + + 注意: inplace这个参数在paddle中未实现 + """ + if "gelu" in mapper.dygraph_name_id: + mapper.dygraph_name_id["gelu"] += 1 + else: + mapper.dygraph_name_id["gelu"] = 0 + gelu_name = "gelu" + str(mapper.dygraph_name_id["gelu"]) + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [gelu_name, output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%result.5 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer( + "paddle.nn.GELU", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten___getitem__(mapper, graph, node): + """ 构造获取list中元素的PaddleLayer。 + + TorchScript示例: + %v.1 : int = aten::__getitem__(%72, %88) + 参数含义: + %v.1 (-): 输出,list中的元素。 + %72 (list): 需要获取元素的list。 + %88 (int): 索引。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = 
[output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%72 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["list"] = inputs_name[0] + # 处理输入1,即%88 + mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs) + layer_inputs["index"] = inputs_name[1] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer("prim.getitem", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_gt(mapper, graph, node): + """ 构造对比大小的PaddleLayer。 + + TorchScript示例: + %83 : bool = aten::gt(%82, %78) + 参数含义: + %83 (bool): 输出,第一个元素是否大于第二个元素。 + %82 (-): 需对比的输入1。 + %78 (-): 需对比的输入2。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%82 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 处理输入1,即%78 + mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs) + layer_inputs["y"] = inputs_name[1] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer("prim.gt", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_hardtanh_(mapper, graph, node): + """ 构造hardtanh激活的PaddleLayer。 + + TorchScript示例: + %result.9 : Tensor = aten::hardtanh_(%input.20, %67, %66) + 参数含义: + %result.9 (Tensor): 输出,hardtanh激活后的Tensor。 + %input.20 (Tensor): 需要hardtanh激活的Tensor。 + %67 (float): hardtanh激活的最小阈值。 + %66 (float): hardtanh激活的最大阈值。 + """ + if "tanh" in mapper.dygraph_name_id: + mapper.dygraph_name_id["tanh"] += 1 + else: + mapper.dygraph_name_id["tanh"] = 0 + tanh_name = "tanh" + str(mapper.dygraph_name_id["tanh"]) + output_name = 
mapper._get_outputs_name(node)[0] + layer_outputs = [tanh_name, output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%input.20 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + # 处理输入1,即%67 + layer_attrs["min"] = mapper.attrs[inputs_name[1]] + # 处理输入2,即%66 + layer_attrs["max"] = mapper.attrs[inputs_name[2]] + + graph.add_layer( + 'paddle.nn.Hardtanh', + inputs=layer_inputs, + outputs=layer_outputs, + **layer_attrs) + return current_inputs, current_outputs + + +def aten_index_select(mapper, graph, node): + """ 构造对dict加入元素的PaddleLayer。 + + TorchScript示例: + %bd.3 : Tensor = aten::index_select(%x2.3, %320, %371) + 参数含义: + %bd.3 (Tensor): 输出,选择后的Tensor。 + %x2.3 (Tensor): 需要选择的Tensor。 + %320 (int): 维度。 + %371 (Tensor): 选择的索引。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%x2.3 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 处理输入1,即%320 + if inputs_name[1] in mapper.attrs: + layer_attrs["axis"] = mapper.attrs[inputs_name[1]] + else: + mapper._check_input(graph, inputs_node[1], inputs_name[1], + current_outputs) + layer_inputs["axis"] = inputs_name[1] + current_inputs.append(inputs_name[1]) + # 处理输入2,即%371 + mapper._check_input(graph, inputs_node[2], inputs_name[2], current_outputs) + layer_inputs["index"] = inputs_name[2] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer( + "prim.index_select", + inputs=layer_inputs, + outputs=current_outputs, + **layer_attrs) + return current_inputs, current_outputs + + +def 
aten_Int(mapper, graph, node): + """ 构造强转为int的PaddleLayer。 + + TorchScript示例: + %1739 : int = aten::Int(%1738) + 参数含义: + %1739 (int): 输出,int型数据。 + %1738 (-): 需要强转的数据。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%1738 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer("prim.int", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten___is__(mapper, graph, node): + """ 构造is not的PaddleLayer。 + + TorchScript示例: + %3949 : bool = aten::__isnot__(%size.122, %3931) + 参数含义: + %3949 (bool): 输出,第一个元素是否不是第二个元素。 + %size.122 (-): 需对比的输入1。 + %3931 (-): 需对比的输入2。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%size.122 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 处理输入1,即%3931 + mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs) + layer_inputs["y"] = inputs_name[1] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer("prim.is", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten___isnot__(mapper, graph, node): + """ 构造is not的PaddleLayer。 + + TorchScript示例: + %3949 : bool = aten::__isnot__(%size.122, %3931) + 参数含义: + %3949 (bool): 输出,第一个元素是否不是第二个元素。 + %size.122 (-): 需对比的输入1。 + %3931 (-): 需对比的输入2。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = 
mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%size.122 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 处理输入1,即%3931 + mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs) + layer_inputs["y"] = inputs_name[1] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer("prim.isnot", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_layer_norm(mapper, graph, node): + """ 构造层归一化的PaddleLayer。 + + TorchScript示例: + %input0.4 : Tensor = aten::layer_norm(%input.6, %1181, %174, %173, %70, %71) + 参数含义: + %input0.4 (Tensor): 输出,层归一化后的结果。 + %input.6 (Tensor): 需要进行层归一化的特征层。 + %1181 (list/int/tuple): 需规范化的shape。 + %174 (Tensor): weights。 + %173 (Tensor): bias。 + %70 (float): 指明在计算过程中是否添加较小的值到方差中以防止除零。 + %71 (bool): 是否启用cudnn。 + """ + if "layernorm" in mapper.dygraph_name_id: + mapper.dygraph_name_id["layernorm"] += 1 + else: + mapper.dygraph_name_id["layernorm"] = 0 + layernorm_name = "layernorm" + str(mapper.dygraph_name_id["layernorm"]) + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [layernorm_name, output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%input.6 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = inputs_name[0] + # 获取当前节点输入、输出的list + current_inputs = list(layer_inputs.values()) + # 处理输入1,即%1181 + layer_attrs["normalized_shape"] = mapper.attrs[inputs_name[1]] + # 处理输入2,即%174 + weights = mapper.pytorch_params[inputs_name[2]] + mapper.paddle_params[layernorm_name + ".weight"] = weights + # 处理输入3,即%173 + if inputs_name[3] in mapper.pytorch_params: + bias = mapper.pytorch_params[inputs_name[3]] + if bias is not None: + mapper.paddle_params[layernorm_name + ".bias"] 
= bias + else: + mapper.paddle_params[layernorm_name + ".bias"] = False + # 处理输入4,即%70 + layer_attrs["epsilon"] = mapper.attrs[inputs_name[4]] + + graph.add_layer( + "paddle.nn.LayerNorm", + inputs=layer_inputs, + outputs=layer_outputs, + **layer_attrs) + return current_inputs, current_outputs + + +def aten_le(mapper, graph, node): + """ 构造对比大小的PaddleLayer。 + + TorchScript示例: + %80 : bool = aten::le(%78, %79) + 参数含义: + %80 (bool): 输出,第一个元素是否小于等于第二个元素。 + %78 (-): 需对比的输入1。 + %79 (-): 需对比的输入2。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%78 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 处理输入1,即%79 + mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs) + layer_inputs["y"] = inputs_name[1] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer("prim.le", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_leaky_relu_(mapper, graph, node): + """ 构造leaky relu激活的PaddleLayer。 + + TorchScript示例: + %input.117 : Tensor = aten::leaky_relu_(%input.114, %1570) + 参数含义: + %input.117 (Tensor): 输出,leaky relu后的结果。 + %input.114 (Tensor): 需要leaky relu的Tensor。 + %1570 (float): 输入中的元素小于0时的斜率。 + """ + if "leaky_relu" in mapper.dygraph_name_id: + mapper.dygraph_name_id["leaky_relu"] += 1 + else: + mapper.dygraph_name_id["leaky_relu"] = 0 + leaky_relu_name = "leaky_relu" + str(mapper.dygraph_name_id["leaky_relu"]) + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [leaky_relu_name, output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%result.5 + mapper._check_input(graph, inputs_node[0], inputs_name[0], 
current_outputs) + layer_inputs["x"] = inputs_name[0] + # 获取当前节点输入、输出的list + current_inputs = list(layer_inputs.values()) + # 处理输入1,即%1570 + layer_attrs["negative_slope"] = mapper.attrs[inputs_name[1]] + + graph.add_layer( + "paddle.nn.LeakyReLU", + inputs=layer_inputs, + outputs=layer_outputs, + **layer_attrs) + return current_inputs, current_outputs + + +def aten_len(mapper, graph, node): + """ 构造获取list长度的PaddleLayer。 + + TorchScript示例: + %85 : int = aten::len(%83) + 参数含义: + %85 (int): 输出,list的长度。 + %72 (list): 需要获取长度的list。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%72 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer("prim.len", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_log(mapper, graph, node): + """ 构构造log的PaddleLayer。 + + TorchScript示例: + %787 : Tensor = aten::log(%786) + 参数含义: + %787 (Tensor): 输出,取log的Tensor。 + %786 (Tensor): 需要获取log的Tensor。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%786 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer( + "fluid.layers.log", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_lt(mapper, graph, node): + """ 构造对比大小的PaddleLayer。 + + TorchScript示例: + %80 : bool = aten::lt(%78, %79) + 参数含义: + %80 (bool): 输出,第一个元素是否小于第二个元素。 + %78 (-): 需对比的输入1。 + %79 (-): 
需对比的输入2。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%78 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 处理输入1,即%79 + mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs) + layer_inputs["y"] = inputs_name[1] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer("prim.lt", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_masked_fill_(mapper, graph, node): + """ 构造填充mask的PaddleLayer。 + + TorchScript示例: + %input.4 : Tensor = aten::masked_fill_(%scores.2, %mask.2, %46) + 参数含义: + %input.4 (Tensor): 输出,填充后的结果。 + %scores.2 (Tensor): 需要填充的Tensor。 + %mask.2 (Tensor): bool型的Tensor,哪些位置需要填充。 + %46 (-): 填充的值。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输入的list + current_inputs = [] + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%input.4 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + current_inputs.append(inputs_name[0]) + graph.add_layer( + "prim.type", + inputs={"input": inputs_name[0]}, + outputs=[inputs_name[0] + "_type"]) + # 处理输入1,即%scores.2 + mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs) + current_inputs.append(inputs_name[1]) + graph.add_layer( + "paddle.logical_not", + inputs={"x": inputs_name[1]}, + outputs=[inputs_name[1] + "_not"]) + graph.add_layer( + "fluid.layers.cast", + inputs={"x": inputs_name[1]}, + outputs=[inputs_name[1] + "_mask"], + dtype=inputs_name[0] + "_type") + graph.add_layer( + "fluid.layers.cast", + inputs={"x": inputs_name[1] + "_not"}, + outputs=[inputs_name[1] + "_not_mask"], + 
dtype=inputs_name[0] + "_type") + graph.add_layer( + "paddle.multiply", + inputs={"x": inputs_name[0], + "y": inputs_name[1] + "_not_mask"}, + outputs=[inputs_name[0] + "_not_mask"]) + # 处理输入2,即%46 + mapper._check_input(graph, inputs_node[2], inputs_name[2], current_outputs) + graph.add_layer( + "prim.eq", + inputs={"x": inputs_name[2]}, + outputs=[inputs_name[2] + "_cond1"], + y="-float('inf')") + graph.add_layer( + "prim.eq", + inputs={"x": inputs_name[2]}, + outputs=[inputs_name[2] + "_cond2"], + y="float('inf')") + graph.add_layer( + "prim.or", + inputs={ + "x": inputs_name[2] + "_cond1", + "y": inputs_name[2] + "_cond2" + }, + outputs=[inputs_name[2] + "_cond"]) + graph.add_layer( + "prim.if", {'input': inputs_name[2] + "_cond"}, + outputs=[inputs_name[2] + "_if"]) + if_layer = graph.layers[list(graph.layers.keys())[-1]] + block = PaddleGraph(if_layer, graph_type="dygraph") + block.add_layer( + "prim.equal", + inputs={"input": inputs_name[1] + "_mask"}, + outputs=[inputs_name[2] + "_1"]) + if_layer.add_block(block) + block = PaddleGraph(if_layer, graph_type="dygraph") + block.add_layer( + "prim.mul", + inputs={"x": inputs_name[1] + "_mask", + "y": inputs_name[2]}, + outputs=[inputs_name[2] + "_1"]) + if_layer.add_block(block) + if_layer.inputs["input-0"] = inputs_name[1] + "_mask" + if_layer.inputs["input-1"] = inputs_name[2] + if_layer.outputs.append(inputs_name[2] + "_1") + graph.add_layer( + "fluid.layers.elementwise_add", + inputs={"x": inputs_name[2] + "_1", + "y": inputs_name[0] + "_not_mask"}, + outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_masked_fill(mapper, graph, node): + """ 构造填充mask的PaddleLayer。 + + TorchScript示例: + %input.4 : Tensor = aten::masked_fill(%scores.2, %mask.2, %46) + 参数含义: + %input.4 (Tensor): 输出,填充后的结果。 + %scores.2 (Tensor): 需要填充的Tensor。 + %mask.2 (Tensor): bool型的Tensor,哪些位置需要填充。 + %46 (-): 填充的值。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} 
+ inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输入的list + current_inputs = [] + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%input.4 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + current_inputs.append(inputs_name[0]) + graph.add_layer( + "prim.type", + inputs={"input": inputs_name[0]}, + outputs=[inputs_name[0] + "_type"]) + # 处理输入1,即%scores.2 + mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs) + current_inputs.append(inputs_name[1]) + graph.add_layer( + "paddle.logical_not", + inputs={"x": inputs_name[1]}, + outputs=[inputs_name[1] + "_not"]) + graph.add_layer( + "fluid.layers.cast", + inputs={"x": inputs_name[1]}, + outputs=[inputs_name[1] + "_mask"], + dtype=inputs_name[0] + "_type") + graph.add_layer( + "fluid.layers.cast", + inputs={"x": inputs_name[1] + "_not"}, + outputs=[inputs_name[1] + "_not_mask"], + dtype=inputs_name[0] + "_type") + graph.add_layer( + "paddle.multiply", + inputs={"x": inputs_name[0], + "y": inputs_name[1] + "_not_mask"}, + outputs=[inputs_name[0] + "_not_mask"]) + # 处理输入2,即%46 + mapper._check_input(graph, inputs_node[2], inputs_name[2], current_outputs) + graph.add_layer( + "prim.eq", + inputs={"x": inputs_name[2]}, + outputs=[inputs_name[2] + "_cond1"], + y="-float('inf')") + graph.add_layer( + "prim.eq", + inputs={"x": inputs_name[2]}, + outputs=[inputs_name[2] + "_cond2"], + y="float('inf')") + graph.add_layer( + "prim.or", + inputs={ + "x": inputs_name[2] + "_cond1", + "y": inputs_name[2] + "_cond2" + }, + outputs=[inputs_name[2] + "_cond"]) + graph.add_layer( + "prim.if", {'input': inputs_name[2] + "_cond"}, + outputs=[inputs_name[2] + "_if"]) + if_layer = graph.layers[list(graph.layers.keys())[-1]] + block = PaddleGraph(if_layer, graph_type="dygraph") + block.add_layer( + "prim.equal", + inputs={"input": inputs_name[1] + "_mask"}, + outputs=[inputs_name[2] + "_1"]) + if_layer.add_block(block) + block = PaddleGraph(if_layer, 
graph_type="dygraph") + block.add_layer( + "prim.mul", + inputs={"x": inputs_name[1] + "_mask", + "y": inputs_name[2]}, + outputs=[inputs_name[2] + "_1"]) + if_layer.add_block(block) + if_layer.inputs["input-0"] = inputs_name[1] + "_mask" + if_layer.inputs["input-1"] = inputs_name[2] + if_layer.outputs.append(inputs_name[2] + "_1") + graph.add_layer( + "fluid.layers.elementwise_add", + inputs={"x": inputs_name[2] + "_1", + "y": inputs_name[0] + "_not_mask"}, + outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_max(mapper, graph, node): + """ 构造获取最大值的PaddleLayer。 + + TorchScript示例: + %val_if_large0.3 : Tensor = aten::max(%val_if_large.3, %159) + 参数含义: + %val_if_large0.3 (Tensor): 输出,对比后的结果。 + %val_if_large.3 (Tensor): 输入,需要对比的Tensor1。 + %159 (Tensor): 输入,需要对比的Tensor2。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + input_type = list(node.inputs())[1].type() + if str(input_type) == "Tensor": + # 处理输入0,即%val_if_large.3 + mapper._check_input(graph, inputs_node[0], inputs_name[0], + current_outputs) + layer_inputs["x"] = inputs_name[0] + # 处理输入1,即%159 + mapper._check_input(graph, inputs_node[1], inputs_name[1], + current_outputs) + layer_inputs["y"] = inputs_name[1] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + graph.add_layer( + "paddle.maximum", inputs=layer_inputs, outputs=layer_outputs) + else: + pass + return current_inputs, current_outputs + + +def aten_max_pool2d(mapper, graph, node): + """ 构造最大池化的PaddleLayer。 + + TorchScript示例: + %input.8 : Tensor = aten::max_pool2d(%result.11, %20, %23, %21, %22, %19) + 参数含义: + %input.8 (Tensor): 输出,池化后的结果。 + %result.11 (Tensor): 需要池化的Tensor。 + %20 (list): 池化kernel的大小。 + %23 (list): 步长大小。 + %21 (list): 填充大小。 + %22 (list): 膨胀系数大小。 + %19 (bool): 是否用ceil函数计算输出高度和宽度。 + """ + if "pool" in mapper.dygraph_name_id: + 
mapper.dygraph_name_id["pool"] += 1 + else: + mapper.dygraph_name_id["pool"] = 0 + pool_name = "pool" + str(mapper.dygraph_name_id["pool"]) + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [pool_name, output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%result.11 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + # 处理输入1,即%20 + layer_attrs["pool_size"] = mapper.attrs[inputs_name[1]] + # 处理输入2,即%23 + layer_attrs["pool_stride"] = mapper.attrs[inputs_name[2]] + # 处理输入3,即%21 + layer_attrs["pool_padding"] = mapper.attrs[inputs_name[3]] + # 处理输入4,即%22 + graph.add_layer( + "prim.assert", + inputs={}, + outputs=[inputs_name[4]], + type="eq", + key=mapper.attrs[inputs_name[4]], + value=[1, [1, 1]]) + # 处理输入5,即%19 + layer_attrs["ceil_mode"] = mapper.attrs[inputs_name[5]] + layer_attrs["pool_type"] = string("max") + + graph.add_layer( + "paddle.nn.Pool2D", + inputs=layer_inputs, + outputs=layer_outputs, + **layer_attrs) + return current_inputs, current_outputs + + +def aten_matmul(mapper, graph, node): + """ 构造矩阵相乘的PaddleLayer。 + + TorchScript示例: + %output.2 : Tensor = aten::matmul(%101, %111) + 参数含义: + %output.2 (Tensor): 输出,相乘后的结果。 + %101 (Tensor): 矩阵1。 + %102 (Tensor): 矩阵2。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%101 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 处理输入1,即%102 + mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs) + layer_inputs["y"] = inputs_name[1] + # 获取当前节点输入的list + current_inputs = 
list(layer_inputs.values()) + + graph.add_layer("paddle.matmul", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_min(mapper, graph, node): + """ 构造获取最小值的PaddleLayer。 + + TorchScript示例: + %val_if_large0.3 : Tensor = aten::min(%val_if_large.3, %159) + 参数含义: + %val_if_large0.3 (Tensor): 输出,对比后的结果。 + %val_if_large.3 (Tensor): 输入,需要对比的Tensor1。 + %159 (Tensor): 输入,需要对比的Tensor2。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + input_type = list(node.inputs())[1].type() + if str(input_type) == "Tensor": + # 处理输入0,即%val_if_large.3 + mapper._check_input(graph, inputs_node[0], inputs_name[0], + current_outputs) + layer_inputs["x"] = inputs_name[0] + # 处理输入1,即%159 + mapper._check_input(graph, inputs_node[1], inputs_name[1], + current_outputs) + layer_inputs["y"] = inputs_name[1] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + graph.add_layer( + "paddle.minimum", inputs=layer_inputs, outputs=layer_outputs) + else: + pass + return current_inputs, current_outputs + + +def aten_mean(mapper, graph, node): + """ 构造求均值的PaddleLayer。 + + TorchScript示例: + %x.28 : Tensor = aten::mean(%result.1, %4967, %3, %2) + 参数含义: + %x.28 (Tensor): 输出,求均值后的结果。 + %result.1 (Tensor): 输入,需要求均值的Tensor。 + %4967 (int/list): 求平均值运算的维度。 + %3 (bool): 是否在输出Tensor中保留减小的维度。 + %2 (Tensor): 结果Tensor。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%result.1 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = inputs_name[0] + current_inputs = list(layer_inputs.values()) + # 处理输入1,即%4967 + if inputs_name[1] in mapper.attrs: + 
layer_attrs["dim"] = mapper.attrs[inputs_name[1]] + else: + mapper._check_input(graph, inputs_node[1], inputs_name[1], + current_outputs) + layer_inputs["dim"] = inputs_name[1] + current_inputs.append(inputs_name[1]) + # 处理输入2,即%3 + if inputs_name[1] in mapper.attrs: + layer_attrs["keep_dim"] = mapper.attrs[inputs_name[2]] + else: + mapper._check_input(graph, inputs_node[2], inputs_name[2], + current_outputs) + layer_inputs["keep_dim"] = inputs_name[2] + current_inputs.append(inputs_name[2]) + + graph.add_layer( + "fluid.layers.reduce_mean", + inputs=layer_inputs, + outputs=layer_outputs, + **layer_attrs) + return current_inputs, current_outputs + + +def aten_mul(mapper, graph, node): + """ 构造数值相乘的PaddleLayer。 + + TorchScript示例: + %size_prods.39 : int = aten::mul(%size_prods.38, %114) + 参数含义: + %size_prods.39 (Tensor): 输出,相乘后的结果。 + %size_prods.38 (-): 数值1。 + %114 (-): 数值2。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%size_prods.38 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 处理输入1,即%114 + mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs) + layer_inputs["y"] = inputs_name[1] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + current_outputs = layer_outputs + + graph.add_layer("prim.mul", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_mul_(mapper, graph, node): + """ 构造数值相乘的PaddleLayer。 + + TorchScript示例: + %size_prods.39 : int = aten::mul_(%size_prods.38, %114) + 参数含义: + %size_prods.39 (Tensor): 输出,相乘后的结果。 + %size_prods.38 (-): 数值1。 + %114 (-): 数值2。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 
获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%size_prods.38 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 处理输入1,即%114 + mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs) + layer_inputs["y"] = inputs_name[1] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + current_outputs = layer_outputs + + graph.add_layer("prim.mul", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_ne(mapper, graph, node): + """ 构造判断数值是否不相等的PaddleLayer。 + + TorchScript示例: + %134 : bool = aten::ne(%133, %132) + 参数含义: + %134 (bool): 对比后结果。 + %133 (-): 需对比的输入1。 + %132 (-): 需对比的输入2。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%124 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 处理输入1,即%123 + mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs) + layer_inputs["y"] = inputs_name[1] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer("prim.ne", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_neg(mapper, graph, node): + """ 构造对数值取负的PaddleLayer。 + + TorchScript示例: + %909 : int = aten::neg(%908) + 参数含义: + %909 (int): 取负后结果。 + %908 (int): 需取负的输入。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%124 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + 
graph.add_layer("prim.neg", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten___not__(mapper, graph, node): + """ 构造对bool型取负的PaddleLayer。 + + TorchScript示例: + %4498 : bool = aten::__not__(%aux_defined.2) + 参数含义: + %4498 (bool): 取负后结果。 + %aux_defined.2 (bool): 需取负的输入。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%124 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer("prim.not", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_ones(mapper, graph, node): + """ 构造创建固定形状、数据类型且值全为0的Tensor的PaddleLayer。 + + TorchScript示例: + %input.49 : Tensor = aten::ones(%23, %8, %6, %24, %5) + 参数含义: + %input.49 (Tensor): 输出,全0的Tensor。 + %23 (list): 形状。 + %8 (int): 类型dtype。 + %6 (int): layout。 + %4995 (Device): 设备。 + %4995 (bool): 是否计算梯度。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + current_inputs = [] + # 处理输入0,即%23,代表end + if inputs_name[0] in mapper.attrs: + layer_attrs["shape"] = mapper.attrs[inputs_name[0]] + else: + mapper._check_input(graph, inputs_node[0], inputs_name[0], + current_outputs) + layer_inputs["shape"] = inputs_name[0] + current_inputs.append(inputs_name[0]) + # 处理输入1,即%8,代表dtype + layer_attrs["dtype"] = dtype_dict[mapper.attrs[inputs_name[1]]] + + graph.add_layer( + "paddle.ones", + inputs=layer_inputs, + outputs=layer_outputs, + **layer_attrs) + return current_inputs, current_outputs + + +def aten_permute(mapper, graph, node): + """ 
构造对bool型取负的PaddleLayer。 + + TorchScript示例: + %2385 : Tensor = aten::permute(%cls_confs0.2, %2384) + 参数含义: + %2385 (Tensor): 重排后的结果。 + %cls_confs0.2 (Tensor): 需要重排的Tensor。 + %2348 (list): 依照此参数进行重排。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%cls_confs0.2 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + # 处理输入1,即%2348 + if inputs_name[1] in mapper.attrs: + layer_attrs["perm"] = mapper.attrs[inputs_name[1]] + else: + mapper._check_input(graph, inputs_node[1], inputs_name[1], + current_outputs) + layer_inputs["perm"] = inputs_name[1] + current_inputs.append(inputs_name[1]) + + graph.add_layer( + "fluid.layers.transpose", + inputs=layer_inputs, + outputs=layer_outputs, + **layer_attrs) + return current_inputs, current_outputs + + +def aten_pow(mapper, graph, node): + """ 构造指数激活的PaddleLayer。 + + TorchScript示例: + %x.6 : Tensor = aten::pow(%4700, %4703) + 参数含义: + %x.6 (Tensor): 输出,指数激活后的Tensor。 + %4700 (Tensor): 需要指数激活的Tensor。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%4700 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 获取当前节点输入、输出的list + current_inputs = list(layer_inputs.values()) + # 处理输入1,即%4703 + if inputs_name[1] in mapper.attrs: + layer_attrs["factor"] = mapper.attrs[inputs_name[1]] + else: + mapper._check_input(graph, inputs_node[1], inputs_name[1], + current_outputs) + layer_inputs["factor"] = inputs_name[1] + current_inputs.append(inputs_name[1]) + + 
graph.add_layer( + "fluid.layers.pow", + inputs=layer_inputs, + outputs=layer_outputs, + **layer_attrs) + return current_inputs, current_outputs + + +def aten_relu(mapper, graph, node): + """ 构造ReLU激活的PaddleLayer。 + + TorchScript示例: + %result.3 : Tensor = aten::relu(%input.5) + 参数含义: + %result.3 (Tensor): 输出,ReLU后的结果。 + %result.5 (Tensor): 需要ReLU的Tensor。 + + 注意: inplace这个参数在paddle中未实现 + """ + if "relu" in mapper.dygraph_name_id: + mapper.dygraph_name_id["relu"] += 1 + else: + mapper.dygraph_name_id["relu"] = 0 + relu_name = "relu" + str(mapper.dygraph_name_id["relu"]) + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [relu_name, output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%result.5 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer( + "paddle.nn.ReLU", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_relu_(mapper, graph, node): + """ 构造ReLU激活的PaddleLayer。 + + TorchScript示例: + %result.3 : Tensor = aten::relu_(%input.5) + 参数含义: + %result.3 (Tensor): 输出,ReLU后的结果。 + %result.5 (Tensor): 需要ReLU的Tensor。 + + 注意: inplace这个参数在paddle中未实现 + """ + if "relu" in mapper.dygraph_name_id: + mapper.dygraph_name_id["relu"] += 1 + else: + mapper.dygraph_name_id["relu"] = 0 + relu_name = "relu" + str(mapper.dygraph_name_id["relu"]) + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [relu_name, output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%result.5 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + 
graph.add_layer( + "paddle.nn.ReLU", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_relu6(mapper, graph, node): + """ 构造ReLU6激活的PaddleLayer。 + + TorchScript示例: + %result.3 : Tensor = aten::relu6(%input.5) + 参数含义: + %result.3 (Tensor): 输出,ReLU6后的结果。 + %result.5 (Tensor): 需要ReLU6的Tensor。 + + 注意: inplace这个参数在paddle中未实现 + """ + if "relu6" in mapper.dygraph_name_id: + mapper.dygraph_name_id["relu6"] += 1 + else: + mapper.dygraph_name_id["relu6"] = 0 + relu6_name = "relu6" + str(mapper.dygraph_name_id["relu6"]) + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [relu6_name, output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%result.5 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer( + "paddle.nn.ReLU6", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_repeat(mapper, graph, node): + """ 构造根据参数对输入各维度进行复制的PaddleLayer。 + + TorchScript示例: + 701 : Tensor = aten::repeat(%699, %700) + 参数含义: + %701 (Tensor): 输出,复制后的Tensor。 + %699 (Tensor): 需要复制的Tensor。 + %700 (list): 指定每个维度复制的次数。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%699 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 获取当前节点输入、输出的list + current_inputs = list(layer_inputs.values()) + # 处理输入1,即%700 + if inputs_name[1] in mapper.attrs: + layer_attrs["repeat_times"] = mapper.attrs[inputs_name[1]] + else: + mapper._check_input(graph, inputs_node[1], inputs_name[1], + current_outputs) + 
layer_inputs["repeat_times"] = inputs_name[1] + current_inputs.append(inputs_name[1]) + + graph.add_layer( + "paddle.tile", + inputs=layer_inputs, + outputs=layer_outputs, + **layer_attrs) + return current_inputs, current_outputs + + +def aten_reshape(mapper, graph, node): + """ 构造调整大小的PaddleLayer。 + + TorchScript示例: + %x.6 : Tensor = aten::reshape(%4700, %4703) + 参数含义: + %x.6 (Tensor): 输出,reshape后的Tensor。 + %4700 (Tensor): 需要reshape的Tensor。 + %4703 (list): 形状大小组成的list。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%4700 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 获取当前节点输入、输出的list + current_inputs = list(layer_inputs.values()) + # 处理输入1,即%4703 + if inputs_name[1] in mapper.attrs: + layer_attrs["shape"] = mapper.attrs[inputs_name[1]] + else: + mapper._check_input(graph, inputs_node[1], inputs_name[1], + current_outputs) + layer_inputs["shape"] = inputs_name[1] + current_inputs.append(inputs_name[1]) + + graph.add_layer( + "prim.type", + inputs={"input": inputs_name[0]}, + outputs=[inputs_name[0] + "_type"]) + graph.add_layer( + "prim.str", + inputs={"input": inputs_name[0] + "_type"}, + outputs=[inputs_name[0] + "_type"]) + graph.add_layer( + "prim.eq", + inputs={"x": inputs_name[0] + "_type"}, + outputs=[inputs_name[0] + "_cond"], + y=string("VarType.BOOL")) + graph.add_layer( + "prim.if", {'input': inputs_name[0] + "_cond"}, + outputs=[inputs_name[0] + "_if1"]) + if_layer = graph.layers[list(graph.layers.keys())[-1]] + block = PaddleGraph(if_layer, graph_type="dygraph") + block.add_layer( + "fluid.layers.cast", + inputs={"x": inputs_name[0]}, + outputs=[inputs_name[0]], + dtype=string("int32")) + if_layer.add_block(block) + block = PaddleGraph(if_layer, graph_type="dygraph") + 
if_layer.add_block(block) + if_layer.inputs["input-0"] = inputs_name[0] + graph.add_layer( + "fluid.layers.reshape", + inputs=layer_inputs, + outputs=layer_outputs, + **layer_attrs) + graph.add_layer( + "prim.if", {'input': inputs_name[0] + "_cond"}, + outputs=[inputs_name[0] + "_if2"]) + if_layer = graph.layers[list(graph.layers.keys())[-1]] + block = PaddleGraph(if_layer, graph_type="dygraph") + block.add_layer( + "fluid.layers.cast", + inputs={"x": layer_outputs[0]}, + outputs=layer_outputs, + dtype=string("bool")) + if_layer.add_block(block) + block = PaddleGraph(if_layer, graph_type="dygraph") + if_layer.add_block(block) + if_layer.inputs["input-0"] = layer_outputs[0] + return current_inputs, current_outputs + + +def aten_rsub(mapper, graph, node): + """ 构造数值相减的PaddleLayer,计算公式为:out = y - alpha * x。 + + TorchScript示例: + %31 : Tensor = aten::rsub(%30, %13, %7) + 参数含义: + %31 (Tensor): 相减结果。 + %30 (Tensor): 输入Tensor x。 + %13 (int/float): 输入数值 y。 + %7 (int/float): alpha。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%30 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 处理输入1,即%13 + mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs) + layer_inputs["y"] = inputs_name[1] + # 处理输入2,即%7 + mapper._check_input(graph, inputs_node[2], inputs_name[2], current_outputs) + layer_inputs["alpha"] = inputs_name[2] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer("prim.rsub", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_ScalarImplicit(mapper, graph, node): + """ 构造获取scalar的PaddleLayer。 + + TorchScript示例: + %89 : Scalar = aten::ScalarImplicit(%end.1) + 参数含义: + %89 (Scalar): 输出,得到的Scalar。 + %end.1 (-): 
组要转换的数据。 + + 【注意】由于Paddle无Scalar,所以最后转换为Tensor。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%end.1 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = inputs_name[0] + input_type = list(node.inputs())[0].type() + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + if str(input_type) == "Tensor": + graph.add_layer( + "prim.equal", inputs=layer_inputs, outputs=layer_outputs) + else: + raise Exception( + "The input type {} of aten::ScalarImplicit is not implemented yet!" + ).format(input_type) + return current_inputs, current_outputs + + +def aten_select(mapper, graph, node): + """ 构造选取特定维度Variable的PaddleLayer。 + + TorchScript示例: + %19 : Tensor = aten::select(%18, %8, %7) + 参数含义: + %19 (Tensor): 输出,选取的Tensor。 + %18 (Tensor): 需要选取的Tensor。 + %8 (int): select的维度。 + %7 (int): select的第n个向量。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%18 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = inputs_name[0] + # 处理输入1,即%8 + layer_attrs["dim"] = mapper.attrs[inputs_name[1]] + # 处理输入2,即%75 + mapper._check_input(graph, inputs_node[2], inputs_name[2], current_outputs) + layer_inputs["index"] = inputs_name[2] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer( + "prim.select", + inputs=layer_inputs, + outputs=current_outputs, + **layer_attrs) + return current_inputs, current_outputs + + +def aten__set_item(mapper, graph, node): + """ 构造对dict加入元素的PaddleLayer。 + + TorchScript示例: + = aten::_set_item(%features.1, %out_name.1, %x.3) + 
参数含义: + %features.1 (list): dict。 + %out_name.1 (-): dict的key。 + %x.3 (-): dict的value。 + """ + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [] + # 处理输入0,即%features.1 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["dict"] = inputs_name[0] + # 处理输入1,即%out_name.1 + mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs) + layer_inputs["key"] = inputs_name[1] + # 处理输入2,即%x.3 + mapper._check_input(graph, inputs_node[2], inputs_name[2], current_outputs) + layer_inputs["value"] = inputs_name[2] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer("prim.set_item", inputs=layer_inputs, outputs=[]) + return current_inputs, current_outputs + + +def aten_sigmoid(mapper, graph, node): + """ 构造sigmoid激活的PaddleLayer。 + + TorchScript示例: + %55 : Tensor = aten::sigmoid(%54) + 参数含义: + %55 (Tensor): 输出,sigmoid后的结果。 + %54 (Tensor): 需要tanh的Tensor。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%54 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 获取当前节点输入、输出的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer( + "fluid.layers.sigmoid", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_sin(mapper, graph, node): + """ 构造数学计算sin的PaddleLayer。 + + TorchScript示例: + %94 : Tensor = aten::sin(%sinusoid_inp.1) + 参数含义: + %94 (Tensor): 输出,sin之后的结果。 + %sinusoid_inp.1 (Tensor): 需要进行shape的Tensor。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 
处理输入0,即%sinusoid_inp.1 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 获取当前节点输入、输出的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer("paddle.sin", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_size(mapper, graph, node): + """ 构造获取shape的PaddleLayer。 + + TorchScript示例: + %73 : int[] = aten::size(%x.12, %10) + 参数含义: + %73 (list): 输出,shape的list。 + %x.12 (Tensor): 需要获取shape的Tensor。 + %10 (int): 非必须,代表维度。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%x.12 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + if len(inputs_name) > 1: + # 处理输入1,即%12 + if inputs_name[1] in mapper.attrs: + layer_attrs["dim"] = mapper.attrs[inputs_name[1]] + else: + mapper._check_input(graph, inputs_node[1], inputs_name[1], + current_outputs) + layer_inputs["dim"] = inputs_name[1] + current_inputs.append(inputs_name[1]) + graph.add_layer( + "prim.shape_dim", + inputs=layer_inputs, + outputs=layer_outputs, + **layer_attrs) + return current_inputs, current_outputs + + graph.add_layer( + "fluid.layers.shape", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_slice(mapper, graph, node): + """ 构造切分list或Variable的PaddleLayer。 + + TorchScript示例: + %83 : int[] = aten::slice(%73, %_81, %82, %75, %77) + 参数含义: + %83 (list/Tensor): 输出,切分后的list。 + %73 (list/Tensor): 需要切分的list。 + %_81 (int): 切分的维度,不一定存在。 + %82 (int): 切分的开始索引。 + %75 (int): 切分的结束索引。 + %77 (int): 切分的步长。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + 
inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + if len(inputs_name) == 5: + # 处理输入0,即%73 + mapper._check_input(graph, inputs_node[0], inputs_name[0], + current_outputs) + layer_inputs["input"] = inputs_name[0] + + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + # 处理输入1,即%_81 + if inputs_name[1] in mapper.attrs: + graph.add_layer( + "prim.list", + inputs={}, + outputs=[inputs_name[1] + "_list"], + input0=mapper.attrs[inputs_name[1]]) + else: + mapper._check_input(graph, inputs_node[1], inputs_name[1], + current_outputs) + graph.add_layer( + "prim.list", + inputs={"input0": inputs_name[1]}, + outputs=[inputs_name[1] + "_list"]) + current_inputs.append(inputs_name[1]) + layer_inputs["axes"] = inputs_name[1] + "_list" + current_inputs.append(inputs_name[1] + "_list") + current_outputs.append(inputs_name[1] + "_list") + # 处理输入2,即%82 + if inputs_name[2] in mapper.attrs: + graph.add_layer( + "prim.list", + inputs={}, + outputs=[inputs_name[2] + "_list"], + input0=mapper.attrs[inputs_name[2]]) + else: + mapper._check_input(graph, inputs_node[2], inputs_name[2], + current_outputs) + graph.add_layer( + "prim.list", + inputs={"input0": inputs_name[2]}, + outputs=[inputs_name[2] + "_list"]) + current_inputs.append(inputs_name[2]) + layer_inputs["starts"] = inputs_name[2] + "_list" + current_inputs.append(inputs_name[2] + "_list") + current_outputs.append(inputs_name[2] + "_list") + # 处理输入3,即%85 + if inputs_name[3] in mapper.attrs: + graph.add_layer( + "prim.list", + inputs={}, + outputs=[inputs_name[3] + "_list"], + input0=mapper.attrs[inputs_name[3]]) + else: + mapper._check_input(graph, inputs_node[3], inputs_name[3], + current_outputs) + graph.add_layer( + "prim.list", + inputs={"input0": inputs_name[3]}, + outputs=[inputs_name[3] + "_list"]) + current_inputs.append(inputs_name[3]) + layer_inputs["ends"] = inputs_name[3] + "_list" + current_inputs.append(inputs_name[3] + "_list") + 
current_outputs.append(inputs_name[3] + "_list") + # 处理输入4,即%77 + if inputs_name[4] in mapper.attrs: + graph.add_layer( + "prim.list", + inputs={}, + outputs=[inputs_name[4] + "_list"], + input0=mapper.attrs[inputs_name[4]]) + else: + mapper._check_input(graph, inputs_node[4], inputs_name[4], + current_outputs) + graph.add_layer( + "prim.list", + inputs={"input0": inputs_name[4]}, + outputs=[inputs_name[4] + "_list"]) + current_inputs.append(inputs_name[4]) + layer_inputs["strides"] = inputs_name[4] + "_list" + current_inputs.append(inputs_name[4] + "_list") + current_outputs.append(inputs_name[4] + "_list") + + graph.add_layer( + "fluid.layers.strided_slice", + inputs=layer_inputs, + outputs=layer_outputs) + else: + # 处理输入0,即%73 + mapper._check_input(graph, inputs_node[0], inputs_name[0], + current_outputs) + layer_inputs["input"] = inputs_name[0] + # 处理输入1,即%82 + mapper._check_input(graph, inputs_node[1], inputs_name[1], + current_outputs) + layer_inputs["start"] = inputs_name[1] + # 处理输入2,即%75 + mapper._check_input(graph, inputs_node[2], inputs_name[2], + current_outputs) + layer_inputs["end"] = inputs_name[2] + # 处理输入3,即%77 + mapper._check_input(graph, inputs_node[3], inputs_name[3], + current_outputs) + layer_inputs["step"] = inputs_name[3] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer( + "prim.slice", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def aten_softmax(mapper, graph, node): + """ 构造softmax激活的PaddleLayer。 + + TorchScript示例: + %input2.1 : Tensor = aten::softmax(%input.5, %80, %72) + 参数含义: + %input2.1 (Tensor): 激活后结果。 + %input.5 (Tensor): 需要激活的Tensor。 + %80 (int): 指定对输入Tensor进行运算的轴。 + %72 (str): 类型,默认为None。 + """ + if "softmax" in mapper.dygraph_name_id: + mapper.dygraph_name_id["softmax"] += 1 + else: + mapper.dygraph_name_id["softmax"] = 0 + softmax_name = "softmax" + str(mapper.dygraph_name_id["softmax"]) + output_name = mapper._get_outputs_name(node)[0] + 
layer_outputs = [softmax_name, output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%x.31 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + layer_attrs["axis"] = mapper.attrs[inputs_name[1]] + + graph.add_layer( + "paddle.nn.Softmax", + inputs=layer_inputs, + outputs=layer_outputs, + **layer_attrs) + return current_inputs, current_outputs + + +def aten_softplus(mapper, graph, node): + """ 构造softplus激活的PaddleLayer。 + + TorchScript示例: + %54 : Tensor = aten::softplus(%x.31, %30, %29) + 参数含义: + %54 (Tensor): 激活后结果。 + %x.31 (Tensor): 需要激活的Tensor。 + %30 (int): beta。 + %29 (int): 阈值。 + """ + if "softplus" in mapper.dygraph_name_id: + mapper.dygraph_name_id["softplus"] += 1 + else: + mapper.dygraph_name_id["softplus"] = 0 + softplus_name = "softplus" + str(mapper.dygraph_name_id["softplus"]) + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [softplus_name, output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%x.31 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + layer_attrs["beta"] = mapper.attrs[inputs_name[1]] + layer_attrs["threshold"] = mapper.attrs[inputs_name[2]] + + graph.add_layer( + "paddle.nn.Softplus", + inputs=layer_inputs, + outputs=layer_outputs, + **layer_attrs) + return current_inputs, current_outputs + + +def aten_sqrt(mapper, graph, node): + """ 构构造sqrt的PaddleLayer。 + + TorchScript示例: + %787 : Tensor = aten::sqrt(%786) + 参数含义: + %787 (Tensor): 输出,取sqrt的Tensor。 + %786 (Tensor): 需要获取sqrt的Tensor。 + """ + output_name = 
def aten_squeeze(mapper, graph, node):
    """Map aten::squeeze to a paddle.tensor.squeeze PaddleLayer.

    TorchScript example:
        %12 : Tensor = aten::squeeze(%start_logits.1, %4)
    where %12 is the output with the size-1 dimension removed,
    %start_logits.1 the input tensor and %4 the axis.
    """
    output_name = mapper._get_outputs_name(node)[0]
    current_outputs = [output_name]
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # input 0: the tensor to squeeze
    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
    layer_inputs = {"x": inputs_name[0]}
    current_inputs = list(layer_inputs.values())
    layer_attrs = {}
    # input 1: axis -- a known constant becomes an attribute, otherwise it
    # is wired in as a runtime input of the layer
    if inputs_name[1] in mapper.attrs:
        layer_attrs["axis"] = mapper.attrs[inputs_name[1]]
    else:
        mapper._check_input(graph, inputs_node[1], inputs_name[1],
                            current_outputs)
        layer_inputs["axis"] = inputs_name[1]
        current_inputs.append(inputs_name[1])
    graph.add_layer(
        "paddle.tensor.squeeze",
        inputs=layer_inputs,
        outputs=[output_name],
        **layer_attrs)
    return current_inputs, current_outputs
def aten_sub(mapper, graph, node):
    """Map aten::sub to a prim.sub PaddleLayer (numeric subtraction).

    TorchScript example:
        %840 : int = aten::sub(%839, %836)
    where %840 = %839 - %836.
    """
    output_name = mapper._get_outputs_name(node)[0]
    current_outputs = [output_name]
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # input 0 -> x
    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
    # input 1 -> y; add_dim=True lets the mapper insert an extra dimension
    # on the constant operand if one is needed
    mapper._check_input(
        graph, inputs_node[1], inputs_name[1], current_outputs, add_dim=True)
    layer_inputs = {"x": inputs_name[0], "y": inputs_name[1]}
    current_inputs = list(layer_inputs.values())

    graph.add_layer("prim.sub", inputs=layer_inputs, outputs=[output_name])
    return current_inputs, current_outputs
def aten_tanh(mapper, graph, node):
    """Map aten::tanh to a paddle.nn.Tanh PaddleLayer.

    TorchScript example:
        %55 : Tensor = aten::tanh(%54)
    where %55 is the activated result and %54 the input tensor.
    """
    # Allocate a unique sub-module name: "tanh0", "tanh1", ...
    count = mapper.dygraph_name_id.get("tanh", -1) + 1
    mapper.dygraph_name_id["tanh"] = count
    tanh_name = "tanh" + str(count)
    output_name = mapper._get_outputs_name(node)[0]
    layer_outputs = [tanh_name, output_name]
    current_outputs = [output_name]
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # input 0: the tensor to activate
    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
    layer_inputs = {"x": inputs_name[0]}
    current_inputs = list(layer_inputs.values())

    graph.add_layer(
        "paddle.nn.Tanh", inputs=layer_inputs, outputs=layer_outputs)
    return current_inputs, current_outputs
layer_inputs["dim"] = inputs_name[2] + # 处理输入1,即%135 + mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs) + input_type = list(node.inputs())[0].type() + if "[]" in str(input_type): + layer_inputs["num_or_sections"] = inputs_name[1] + else: + layer_attrs["num_or_sections"] = 1 + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer( + "fluid.layers.split", + inputs=layer_inputs, + outputs=layer_outputs, + **layer_attrs) + return current_inputs, current_outputs + + +def aten_transpose(mapper, graph, node): + """ 构造矩阵转置的PaddleLayer。 + + TorchScript示例: + %715 : Tensor = aten::transpose(%x.21, %704, %705) + 参数含义: + %715 (Tensor): 输出,转置后的矩阵。 + %x.21 (Tensor): 需要转置的Tensor。 + %704 (int): 转置的维度1。 + %705 (int): 转置的维度2。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%x.21 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 处理输入1,即%704 + mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs) + dim1 = inputs_name[1] + # 处理输入2,即%705 + mapper._check_input(graph, inputs_node[2], inputs_name[2], current_outputs) + dim2 = inputs_name[2] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + graph.add_layer( + "fluid.layers.shape", + inputs={"input": inputs_name[0]}, + outputs=[output_name + "_shape"]) + current_outputs.append(output_name + "_shape") + graph.add_layer( + "prim.len", + inputs={"input": output_name + "_shape"}, + outputs=[output_name + "_len"]) + current_outputs.append(output_name + "_len") + current_inputs.append(output_name + "_shape") + graph.add_layer( + "prim.len2list", + inputs={"len": output_name + "_len"}, + outputs=[output_name + "_list"]) + current_outputs.append(output_name + "_list") + 
def aten_to(mapper, graph, node):
    """Map aten::to (dtype conversion) to a fluid.layers.cast PaddleLayer.

    TorchScript example:
        %30 : Tensor = aten::to(%extended_attention_mask.1, %12, %5, %5, %4)
    where %30 is the converted tensor, %extended_attention_mask.1 the
    input, and %12 the target-dtype constant.
    """
    output_name = mapper._get_outputs_name(node)[0]
    current_outputs = [output_name]
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # input 0: the tensor to convert
    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
    layer_inputs = {"x": inputs_name[0]}
    current_inputs = list(layer_inputs.values())
    # The dtype constant sits at index 2 for the 6-input overload of
    # aten::to and at index 1 otherwise; dtype_dict translates the
    # torch dtype code into a paddle dtype string.
    dtype_index = 2 if len(inputs_name) == 6 else 1
    layer_attrs = {"dtype": dtype_dict[mapper.attrs[inputs_name[dtype_index]]]}

    graph.add_layer(
        "fluid.layers.cast",
        inputs=layer_inputs,
        outputs=[output_name],
        **layer_attrs)
    return current_inputs, current_outputs
def aten_unsqueeze(mapper, graph, node):
    """Map aten::unsqueeze to a paddle.tensor.unsqueeze PaddleLayer.

    TorchScript example:
        %13 : Tensor = aten::unsqueeze(%12, %7)
    where %13 is the output with an inserted dimension, %12 the input
    tensor and %7 the axis at which to insert it.
    """
    output_name = mapper._get_outputs_name(node)[0]
    current_outputs = [output_name]
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # input 0: the tensor to expand
    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
    layer_inputs = {"x": inputs_name[0]}
    current_inputs = list(layer_inputs.values())
    layer_attrs = {}
    # input 1: axis -- a known constant becomes an attribute, otherwise it
    # is wired in as a runtime input of the layer
    if inputs_name[1] in mapper.attrs:
        layer_attrs["axis"] = mapper.attrs[inputs_name[1]]
    else:
        mapper._check_input(graph, inputs_node[1], inputs_name[1],
                            current_outputs)
        layer_inputs["axis"] = inputs_name[1]
        current_inputs.append(inputs_name[1])
    graph.add_layer(
        "paddle.tensor.unsqueeze",
        inputs=layer_inputs,
        outputs=[output_name],
        **layer_attrs)
    return current_inputs, current_outputs
aten_upsample_bilinear2d(mapper, graph, node): + """ 构造使用bilinear上采样的PaddleLayer。 + + TorchScript示例: + %4997 : Tensor = aten::upsample_bilinear2d(%x.13, %4963, %5421, %4995, %4996) + 参数含义: + %4997 (Tensor): 输出,上采样后的Tensor。 + %x.13 (Tensor): 需要上采样的Tensor。 + %4963 (list): 上采样后的大小。 + %5421 (bool): 若为True,则将输入和输出张量的4个角落像素的中心对齐,并保留角点像素的值。 + %4995 (float): 高度的乘数因子。 + %4995 (float): 宽度的乘数因子。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%x.13 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + # 处理输入1,即%4963 + if inputs_name[1] in mapper.attrs: + layer_attrs["size"] = mapper.attrs[inputs_name[1]] + else: + mapper._check_input(graph, inputs_node[1], inputs_name[1], + current_outputs) + layer_inputs["size"] = inputs_name[1] + current_inputs.append(inputs_name[1]) + graph.add_layer( + "prim.isinstance", + inputs={"input": inputs_name[1]}, + outputs=[inputs_name[1] + "_isinstance"], + cls="paddle.fluid.Variable") + graph.add_layer( + "prim.if", {"input": inputs_name[1] + "_isinstance"}, + outputs=[inputs_name[0] + "_if1"]) + if_layer = graph.layers[list(graph.layers.keys())[-1]] + block = PaddleGraph(if_layer, graph_type="dygraph") + block.add_layer( + "prim.var2list", + inputs={"input": inputs_name[1]}, + outputs=[inputs_name[1]]) + if_layer.add_block(block) + block = PaddleGraph(if_layer, graph_type="dygraph") + if_layer.add_block(block) + if_layer.inputs["input-0"] = inputs_name[1] + # 处理输入2,即%5421 + if inputs_name[2] in mapper.attrs: + layer_attrs["align_corners"] = mapper.attrs[inputs_name[2]] + else: + mapper._check_input(graph, inputs_node[2], inputs_name[2], + current_outputs) + layer_inputs["align_corners"] = inputs_name[2] + 
current_inputs.append(inputs_name[2]) + # 处理输入3和4,构造assert + list_layer_inputs = {} + mapper._check_input(graph, inputs_node[3], inputs_name[3], current_outputs) + list_layer_inputs["key"] = inputs_name[3] + current_inputs.append(inputs_name[3]) + mapper._check_input(graph, inputs_node[4], inputs_name[4], current_outputs) + list_layer_inputs["value"] = inputs_name[4] + current_inputs.append(inputs_name[4]) + graph.add_layer( + "prim.assert", + inputs=list_layer_inputs, + outputs=[output_name + "_assert"], + type="eq") + layer_inputs["scale_factor"] = inputs_name[3] + layer_attrs["align_mode"] = 0 + graph.add_layer( + "paddle.nn.functional.interpolate", + inputs=layer_inputs, + outputs=layer_outputs, + **layer_attrs) + return current_inputs, current_outputs + + +def aten_view(mapper, graph, node): + """ 构造调整大小的PaddleLayer。 + + TorchScript示例: + %input.152 : Tensor = aten::view(%x.20, %430) + 参数含义: + %input.152 (Tensor): 输出,view后的Tensor。 + %x.20 (Tensor): 需要view的Tensor。 + %430 (list): 形状大小组成的list。 + + 【注意】view 函数只能用于contiguous后的Tensor上, + 也就是只能用于内存中连续存储的Tensor。 + 如果对Tensor调用过transpose,permute等操作的话会使该Tensor在内存中变得不再连续, + 此时就不能再调用view函数。因此,需要先使用contiguous来返回一个contiguous copy。 + reshape则不需要依赖目标Tensor是否在内存中是连续的。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%x.20 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["x"] = inputs_name[0] + # 获取当前节点输入、输出的list + current_inputs = list(layer_inputs.values()) + # 处理输入1,即%430 + if inputs_name[1] in mapper.attrs: + layer_attrs["shape"] = mapper.attrs[inputs_name[1]] + else: + mapper._check_input(graph, inputs_node[1], inputs_name[1], + current_outputs) + layer_inputs["shape"] = inputs_name[1] + current_inputs.append(inputs_name[1]) + graph.add_layer( + "prim.type", + inputs={"input": 
inputs_name[0]}, + outputs=[inputs_name[0] + "_type"]) + graph.add_layer( + "prim.str", + inputs={"input": inputs_name[0] + "_type"}, + outputs=[inputs_name[0] + "_type"]) + graph.add_layer( + "prim.eq", + inputs={"x": inputs_name[0] + "_type"}, + outputs=[inputs_name[0] + "_cond"], + y=string("VarType.BOOL")) + graph.add_layer( + "prim.if", {'input': inputs_name[0] + "_cond"}, + outputs=[inputs_name[0] + "_if1"]) + if_layer = graph.layers[list(graph.layers.keys())[-1]] + block = PaddleGraph(if_layer, graph_type="dygraph") + block.add_layer( + "fluid.layers.cast", + inputs={"x": inputs_name[0]}, + outputs=[inputs_name[0]], + dtype=string("int32")) + if_layer.add_block(block) + block = PaddleGraph(if_layer, graph_type="dygraph") + if_layer.add_block(block) + if_layer.inputs["input-0"] = inputs_name[0] + graph.add_layer( + "fluid.layers.reshape", + inputs=layer_inputs, + outputs=layer_outputs, + **layer_attrs) + graph.add_layer( + "prim.if", {'input': inputs_name[0] + "_cond"}, + outputs=[inputs_name[0] + "_if2"]) + if_layer = graph.layers[list(graph.layers.keys())[-1]] + block = PaddleGraph(if_layer, graph_type="dygraph") + block.add_layer( + "fluid.layers.cast", + inputs={"x": layer_outputs[0]}, + outputs=layer_outputs, + dtype=string("bool")) + if_layer.add_block(block) + block = PaddleGraph(if_layer, graph_type="dygraph") + if_layer.add_block(block) + if_layer.inputs["input-0"] = layer_outputs[0] + return current_inputs, current_outputs + + +def aten_warn(mapper, graph, node): + """ 构造warning的PaddleLayer。 + + TorchScript示例: + = aten::warn(%3, %2) + 参数含义: + %3 (str): warning的提示字符串。 + %2 (int): warning的stacklevel。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%3 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = 
def aten_where(mapper, graph, node):
    """Map aten::where to a paddle.where PaddleLayer.

    TorchScript example:
        %input.4 : Tensor = aten::where(%209, %w0.2, %210)
    where %input.4 takes elements from %w0.2 where condition %209 holds
    and from %210 elsewhere.
    """
    output_name = mapper._get_outputs_name(node)[0]
    current_outputs = [output_name]
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # inputs 0/1/2 map to condition / x / y, in that order
    layer_inputs = {}
    for idx, key in enumerate(("condition", "x", "y")):
        mapper._check_input(graph, inputs_node[idx], inputs_name[idx],
                            current_outputs)
        layer_inputs[key] = inputs_name[idx]
    current_inputs = list(layer_inputs.values())

    graph.add_layer("paddle.where", inputs=layer_inputs, outputs=[output_name])
    return current_inputs, current_outputs
def aten_zeros_like(mapper, graph, node):
    """Map aten::zeros_like to a paddle.zeros_like PaddleLayer.

    TorchScript example:
        %782 : Tensor = aten::zeros_like(%n.2, %655, %670, %662, %671, %672)
    where %782 is the all-zero output shaped like %n.2 and %655 is the
    dtype constant; the layout/device/grad/memory-format inputs are
    ignored by the translation.
    """
    output_name = mapper._get_outputs_name(node)[0]
    current_outputs = [output_name]
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # input 0: the reference tensor that provides the output shape
    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
    layer_inputs = {"x": inputs_name[0]}
    current_inputs = list(layer_inputs.values())
    # input 1: dtype constant, translated through the torch->paddle table
    layer_attrs = {"dtype": dtype_dict[mapper.attrs[inputs_name[1]]]}

    graph.add_layer(
        "paddle.zeros_like",
        inputs=layer_inputs,
        outputs=[output_name],
        **layer_attrs)
    return current_inputs, current_outputs
def prim_Constant(mapper, graph, node):
    """Map prim::Constant to a prim.constant PaddleLayer (constant value).

    TorchScript example:
        %2 : int = prim::Constant[value=-1]()
    where %2 receives the constant; its type is the constant's own type.
    The value is also recorded in mapper.attrs so later nodes can fold it.
    """
    output_name = mapper._get_outputs_name(node)[0]
    output = list(node.outputs())[0]
    value = output.toIValue()
    if isinstance(value, str):
        value = string(value)
    if str(output.type()) == "Tensor":
        value = "{}".format(value)

    if "inf" in str(value):
        # Rewrite infinities as source text such as "float('inf')" (with a
        # leading "-" for negative values) so the generated code evaluates
        # to an infinity of the correct sign.
        type_name = str(type(value)).split("'")[1]
        text = str(value)
        if text.startswith("-"):
            value = "-{}({})".format(type_name, string(text[1:]))
        else:
            value = "{}({})".format(type_name, string(text))
    if "9223372036854775807" in str(value):
        # int64 max commonly stands for "unbounded" (e.g. slice ends);
        # clamp it to int32 max, which paddle handles.
        value = 2**31 - 1
    mapper.attrs[output_name] = value
    graph.add_layer(
        "prim.constant", inputs={}, outputs=[output_name], value=value)
    return [], [output_name]
# 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%4336 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer("prim.equal", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def prim_GetAttr(mapper, graph, node): + """ 获取attribute信息。 + + TorchScript示例: + %27 : Tensor? = prim::GetAttr[name="bias"](%7) + 参数含义: + %7 (Tensor): 输入Tensor。 + %27 (Tensor): 输入Tensor。 + """ + current_node = node + field_name_list = [node.s('name')] + while True: + input_node = list(node.inputs())[0].node() + try: + field_name_list.insert(0, input_node.s('name')) + node = input_node + except Exception: + break + attr_name = ".".join(field_name_list) + output_name = mapper._get_outputs_name(current_node, attr_name)[0] + part_script = mapper.script + for field_name in field_name_list: + if hasattr(part_script, field_name): + param = getattr(part_script, field_name) + if isinstance(param, torch.Tensor): + param = param.detach().numpy() + if len(param.shape) == 0: + param = np.reshape(param, 1) + if str(param.dtype) == "uint8": + param = param.astype("int32") + mapper.pytorch_params[output_name] = param + part_script = param + return [], [output_name] + + +def prim_If(mapper, graph, node): + """ 构造if控制流的PaddleLayer。 + + TorchScript示例: + %input.5 : Tensor = prim::If(%107) + block0(): + %109 : Tensor = aten::t(%102) + %ret.2 : Tensor = aten::addmm(%103, %101, %109, %104, %104) + -> (%ret.2) + block1(): + %111 : Tensor = aten::t(%102) + ... 
def prim_ListConstruct(mapper, graph, node):
    """Map prim::ListConstruct to a prim.list PaddleLayer.

    TorchScript example:
        %86 : int[] = prim::ListConstruct(%84, %85)
    where %86 is the list built from the operands %84, %85, ...
    """
    output_name = mapper._get_outputs_name(node)[0]
    current_outputs = [output_name]
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # each operand becomes an "inputN" entry, preserving order
    layer_inputs = {
        "input{}".format(i): name
        for i, name in enumerate(inputs_name)
    }
    current_inputs = list(layer_inputs.values())

    graph.add_layer("prim.list", inputs=layer_inputs, outputs=[output_name])
    return current_inputs, current_outputs
构造获取list中元素的PaddleLayer。 + + TorchScript示例: + %x1.4 : Tensor, %x2.4 : Tensor = prim::ListUnpack(%4354) + 参数含义: + %x1.4 (Tensor): 输出,list的第一个元素。 + %x2.4 (Tensor): 输出,list的第二个元素。 + %4354 (list): 列表。 + """ + outputs_name = mapper._get_outputs_name(node) + layer_outputs = outputs_name.copy() + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = layer_outputs.copy() + # 处理输入0,即%4354 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer( + "prim.list_unpack", inputs=layer_inputs, outputs=layer_outputs) + mapper.split_len[list(layer_inputs.values())[0]] = len(layer_outputs) + return current_inputs, current_outputs + + +def prim_Loop(mapper, graph, node): + """ 构造loop循环的PaddleLayer。 + + TorchScript示例: + %x : Tensor = prim::Loop(%4, %3, %x.3) + block0(%i : int, %x.12 : Tensor): + %72 : int[] = prim::Constant[value=[6, 6]]() + ... 
+ %x.5 : Tensor = aten::adaptive_avg_pool2d(%x.12, %_output_size.1) + -> (%3, %x.5) + 参数含义: + %4 (int): 循环次数。 + %3 (bool): 是否进入退出。 + %x.3 (Tensor): 循环中修改的Tensor。 + %x (Tensor): loop循环的输出,与%x.5对应。 + """ + node_outputs = mapper._get_outputs_name(node) + loop_inputs = {} + block = list(node.blocks())[0] + loop_outputs = node_outputs.copy() + for i, block_input_ivalue in enumerate(block.inputs()): + if i == 0: + block_input_node_name = '_x' + str(mapper.output_index) + else: + block_input_node_name = 'x' + str(mapper.output_index) + unique_id = block_input_ivalue.unique() + if unique_id not in mapper.outputs_info: + mapper.outputs_info[unique_id] = block_input_node_name + mapper.output_index += 1 + if i == 0: + loop_input_node = list(node.inputs())[0].node() + script_loop_input_unique_id = list(node.inputs())[0].unique() + loop_input_node_name = mapper.outputs_info[ + script_loop_input_unique_id] + mapper._check_input(graph, loop_input_node, loop_input_node_name, + node_outputs) + loop_inputs['input'] = loop_input_node_name + loop_outputs.append(block_input_node_name) + node_outputs.append(block_input_node_name) + else: + loop_input_node = list(node.inputs())[i + 1].node() + script_loop_input_unique_id = list(node.inputs())[i + 1].unique() + loop_input_node_name = mapper.outputs_info[ + script_loop_input_unique_id] + mapper._check_input(graph, loop_input_node, loop_input_node_name, + node_outputs) + graph.add_layer( + "prim.equal", + inputs={'input': loop_input_node_name}, + outputs=[block_input_node_name]) + node_outputs.append(block_input_node_name) + + graph.add_layer("prim.loop", inputs=loop_inputs, outputs=loop_outputs) + current_layer = list(graph.layers.values())[-1] + block_graph, graph_inputs = mapper.traverse(block, current_layer) + for i, input_name in enumerate(graph_inputs): + if input_name == loop_outputs[1]: + continue + current_layer.inputs['input-{}'.format(i)] = input_name + current_layer.add_block(block_graph) + return 
def prim_min(mapper, graph, node):
    """Map prim::min to a prim.min PaddleLayer.

    TorchScript example:
        %87 : int = prim::min(%86)
    where %87 is the minimum of the list %86.
    """
    output_name = mapper._get_outputs_name(node)[0]
    current_outputs = [output_name]
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # input 0: the list whose minimum is taken
    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
    layer_inputs = {"input": inputs_name[0]}
    current_inputs = list(layer_inputs.values())

    graph.add_layer("prim.min", inputs=layer_inputs, outputs=[output_name])
    return current_inputs, current_outputs


def prim_NumToTensor(mapper, graph, node):
    """Map prim::NumToTensor to a PaddleLayer.

    TorchScript example:
        %other.2 : Tensor = prim::NumToTensor(%1736)
    where %other.2 is the tensor built from the number %1736.

    If the number comes from aten::size it is already tensor-like and is
    simply forwarded; otherwise it is wrapped into a persistable,
    1-element global variable of the input's type.
    """
    output_name = mapper._get_outputs_name(node)[0]
    layer_inputs = {}
    layer_attrs = {}
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    current_outputs = [output_name]
    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs)
    if inputs_node[0].kind() == "aten::size":
        layer_inputs["input"] = inputs_name[0]
        current_inputs = list(layer_inputs.values())
        # BUG FIX: the kernel name was "prim_equal"; every other layer in
        # this mapper uses the dot-namespaced kernel "prim.equal"
        # (see prim_data), so the underscore form would never dispatch.
        graph.add_layer(
            "prim.equal", inputs=layer_inputs, outputs=[output_name])
    else:
        layer_inputs["value"] = inputs_name[0]
        current_inputs = list(layer_inputs.values())
        input_type = list(node.inputs())[0].type()
        layer_attrs["dtype"] = input_type
        layer_attrs["persistable"] = True
        layer_attrs["shape"] = [1]
        graph.add_layer(
            "fluid.layers.create_global_var",
            inputs=layer_inputs,
            outputs=[output_name],
            **layer_attrs)
    return current_inputs, current_outputs
TorchScript示例: + = prim::RaiseException(%76) + 参数含义: + %76 (str): 异常信息。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%76 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer( + "prim.exception", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def prim_requires_grad(mapper, graph, node): + """ 构造是否计算梯度的PaddleLayer。 + + TorchScript示例: + %356 : bool = prim::requires_grad(%tensor.31) + 参数含义: + %356 (bool): 输出,当前Tensor是否计算梯度。 + %tensor.31 (Tensor): 输入的Tensor。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%86 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer( + "prim.requires_grad", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def prim_SetAttr(mapper, graph, node): + """ 设置attribute信息。 + + TorchScript示例: + = prim::SetAttr[name="num_batches_tracked"](%260, %277) + 参数含义: + %260 (-): 属性名前缀。 + %277 (-): 需要设置的值。 + """ + output_name = mapper._get_outputs_name(node)[0] + field_name_list = [] + tmp_node = node + while True: + input_node = list(tmp_node.inputs())[0].node() + try: + field_name_list.insert(0, input_node.s('name')) + tmp_node = input_node + except Exception: + break + field_name_list.append(node.s('name')) + + inputs_name, inputs_node = mapper._get_inputs_name(node) + param = { + "Tensor": "self." 
+ ".".join(field_name_list).replace(".", "_"), + "parent_layer_id": graph.parent_layer.id + } + mapper.pytorch_params[".".join(field_name_list)] = param + graph.add_layer( + "prim.set_attr", + inputs={"input": inputs_name[1]}, + outputs=["self." + ".".join(field_name_list).replace(".", "_")]) + return [], [output_name] + + +def prim_shape(mapper, graph, node): + """ 构造获取shape的PaddleLayer。 + + TorchScript示例: + %4701 : int[] = prim::shape(%result.1) + 参数含义: + %4701 (list): 输出,shape信息。 + %result.1 (Tensor): 需要获取shape的值。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%input.8 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer( + "fluid.layers.shape", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def prim_TupleConstruct(mapper, graph, node): + """ 构造tuple的PaddleLayer。 + + TorchScript示例: + %4492 : (Tensor, Tensor?) = prim::TupleConstruct(%x.46, %aux) + 参数含义: + %4492 (tuple): 输出,tuple。 + %x.46 (Tensor/其他): tuple第一个元素信息。 + %aux (Tensor/其他): tuple第二个元素信息。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理每个输入 + for i, input_name in enumerate(inputs_name): + layer_inputs["input{}".format(i)] = input_name + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer("prim.tuple", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def prim_TupleUnpack(mapper, graph, node): + """ 构造获取tuple元素的PaddleLayer。 + + TorchScript示例: + %x.223 : Tensor, %aux.3 : Tensor? 
= prim::TupleUnpack(%4492) + 参数含义: + %x.223 (Tensor/其他): 输出,tuple第一个元素信息。 + %aux.3 (Tensor/其他): 输出,tuple第二个元素信息。 + %4492 (tuple): 需要获取元素的tuple。 + """ + outputs_name = mapper._get_outputs_name(node) + layer_outputs = outputs_name + layer_inputs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = outputs_name + layer_inputs["input"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer( + "prim.tuple_unpack", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def prim_unchecked_cast(mapper, graph, node): + """ 构造确认类型的PaddleLayer。 + + TorchScript示例: + %size.64 : int[] = prim::unchecked_cast(%size.63) + 参数含义: + %size.64 (-): 输出。 + %size.63 (-): 输入。 + + 【注意】Paddle中无此用法,所以此处翻译成赋值。 + """ + output_name = mapper._get_outputs_name(node)[0] + layer_outputs = [output_name] + layer_inputs = {} + layer_attrs = {} + inputs_name, inputs_node = mapper._get_inputs_name(node) + # 获取当前节点输出的list + current_outputs = [output_name] + # 处理输入0,即%size.63 + mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs) + layer_inputs["input"] = inputs_name[0] + # 获取当前节点输入的list + current_inputs = list(layer_inputs.values()) + + graph.add_layer("prim.equal", inputs=layer_inputs, outputs=layer_outputs) + return current_inputs, current_outputs + + +def prim_Uninitialized(mapper, graph, node): + """ 构造表示编译器永远不会使用的值的PaddleLayer,该节点转换为None。 + + TorchScript示例: + %345 : bool = prim::Uninitialized() + 参数含义: + %345 (bool): 输出,为赋值的bool。 + """ + output_name = mapper._get_outputs_name(node)[0] + output = list(node.outputs())[0] + mapper.attrs[output_name] = None + graph.add_layer( + "prim.constant", inputs={}, outputs=[output_name], value=None) + return [], [output_name] diff --git a/x2paddle/op_mapper/pytorch2paddle/prim2code.py b/x2paddle/op_mapper/pytorch2paddle/prim2code.py new file mode 100644 index 0000000..d16197f --- /dev/null +++ 
b/x2paddle/op_mapper/pytorch2paddle/prim2code.py @@ -0,0 +1,392 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +def gen_codes(code_list, indent=0): + indent_blank = " " * indent + codes = [] + for code_line in code_list: + if code_line.strip() == "": + codes.append('\n') + else: + codes.append(indent_blank + code_line + '\n') + return codes + + +def get_value(layer, key): + """ 进行optimizer后可能把inputs的value直接用数值代替(ConstantFuser), + 会把input换成attr,所以需要此处的操作。 + """ + if key in layer.inputs: + return layer.inputs[key] + else: + return str(layer.attrs[key]) + + +def prim_add(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = {} + {}".format(layer.outputs[0], + get_value(layer, "x"), get_value(layer, "y")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_add_(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = {} + {} * {}".format(layer.outputs[0], + get_value(layer, "x"), + layer.attrs["alpha"], + get_value(layer, "y")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_and(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = {} and {}".format(layer.outputs[0], + get_value(layer, "x"), get_value(layer, "y")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_append(layer, indent=1, init_func=[], forward_func=[]): + line = "{}.append({})".format( + get_value(layer, "list"), get_value(layer, "element")) + 
forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_assert(layer, indent=1, init_func=[], forward_func=[]): + if layer.attrs["type"] == "eq": + values = get_value(layer, "key") + if "value" in layer.attrs: + values = layer.attrs["value"] + if isinstance(values, list): + s = "" + for v in values: + s += "{} == {} or ".format(get_value(layer, "key"), v) + if len(s) > 0: + s = s[:-4] + line = "assert {}, \'The {} must be {}!\'".format( + s, get_value(layer, "key"), get_value(layer, "value")) + else: + line = "assert {} == {}, \'The {} must be {}!\'".format( + get_value(layer, "key"), + get_value(layer, "value"), + get_value(layer, "key"), get_value(layer, "value")) + else: + raise Exception("Not implement yet!") + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_check_dim(layer, indent=1, init_func=[], forward_func=[]): + lines = [] + lines.append("if {} < 0:".format(get_value(layer, "dim"))) + lines.append(" {} = {} + {}".format(layer.outputs[ + 0], get_value(layer, "dim"), get_value(layer, "len"))) + lines.append("else:") + lines.append(" {} = {}".format(layer.outputs[0], get_value(layer, + "dim"))) + forward_func.extend(gen_codes(lines, indent=indent)) + + +def prim_constant(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = {}".format(layer.outputs[0], layer.attrs["value"]) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_contain(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = {} in {}".format(layer.outputs[0], + get_value(layer, "element"), + get_value(layer, "input")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_dict(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = dict()".format(layer.outputs[0]) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_div(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = {} / {}".format(layer.outputs[0], + get_value(layer, "x"), get_value(layer, "y")) + 
forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_eq(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = {} == {}".format(layer.outputs[0], + get_value(layer, "x"), get_value(layer, "y")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_equal(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = {}".format(layer.outputs[0], get_value(layer, "input")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_exception(layer, indent=1, init_func=[], forward_func=[]): + line = "raise RaiseException({})".format(get_value(layer, "input")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_float(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = float({})".format(layer.outputs[0], get_value(layer, "input")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_floor(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = math.floor({})".format(layer.outputs[0], + get_value(layer, "input")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_floordiv(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = {} // {}".format(layer.outputs[0], + get_value(layer, "x"), get_value(layer, "y")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_getitem(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = {}[{}]".format(layer.outputs[0], + get_value(layer, "list"), + get_value(layer, "index")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_gt(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = {} > {}".format(layer.outputs[0], + get_value(layer, "x"), get_value(layer, "y")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_if(layer, indent=1, init_func=[], forward_func=[]): + line = "if {} :".format(get_value(layer, "input")) + forward_func.extend(gen_codes([line], indent=indent)) + block = layer.blocks[0] + b_init_lines, b_forward_lines = 
block.gen_dygraph_code(indent=indent + 1) + init_func.extend(b_init_lines) + forward_func.extend(b_forward_lines) + block = layer.blocks[1] + if len(block.layers) > 0: + b_init_lines, b_forward_lines = block.gen_dygraph_code( + indent=indent + 1) + if len(b_forward_lines) != 0: + line = "else:" + forward_func.extend(gen_codes([line], indent=indent)) + init_func.extend(b_init_lines) + forward_func.extend(b_forward_lines) + + +def prim_int(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = int({})".format(layer.outputs[0], get_value(layer, "input")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_is(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = {} is {}".format(layer.outputs[0], + get_value(layer, "x"), get_value(layer, "y")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_isinstance(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = isinstance({}, {})".format(layer.outputs[0], + get_value(layer, "input"), + layer.attrs["cls"]) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_isnot(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = {} is not {}".format(layer.outputs[0], + get_value(layer, "x"), + get_value(layer, "y")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_le(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = {} <= {}".format(layer.outputs[0], + get_value(layer, "x"), get_value(layer, "y")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_len(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = len({})".format(layer.outputs[0], get_value(layer, "input")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_len2list(layer, indent=1, init_func=[], forward_func=[]): + lines = [] + lines.append("{} = []".format(layer.outputs[0])) + lines.append("for i in range({}):".format(get_value(layer, "len"))) + lines.append(" {}.append(i)".format(layer.outputs[0])) + 
forward_func.extend(gen_codes(lines, indent=indent)) + + +def prim_lt(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = {} < {}".format(layer.outputs[0], + get_value(layer, "x"), get_value(layer, "y")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_list(layer, indent=1, init_func=[], forward_func=[]): + input_len = len(layer.inputs) + len(layer.attrs) + inputs_list = list() + for i in range(input_len): + inputs_list.append(get_value(layer, "input{}".format(i))) + inputs_str = ', '.join(inputs_list) + line = "{} = [{}]".format(layer.outputs[0], inputs_str) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_list_unpack(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = {}".format(", ".join(layer.outputs), get_value(layer, "input")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_loop(layer, indent=1, init_func=[], forward_func=[]): + loop_range = get_value(layer, "input") + line = "for {} in range({}):".format(layer.outputs[1], loop_range) + forward_func.extend(gen_codes([line], indent=indent)) + block = layer.blocks[0] + b_init_lines, b_forward_lines = block.gen_dygraph_code(indent=indent + 1) + init_func.extend(b_init_lines) + forward_func.extend(b_forward_lines) + + +def prim_min(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = min({})".format(layer.outputs[0], get_value(layer, "input")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_mul(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = {} * {}".format(layer.outputs[0], + get_value(layer, "x"), get_value(layer, "y")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_ne(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = {} != {}".format(layer.outputs[0], + get_value(layer, "x"), get_value(layer, "y")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_neg(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = 
-{}".format(layer.outputs[0], get_value(layer, "input")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_not(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = not {}".format(layer.outputs[0], get_value(layer, "input")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_or(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = {} or {}".format(layer.outputs[0], + get_value(layer, "x"), get_value(layer, "y")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_replaceitem(layer, indent=1, init_func=[], forward_func=[]): + line = "{}[{}] = {}".format( + get_value(layer, "list"), + get_value(layer, "index"), get_value(layer, "item")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_requires_grad(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = not {}.stop_gradient".format(layer.outputs[0], + get_value(layer, "input")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_rsub(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = {} - {} * {}".format(layer.outputs[0], + get_value(layer, "y"), + get_value(layer, "x"), + get_value(layer, "alpha")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_select(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = {}[".format(layer.outputs[0], get_value(layer, "input")) + for dim in range(layer.attrs["dim"]): + line += ":, " + line += (get_value(layer, "index") + "]") + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_set_attr(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = {}".format(layer.outputs[0], get_value(layer, "input")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_set_item(layer, indent=1, init_func=[], forward_func=[]): + line = "{}[{}] = {}".format( + get_value(layer, "dict"), + get_value(layer, "key"), get_value(layer, "value")) + forward_func.extend(gen_codes([line], indent=indent)) + + 
+def prim_shape_dim(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = fluid.layers.shape({})[{}]".format(layer.outputs[0], + get_value(layer, "input"), + get_value(layer, "dim")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_slice(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = {}[{}: {}: {}]".format(layer.outputs[0], + get_value(layer, "input"), + get_value(layer, "start"), + get_value(layer, "end"), + get_value(layer, "step")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_str(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = str({})".format(layer.outputs[0], get_value(layer, "input")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_sub(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = {} - {}".format(layer.outputs[0], + get_value(layer, "x"), get_value(layer, "y")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_tuple(layer, indent=1, init_func=[], forward_func=[]): + input_len = len(layer.inputs) + len(layer.attrs) + inputs_list = list() + for i in range(input_len): + inputs_list.append(get_value(layer, "input{}".format(i))) + inputs_str = ', '.join(inputs_list) + line = "{} = ({})".format(layer.outputs[0], inputs_str) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_tuple_unpack(layer, indent=1, init_func=[], forward_func=[]): + outputs_str = ', '.join(layer.outputs) + line = "{} = {}".format(outputs_str, get_value(layer, "input")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_type(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = {}.dtype".format(layer.outputs[0], get_value(layer, "input")) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_var2list(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = {}.numpy().tolist()".format(layer.outputs[0], + get_value(layer, "input")) + forward_func.extend(gen_codes([line], 
indent=indent)) + + +def prim_warnings(layer, indent=1, init_func=[], forward_func=[]): + lines = ["import warnings"] + line = "warnings.warn({}, stacklevel={})".format( + get_value(layer, "input"), layer.attrs["stacklevel"]) + lines.append(line) + forward_func.extend(gen_codes(lines, indent=indent)) diff --git a/x2paddle/op_mapper/pytorch2paddle/pytorch_op_mapper.py b/x2paddle/op_mapper/pytorch2paddle/pytorch_op_mapper.py new file mode 100644 index 0000000..2d63411 --- /dev/null +++ b/x2paddle/op_mapper/pytorch2paddle/pytorch_op_mapper.py @@ -0,0 +1,249 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import torch +import numpy as np +from x2paddle.core.op_mapper import OpMapper +from x2paddle.core.util import * +from x2paddle.core.program import PaddleGraph +from x2paddle.op_mapper.pytorch2paddle import prim +from x2paddle.op_mapper.pytorch2paddle import aten + + +class PyTorchOpMapper(OpMapper): + def __init__(self, decoder): + super(PyTorchOpMapper, self).__init__() + self.script = decoder.script + self.paddle_params = dict() + self.outputs_info = {} # key为output unique id,value为当前节点的输出名字 + self.pytorch_params = {} # key为节点名,value为参数 + self.attrs = {} # key为节点名,value为属性值 + self.output_index = 0 + self.dygraph_name_id = {} # 动态图__init__输出名字中的id,key为kernel类型,value为id + self.split_len = {} # split的长度 + # 转换 + self.check_op(decoder.graph) + self.graph, _ = self.traverse(decoder.graph) + + def check_op(self, script_graph): + def _update_op_list(graph): + for node in graph.nodes(): + op_list.append(node.kind()) + for block in node.blocks(): + _update_op_list(block) + + op_list = list() + _update_op_list(script_graph) + op_list = list(set(op_list)) + unsupported_op_list = [] + for op in op_list: + func_name = op.replace('::', '_') + if not (hasattr(prim, func_name) or hasattr(aten, func_name)): + unsupported_op_list.append(op) + if len(unsupported_op_list) > 0: + raise Exception("The kind {} in model is not supported yet.".format( + unsupported_op_list)) + + def traverse(self, script_graph, parent_layer=None): + # 用于获取graph的输入 + def _update_graph_inputs(kind, inputs, outputs): + # extend只能放更新graph_inputs之前的情况: + # 1. loop的输出i也是输入;i是输入的原因是:子图中为父图得到的。 + # 2. 
在_check_input中需要使用to_variable。 + # extend只能放更新graph_inputs之后的情况: + # 使用了append。 + if kind != "aten::append": + current_node_outputs.extend(outputs) + for name in inputs: + if name not in current_node_outputs: + graph_inputs.append(name) + if kind == "aten::append": + current_node_outputs.extend(outputs) + + # 初始化 + graph = PaddleGraph(parent_layer, graph_type="dygraph") + current_node_outputs = [] + graph_inputs = [] + # 转换输入节点 + if isinstance(script_graph, torch._C.Graph): + for i, ivalue in enumerate(script_graph.inputs()): + node = ivalue.node() + if str(ivalue.type()) != "Tensor": + graph.set_name(str(ivalue.type()).split(".")[-1]) + continue + inputs, outputs = self.data(graph, node, ivalue.unique()) + # 转换中间节点 + for node in script_graph.nodes(): + kind = node.kind() + func_name = kind.replace('::', '_') + if hasattr(prim, func_name): + func = getattr(prim, func_name) + inputs, outputs = func(self, graph, node) + _update_graph_inputs(kind, inputs, outputs) + elif hasattr(aten, func_name): + func = getattr(aten, func_name) + inputs, outputs = func(self, graph, node) + _update_graph_inputs(kind, inputs, outputs) + + # 转换输出节点 + if hasattr(script_graph, 'returnNode'): + for i, ivalue in enumerate(script_graph.returnNode().inputs()): + if parent_layer.kernel == "prim.loop" and i == 0: + continue + node = ivalue.node() + script_unique_id = ivalue.unique() + inputs, outputs = self.equal( + graph, + node, + uid=script_unique_id, + parent_layer=parent_layer, + index=i) + _update_graph_inputs("equal", inputs, outputs) + + # 设置graph的参数和输出节点 + if isinstance(script_graph, torch._C.Graph): + graph.set_parameters(self.paddle_params) + if hasattr(script_graph, 'return_node'): + inputs_name, inputs_node = self._get_inputs_name( + script_graph.return_node()) + graph.outputs = inputs_name + # 更新split参数 + for layer in graph.layers.values(): + if layer.kernel == "fluid.layers.split" and "num_or_sections" in layer.attrs: + layer.attrs["num_or_sections"] = 
self.split_len[layer.outputs[ + 0]] + return graph, graph_inputs + + def _get_outputs_name(self, node, attr_name=None): + outputs_name = [] + for output_ivalue in node.outputs(): + script_unique_id = output_ivalue.unique() + if attr_name is None: + output_name = 'x' + str(self.output_index) + if script_unique_id in self.outputs_info: + output_name = self.outputs_info[script_unique_id] + else: + output_name = attr_name.replace(".", "_") + self.outputs_info[script_unique_id] = output_name + self.output_index += 1 + + outputs_name.append(output_name) + # if或loop节点没有输出的情况 + if len(list(node.outputs())) == 0: + output_name = '_x' + str(self.output_index) + self.output_index += 1 + outputs_name.append(output_name) + return outputs_name + + def _check_input(self, + graph, + node, + output_name, + node_outputs, + add_dim=False): + if node.kind() == "prim::GetAttr": + param = self.pytorch_params[output_name] + if isinstance(param, np.ndarray): + if add_dim: + param = param[np.newaxis, :] + self.paddle_params[output_name] = param + graph.add_layer( + "fluid.dygraph.base.to_variable", + inputs={}, + outputs=[output_name], + value="params[{}]".format(string(output_name))) + else: + if isinstance(param, dict) and "Tensor" in param and \ + "parent_layer_id" in param: + if graph.parent_layer is not None: + # 当某个param被2个控制流(if-else)赋值时,else不可以引用if中的赋值结果 + id1 = param["parent_layer_id"] + id2 = graph.parent_layer.id + id1_part = id1.split(".") + id2_part = id2.split(".") + if len(id1_part) >= len(id2_part): + for i in range(len(id1_part)): + if id1_part[i] == id2_part[i]: + continue + else: + if id1_part[i] == "0" and id2_part[ + i] == "1": + if add_dim: + param = param[np.newaxis, :] + self.paddle_params[output_name] = param + graph.add_layer( + "fluid.dygraph.base.to_variable", + inputs={}, + outputs=[output_name], + value="params[{}]".format( + string(output_name))) + node_outputs.append(output_name) + return + # 若if-else外,则可直接引用if-else中的赋值结果 + graph.add_layer( + 
"prim.constant", + inputs={}, + outputs=[output_name], + value=param["Tensor"]) + else: + graph.add_layer( + "prim.constant", + inputs={}, + outputs=[output_name], + value=string(param) + if isinstance(param, str) else param) + node_outputs.append(output_name) + + def _get_inputs_name(self, node): + inputs_name = [] + inputs_node = [] + for script_input_ivalue in node.inputs(): + script_input_node = script_input_ivalue.node() + script_input_unique_id = script_input_ivalue.unique() + input_name = self.outputs_info[script_input_unique_id] + inputs_node.append(script_input_node) + inputs_name.append(input_name) + return inputs_name, inputs_node + + def data(self, graph, node, uid): + for output_ivalue in node.outputs(): + script_unique_id = output_ivalue.unique() + if script_unique_id in self.outputs_info or script_unique_id != uid: + continue + node_name = 'x' + str(self.output_index) + self.outputs_info[script_unique_id] = node_name + self.output_index += 1 + output_name = self.outputs_info[uid] + graph.add_layer( + "fluid.dygraph.base.to_variable", + inputs={}, + outputs=[node_name], + value=output_name) + return [], [output_name] + + def equal(self, graph, node, uid=None, parent_layer=None, index=None): + if parent_layer is not None and index is not None: + # block的输出 + input_node_name = self.outputs_info[uid] + control_output_id = index + if parent_layer.kernel == "prim.loop": + control_output_id = index - 1 + output_node_name = parent_layer.outputs[control_output_id] + current_outputs = [output_node_name] + self._check_input(graph, node, input_node_name, current_outputs) + graph.add_layer( + "prim.equal", + inputs={'input': input_node_name}, + outputs=[output_node_name]) + return [input_node_name], current_outputs diff --git a/x2paddle/optimizer/pytorch_optimizer/__pycache__/optimizer.cpython-37.pyc b/x2paddle/optimizer/pytorch_optimizer/__pycache__/optimizer.cpython-37.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..2290a4b0e9adbef137d9825de3e7291893994a81 GIT binary patch literal 1041 zcma)5&2H2%5VrHzv}{2wAV5M0ZY)TJtgfyQpwB;t?0Z1SSKKrNd$I{!J_I3% zpgGyaub0!pOFT-pi10;lN<<+26F&(=_?C2|Z{SV3w4#qdT-o7e`I;*}mNvzMhCEO< zfy=ufBuS_si6^M=PD$b~!Zrb`t6P?v$s6OcB0GXO6s{oQcM13gO zFU<*qgdwSTJ6TV)Djipjji#k!2J%X74!KJwO50)}TIPG1a?%=|b0^s_%QGdpZJl>U zX=`_f3l6CrKuoRkOM%v`EGOK^miL;l+URL33qUEtY-9;qP|AGtg;*@>$8Nt+u3t{o zjH?k><0l*a_k$fSL@t?-2Qt^De>gC;p|2Zw`ex>|O(*P+<+s)|vnpmRQ<-Dz2ErDD zAYMRm9o@PNbg%JDv0F$u5+QJn_)}002XnGVUXc$Ek3C1vyt#KyPsp)9_s{$}J*Sko zK+v~een;jMI^t~~#h;Gv3$5gXZe%g2^+8louJy;MsX}f{KC41wp&L~+f+lg- z4Qzx?;TR_(Sw`HL;)P7YTCH(g2rR6Mo62#@%6imk2i6K*|F_!4ce@)RNEnu}YK5^v zizz_Pst#lOQ=TtQ>@vi(*HB=$S^}T82-S7q5|x<7aczOv$>XMHz2-#=efg_rJ($u= ot7PN9oa^F?#cy^2qaNb3oB^I)d&91zO4?GJo@&2L zpQxZ~AECFLG9$-HC=tw$W=1ogKIwcs9uZi7u4n9PLdaj->{klT8Hjxb>LcNVM_Uj< zHc1)r_>xSL8Zvha&hP49gChvkkTT{`nedn=TT-SxpnPgYJ2yBosOxFoNtfGRt|~dNsOzbBCE7+SJ}LmwQH2jy572MCbFi}t>}bwg1>~D9T&=d zx(|IYMwv-$kN`hem`X42uWbe)8X7+bcMo$EV21GHbPl2TC%M`Yige1V6JV3Nwg0!L z3*i{%U))WIZ;9AviU~!&u!u1~VysiV2j-bSV(eRAwY!YL&t`D8IRxdO)Gg523Aa?- xAZ-suho{Bs8T#~%Mc$PLeAs_6g# literal 0 HcmV?d00001 diff --git a/x2paddle/optimizer/pytorch_optimizer/__pycache__/pass_manager.cpython-37.pyc b/x2paddle/optimizer/pytorch_optimizer/__pycache__/pass_manager.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee30c790cdcf7c943197be72f3597522e922677e GIT binary patch literal 1118 zcmZuvO>fgc5ZzsW#7P~gpbAqSqmST{1Y$9c_8sZYNZ(t&^kqZ1!kd#B z?FXRyqG9zW=z(Zjy(LB1M%m!m^X|L-<4A~9av?v+ zG%vcJ_6uw1=7mXy$waDdF*do1&v?xOnE0?lxmS$Kfb%3v4CjbgV?jbETeVj9Rspef zf*-#~p9Q`R%9I##PA?j2Yf4d@l7m$<9j36yGC(e%ZqZ?-n?*xede(B=qCyJF5IW$Q z{K(O}0~X5+VJ$k@u7j$F#Spd@1ZRPTpcpbEhH|!Kw&Rp;nrKr&^09=}H3ay!g5kxf zjMQWJuP9-Yd-dwp``;kfWpPt04tA<>E*VfhI671{u!b#kTg^}N{9;r*sg=85FITyH zuwyVSB#-m5#rZH7BN#s~L(boiqIBtLa4zx~%&tgcW9uppQkS7NkxAmAG-tU`O$34@ zsWx7>kk+^?O`G7<7#1*>`7CfkFLbSE-9J9S;nnuxk}Sw)YUm}K!Zs`zg&ncW4I>B! 
z(%3F+7K;Ox!iiITCpm2#6IXHogOlnh!a*tt4QNOeZfy^e`uWMZgl=)!tnsdf%HW{l M?1k+Lac@5S2c1Ilu>b%7 literal 0 HcmV?d00001 diff --git a/x2paddle/optimizer/pytorch_optimizer/__pycache__/pattern_matcher.cpython-37.pyc b/x2paddle/optimizer/pytorch_optimizer/__pycache__/pattern_matcher.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75d5a20be280945c6ee9613e5f85eb2e30bdfe90 GIT binary patch literal 7351 zcmb7J?{8bzdA`4SNl_9d%a-kAp4OsX=x%i4HHeoPLE;H&fwh9+7)Ze-;3(ZIn-WD* z=U%$BaEUpQ6H!TQYi-t~O;SijlQ>0_20`cAj++Aeun+wUiu<8JQ&E-zZ2i6g+n(oK zlA>t4>j>O)&pkihbIyC-AJ6;pWPg7`!O#A$soZamE6VTq(f%2zJc-9chvF!XT2$uM ztg0$sRUFOHFDZ`hYD;=ncMQk8q+|`WTTTq^G1+c9_R~r_zKz*RS`GV7*@quF`psj}33HBn-Yu3ZM{CC_ z^2kTa6~8cFSaikF$_2kHrqAWNTB0>`(PAP~xe(g9T%lC(bGbd-F;`VoOIsY>eZEY4 zRA~6+N_m=W*$$G%*q=YdHEkF0eF=HVtiu(WXXisA=np3YN_3V2C~` z`Y<;%T<5HtHo{TI_1$Sd*J6>Eo4SzWviI!ZoN!dQ^X2ny4v#42D;3v?&L}LCY*t-T9b?WAXTVUKBLeGy!Wj!^o>3MKJ)<}#*c+>!$7aSh zS3YevKH9i(W%Js4^Y+!|t2eh_ynN^Tmzp1}HsAd8_qSh7jCK8*H?KE8dMWC|pn|jU z$vc~`d}njzpEj?2yLszs^Mf}wUn4=UIwkp%oB`3tV_es}SO4ylQ0DNVloz!FsMYONy*3uuY#&!v_2-7c zk9a*neqjc=Cixo}2b2R!y)Q_BD}A%poVKcq@gNbzg7^sqEwOqsh+_qN&RSK)%Rv%z z?YWq^9vBp1??-_yt~uJ%N)TI%2XV>O@zQZW!8t*kdqEv@f+Xv6y7-UQ%->+*vlE%TxJcE>d%0q9-)w+zVc4IE86Hw4#pCEcouc7g}?0kZv*5 z3QlO#OnY!Op0aG^Opm%nnHa5kEJEy z{gSGoCGZTOEvjp1O{l{dLGQ>hoIRi$n8~pT%wuapi@rHGCEKHKP1`S5;aClGM^KU$ zOVHCu&62c|cqg%LO7^9&qXDcS`_#n~r*`46%zcnU<^*IpU-X{H?BWDvdory~q~qex zQ56SR9ArVSCBZ{z5l#Rq9A?X(vgk*Vwk4*J0w<2KHN)a4i^oue>TGCAah2>&tCH7( zC_~O4L&c-CD&|1tL7dLO8AtG5Jp9L<=u8KL5ma|z$0dA7&_PIR*Y$gX4g&_ytXqK& zv#tYl7yuoB6Ji421RXINAPG9GF3@4u;{Y9T2|9Fuj?)r!U_@ZIK*wnqtOo%)3>X@- zI)SY;fAO8|=a;wMdwzTU7C^)1$LpJKtu$}nYBsKKy?3ek;r9V8TEf2h`t^-p{PNCg z*S3E8&gOS-0-wOw{ICM7(9k3AxMDQmzu`l5;)oTKIHDnswHrsQ zsAz9tF*)8?R+iP}*s`{)FB{9|vNdB%)Sz$lq6bFiHS}?8XG_`*t^y+0(pi&n^gjRRsp&g zb5vjv4J#`rl-eXuJgae(#11ELrH0%Mptg6sp^3?b<2biQdc;D+RSj#+AQy1iRuBW$ zIZm1j$DdI^%UrVp)zR0r7UDSW$JiRJfVzS@Xvy>cg!5`I%RRJ4z<<@`9_+wwm~)o6 
z-JT&=kINl%KN2}%G~(*vaP&q84r8b9pKVRFJ@)lR=W919~1rPb}yQl1m!U z?dRArL@}`C*XtP8W}A{S$qLSsKXLSeNOjBy88&bqyj(8 znpA3rG~%yVJc=Saa+hI-lgg$_?zbYJ2#1hEi})K3-oxfyU4>Hifp+6vlnNzmnHE9( zC5mvkaJEzyZjR9*%s)M0WR#9@$d_6C1&fDRP$weW!q4~dcqdUXdSU!RTWZ68CSgHo zzxg$SH!QJir=_O_rp2W#re%-bi(mi0tr7k9z3uBxYviRT(lN0Q+v>UsMg?7Fm-aPn z?f7z$X1}}{+SgDAcVSDP;IP;Zx9-mwX>jT0*70}`qG*S7baN@~;1`E+SQ`$7Zt0J4 zC-*c`);uzx^_}}8&Bj?jO3&L zUzfsDP17CnM>ESuM)fnOoK@;-?O6n&3avkscM{5rz|}zrS343&(~H(aG(dq0>V$>t(@C9>A5=jW&z2J7bwz#uTCL=i3 z<18q?nXv78;wi!QU4E7PkQe?#RB%a(gUf*zRpI9VCg9@^28bX6Tovjk17zZ0p%KA5 z@S^xK9g7kAx{7E;DIH<^D0~-3jkd=1jvQ4u7Cug=k3jqmv2bLR56Wh1wB?u)-8%el zzHBxHE-X#|SsS{i;`;>2BG&@^i?|_FEUX-hDojs&1iq zN}Tl`@4$^Q{CWtEJ>>Z!{rK3{zx=$}czff`4zE+Pg!X9VMUx_;)-Wy^j);@eEYe zR`9!8ARXffo0oFZJH+-Za z)?`9XLT1xP3WOFP@dH$Ijj$4`ypEkLd;!*;Nl9tpVNT+tE6<2*FCC4TK#@*_3EUqn z9+K%^*gsV*6nUdEw;qln8;eMGzTMEyTg$+#3q- zMEVK_vr%RjeaO}#e=9>;h^jH006aOyH(F(Oec>3wRvQpR{)1E{z#3~d*0#U*qm3Jv zBSQ!CwsGTw?bm-MrJ{ND<>rgmn?JqTy!Bf1>I<9y{82aCcZ7TK&`hyfa5ByXsnD4z z*n*5-&YjPTLVl`<%-8vRvFaiVM-A})ew)nN{%<7aCewZQlJO`t!LTMYab-}LgiLjL z-zxa$A`vFiktTIlf`WoA&ajZuoMDZTPD}YDYf7#4Fz)Sn`T~ZD8EBtnX@4Uo9zK$-DR_htx`KVo@8|w$d%q zqHWODx=lK?3%Xi&NRK+e(Yi}q+6R5Ddt^X|V5qf2Jn92qYnO~@00OQ1WK2U4YCRxV zGy;**3dnm7Cf8B9_@Q5A^iZigQ z;IQdcY+teNZfh`gm`|RWR>V*dv+P_Y+?Xy+##5Om7lCRQU+v9m~Xysd@vmL35>5l--r(ZA>Z(4H$n*Z!Q25jMHDSa zNi#|@W(6yG#(m6-pbWE+5*@xIDp27mQK6<=zNJ~Dqb;{ z!Va*#^*;h)w|C?jvM?T6%mG^jmi? 
zKDaiN#uTf&5IV#rG@|Q!7gX$zPLc?Pu4Ji&X!}BxwOWFXq8$ivyp+Y+PJpu)!(NBH zf8k{v(PGKxk!s^7ogz=KAv5GTpz!eljR(H~ZAA!O literal 0 HcmV?d00001 diff --git a/x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/adaptive_pool2d_fuser.cpython-37.pyc b/x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/adaptive_pool2d_fuser.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b452fb942d1e3cdc24b534efc73760623e3c80b3 GIT binary patch literal 3896 zcmai1&2JmW72j_zDN?cJIF3^_VOpma6N&mXDI1beyGU~>jG*qJpb;$AoF%o&a=DpZ zx)ebHNhBbLLh+#m3N%3=r$vxng5=s1N&k+$CYHi~Aw4v|H@l=n%797C?99B+dGp?z zH@6FgoP^(h|99W|{8>r*jwZvOhQY7VLh6{rWKXKe_$r>#QfsPAYpSQU^qSr>YKE9= zUgi(d8xqr*u`e;hQTNoE$>h6IDKh{U=}t+G&EIxH=e8X>B_*DE(`L+b-r#nyHJtp; z-f=kavGDNz9fWN_#C=OisZhur)(=E;y|Vyf{l77&aj;(sSGW1 zVqir(fm2drJ?sPy=LV=_)3RLOjVudTTIhHUe6yBS_v|pVtba*AHWyodv>0~$9oui% zesgJg@sBHsK`iDxaJ+V~*j)*Pg%;b;yXE#Az8LI8ZC>B9PF5P7&~5vRcDgw$5WBOm z?C^51!|9A^L!p9Bl6$jHFhhB$?Z_S`aH;(UE$o3*7+TteO}3?Nnas^}SMJNq*pr(w z%N)QbQkcoKWL?0sXW%(D1vu6PJTKq{HhrKa91dmnjNkx>z&J-3kB0n}NELFFL+z2Y zC$saHrOVQ;66t+~&9Dmx%C6d1q2aTx1Sx`+U1ZM*?#iLjS0CXlPho3)TGePZ)&G*9 zeEy8ym&f+?f`FfAKVt@)JwODE?H*8}`_IpCxx`+CURW3Kmjt}Pio@MQ^OuDjKm^7w z#yP+`zZ7t+3;2~iI)zt&r98z(I(Dt6HZuAKB>jqAO=tzDIUy;_vQ34Rgg%7xDllLR z^nkP1K2|WAKc!Ow)weTjVMFTcu%$t^WaY?Yi=^XjCbi5prYF4Evf#3kaKSk9H|4{j z{j+R=Jd3T0m|H{4RXXp$0gFcuAO7&|mxGVr{o%9sjz0SK=-|`izwZy-zL>cEiwh7A3grhL&VZn+kgDUC&wRt`Tf`bI6i#u_@Dp&{+mDF z7*P^~Ze_i=S!{S6mz6!rPGLFRvIA$%^@C2d*{v*lVO8|8%*NYpQ z#g#ECw^8f@Ixd>TbK4D_;_s<{y<2yX2-^PK)zR?kq=Z#SxcAl=?{0NXh+V(8uyUig zvbVl#I=jj4IvM6(WxiN>>mCt|OAyCUT`k)|;P`9~*yCcl>&qY?JEGy4r|IzIu;zuQ zSRu0MX1BVUPAP+LUbP=IPuX*n3x{{ZUCM=dbWDyA?R)Yix)GE+vD`&=SHp-dF2!ZM z0Gy$0y-Ygg&tNv7d_N%dN-DpI z52tG*mUmJ#w=u{P#{co-8A6jek@Tho7iZ|5adzaLry21Jgquwyx?wb+i>a3r^U!na z&VWoiVCYKvDfxOB**uC>oIGyQ$O#6*sL9rr2c-FFG3lO{(yhWR2j@soD!APa2Ioo6 z3@zq`^rjQJk<%heGN(HrJ;yj|9@Onk3Ead5OA22A7JrR6p>%>JQ_II$ItNh^#47yh z$JwzXn{Z3;A!#WQ`Q(5M55v+2^%7sEX)eyBJ`rb85LhA&#YU2igeI}}wzCs*s$LS5 
z`|zE&1((=>W4eAb)`fSKOg;rHPWz8dw~;WbO{W4ZLAW$AU#{gmST4majN#UDV#n}V z9!`3GVa%Z~LuA-PCtWaALsn!%o<@5CZB~8}EiY?WRb&(Y9A-KBdB7BW&2&A%PF74o zJ0s7GC6FFzCzMC^Y)gq1;zcMku>l zgzup$$`Bbvh~7uxv#s@YT!iW&E?aRCCP{*ekTl|4U~{Ab1b7%1L|{9AO6;1rR271F z1zoHL?Vx0c(~5ObX!1>3&$R5QzU72*)*q7A@}r@3HvgP+ebj=HHNx@0CKRSBMcM_L}g%PM*5z#o9*lUEX%;ECcego6rqeGOlCF|6hj8emkf%~EJ}$il@hzT zeh#TQcV{4cUBoKcUql<9rO_gFE9iPpi_lRjEM-oPDAp5)9|2>Hx%G&X!<43y5Tn!L zx6swHNvPs%VqFY{B&Nulx4*^%WAb*$D?uHvfk;~UIG!13BCcToK9Y75*N~EP2KT4 z%Q{gO*^4qdr*thIMP#c`JS^nAYTh+7Vihkl-S4!5omlaM+S>5VQy#r=@Q9gs^OT1P zD2W8l>C&oQ9Ff-B+$jgVji70XoG6YZo*75zM6M?^I8iwnC71R_VuG7=!3?!VsF>R5( literal 0 HcmV?d00001 diff --git a/x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/batchnorm2d_fuse_pass.cpython-37.pyc b/x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/batchnorm2d_fuse_pass.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6002b385fd7218d6e20cf0362d8d925af9341a67 GIT binary patch literal 979 zcma)4OOMkq5cVTUL)F4cfCPdA*MQWpJtMR#65OJdxS(7tZ|$~$ys%S5t8(gI`44d9 zFZs%ezW^6z913XF!#bMr#E<#r`^K~Jct~KlUl*bd2>FgbhY><>24-jA6j8J!70oHd zn3b&JIrlLygDT8JN_6;&s6d66M1`8}`JU#Hj`k!UsOU9G20tJVNvI3XrL{1h%w#uT zWsRw(>gC#M18d_J_Jq;P!gkus@y1~=2+qLlBXFAJRFRx1nsdbzza)9kW9MOy=5Aj} zkErhM1!C47Q!D^fAmE%rN*p+IuoqzVCb*7#p*!-`vjz?{OPIkzTr5OU7o8AD$m(+O zjhGv-@pGA0b(h+8y^-}o*2|}->6_`fRI1cM=?}VW+VtbJ^$Tg!c12ZewMpAe*O>WA z{9A$Qil$DF7CdP;W&mjSz%157==R|ezLP_hbAJfVAv@@F&k(P7WKTY^9eYc6R8hq~ zGwdpG<(Uh*rfq=WutL?uyP{UWA5*wN(`q9-op2N5f4DTVUAYi7HC~C{;rnu-Y}<14 z078q{ghq6G|5&;HT$V&AZ!Ie=gc}M`HEIp&2{#hr{aTiPb^@He4CVxq{s)SUFos30v)(sC)$yzQ!Iuyv4 aXt3mCq+I;0SLFFEWQIIP6h1zp@#r^K@&IH2 literal 0 HcmV?d00001 diff --git a/x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/batchnorm2d_fuser.cpython-37.pyc b/x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/batchnorm2d_fuser.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf172eb4a472ddfc4f565aa2b28d57a6d913cb44 GIT binary patch literal 4213 
zcmb7H&2JmW72i*CNlUWgk0f$kmrmN+bZSZ@^Zi}M16e-X{+XTt~(BobcOW9+uZGrapW_Br3R1ew3&d$#J znD>6~&D)uKlaoaS*BAd@wSP5(H+^K6fzC^KLTZ@8R8MKBcx#^4)*HG?W4dRw&4$^| zHFDzH@bYgcuPDr9xm|_j9DPS`SWJCgDd!VtQC7-oY~AmM&b?;nl(qQOYfZ*H=M~-z zHnNW&G`AfNKJ{9QM6ydyJK;P!jxeR6y{D|HOl8`x)zF#Fj9sN+$jKWf%QFk#x%U)9 zY2;x-A)db1j9MEj9p0`nvVynNR4ct{271D$(L_q5_LUuNQzQL*D!B(8`bwN<+v{X6 zOe~6y9c>3rS&z-I8#tWjARSw_?fPzH+u$-n$6Ld@VB0OP8HTp~k@BzBY}=1!!>+&G z^w*mH`doGPM>UCt&77Y)UMHB{ss#d`*-j9-ZMWy}*l?MQ0Q`uArWM;Z*UuC%+ zbzNooJ!C>kvzQ^r{|~*uibM2MLVkix?io_UzRISA22g~C)5P&O)2~Ikm`B?;9xFR4 zo1Rf-lm}X5_BHktJF}-f(EBm6GNnaMIL93uYcTD6hioC9YZ!xllEPjPJl{LUBfY_BX73=&1cLqCW}aQY zO)(}-^i{!E4zbLOSP~+$!dCbybj*nvP7NU@^9EPkRuRi}c`QYuZ{_xJhqt6faetY=-?og$8^60a}Pkw#y z;qDi2zgsM&*D(Fh?;iZ|@sm#yR{3n0<@bDKOfBDJp4)@11*JTxt%6M1_H=2QS)%AmJv<&)u`6LO?@voBb7;y>egx1T;rTGy_cZ~>3 zrBMsW3f^=>r}RT=zrWRTP&zyQ)hmPUmC;F{=YG?Jz__BV`a<>Vxg$YXs0phVsz09l z$%xGpyOlUz%sYY5E3%`v-EWeb9Bac+#hRbM2uAq$q}Qgw_$ zL>xRPJM;CTj>!mQ`Y~4q`MyLsIFz7+e`2;dc%A-<6KI$kVU6yn&(M}YTZz>z{ND92 zqHT-jGCm0&*-W7qTkDQ*`^~mfBL(Tdcy8=pyxMg=W(UnEa=2f)?R8p6%sZ7#xOZlJ zVa29cS-s+_e_iyh&vCLATdSP-4&!{M8)5NT2YifSjT}{O7MLGLh-KLk&ex+9s174>Stk=a=dMTmJ zOe%+Yy1*kyo4s9Sf=!L*8_u-10|MSs5|MBYTYj?}JA=LA3Xh%*M#S?zFZMQa> zzVCQpY_&UW$B(-0IPV0Z>veo!NwHeNrZ%5uOk9AEwkW%CPF7#xYHYmeY=>NihgepT zS#JvQ*c3TeE^>l~pFz_oxNA~LHJ(Z}ONqH--_;}5T3jUHrCj6us8}l9C}xUl)stDO z$Jfo|$cn)EL<#?jMwzknoT{lgbqdb~JQvkVcqUW>qnc{rF5;^QS~g0&L@f%c0g3^d zeADPTbs7}XK%=C)m`WV=XoT+KKsJ*ZomI|VgbryVQOT;_r9oPx$B)X0gDqNwbkRDX z{>ykmI_~t9eWkBq@1Q*}a4gaHHL(L$vHvyu*mG~{eG|I|eIL6C+G&WRinbz4ff-MV z65{+!#z*EqspWMvu^w~+Bq3!$Y>ER-Tnw5VVex?E0nRV^)YB%lr zIDSO-fajOc9MYmy0Pr;6>s=lL;bM7StD5Tto+zSKC*~f9&^x2?Gki!RQ;J z7V`_>$rd>|SvF;I-$9?>MH3g~;(+O~DZ0nuhSlDtr&$f?j!4jwX#HrGVLnYN^fg=)C zx3;XJ7)1pzQ7nS(So4E;K07m31}8)u1dp8=E7<~-q2-(keB=B;wAJBGCEy(-LtE4h iPT3fr8D;23t|u%wGFkTIKkv4L;11Qu%xYR>`u_lS`4IB} literal 0 HcmV?d00001 diff --git a/x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/constant_fuse_pass.cpython-37.pyc 
b/x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/constant_fuse_pass.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb602f35e197091071a40379c6c1ebf7f1d6d4ad GIT binary patch literal 961 zcma)4y>AmS6!*tnazqJK2q6#)>w%P`oe`>93AU&b3*2Ivvy&d&7q){)6sC0Le}IvH z$tx2pBP_heI8|`Q!aQ?4!wKM4FgbyB0%o2w{()Q$*2<)HJ6Q zb5^mM=RD-Rit0FzDbeu@q9PTa5fy8?w_oGOwtMRTs0;%6j}df+_nq5R&(+KA_G z7w2eV4sZgY3JS%r;m9G5A?!`)9Qi^w?+%7w=zS3x(8vA4591$mt{_O{EDyxHlZ%K+Fl!;b!1CEu?>4mH&ju@J~O;VP|0UL za&6awv{5Y`uzFWE3Y23BKWsZ~q|+%k3I4|mBfC={qnjpp(0BNtd@Q@JT0MZ&A~vA| zy1sjbYuGeP2k2@eYb}Hy2~oFd38o1@7UKO!_~KBmd|4>p_oM*si- literal 0 HcmV?d00001 diff --git a/x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/constant_fuser.cpython-37.pyc b/x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/constant_fuser.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a115505d7c01b25995450db6a89b016dd6ba453 GIT binary patch literal 2174 zcmaJ?&2Jk;6rb5IuU)&Offkjb${av-h3tL-5-JsHE6M>y6(9wzfMMgEq#LhyH#6&E zM;j?^A{B&6D{-T!N&q3ap!P~p`wwv7JlD2K^DmSGyfg4#+1H3gJO{58%UBqP8DntD zl02G+f0?m%!;Y3TM0Ry4S2PQ0Ujuf150qTAZd73cf zWRWa_PeTaw0!5!*x2Rs-Fg6J|1Xl43GXoo3S0_L;8lL4f3h*jhOEtIbIi6}OGVgi zj+9VF|M(7i3hYO|rAn!vWn9Fn3=v`x;#Aply)X*IG%{1M z2Y#Y99`*vIYWf5l@*adG>B2xt4xOSEx_oFdL=&gmn1t9ct~PWH)m0#Nk^eQoHa9ou zCRu}c9V9E3o

Tb33;eAv<8l0m=$2ph`Jiv^SjG3d~I-FgvEm`$upshu!n!vzJy} zn3ZxD&X&I-c{z9T(i$ZF(&w{9QrI(XR0bQGgCKeyy8Ii4{+kF9R0%{K`P5i3SFGI1 zX~z&%y<_C&CIwg-Ff()WG`UEYs*L%}h*p$)w`OO{s0#x9pN zVxlBTmrI%q=LHs!oqwS<%3>eT$__K$mC8($RM`L)A(hhs#PCQtb5YXnz+B@-DwhWf zTtdN6<^snG@Ks?pP508}HzuJO#!zK47V|HQ zOPi-b2S1W$nv>U|TLgjutg@m|X3FaD0hq3}kvdt#A=p>t!U>RSqK-_5k*T87NZGp1 zs!Hk$@B!o<;O3Ik^r|BUhIhS!cVU`B%A!YvPSmMq z?ZYjJnN4VQdP-+&1stQZ_VE9^1|A)F4KR3&sS0D=B%e8nBHE6-0 zYQx8(&7+91-9iFtR`HMyA!gcs=n@GCpXpZh^V29RJMMMUfimJ$of^F(&GD-N@53|i zNOM?}Sm9S8um*bj$E@u{@MbC!h(uRlwhOFBYI^cOFAF2B!LGu)h*R$A!uB)I0jsWj K2)QZhn*RaOG+Lkl literal 0 HcmV?d00001 diff --git a/x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/dropout_fuse_pass.cpython-37.pyc b/x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/dropout_fuse_pass.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a25a9ab9cc2baeb8107f07126db5b6dd6329921d GIT binary patch literal 955 zcma)4J&zMH5cS7yvP2075=bY}&@M<>?rL;89SFMUq-)SNmWiF*!hT_U5s5;TYxxgQ z@|WCF@fQ#UGhPCxKyhBpcSVi22K%0OH$FC zQjA&2DxUKY^D?U9Jf=j)FNum&d_`2O>6UM4KG1wi@}U}BkaYMR@{pAJ=v-P0^Z2=G z+NN_aI;#z=CoSv?qnCws+RX9vZZHT=!R$kDn&ecGoGF@f#T36HdDP?PagXE=uBApa ze^>QbVgX3*Cqfi;;eChJbVkX0aAR*N1x?r@QLr;Q*XLR?yX+8D4c{OFps>zOD zGaor<1`r!n(m_pci&_D1OyP%3tBrIzH7b8PtX8A}=%t{Cn;b1mIP1sD-O7B$d(mOrfz12N9 zGwDr$-2{z64tg+R2&gf7F>nbe($5s)az9O zt@igKGk|)GlkH{!aT@*!nX zJHlKyJ07PWk*50`P9gJ6s=W6HsW{`OqG_mI7kth`aZdWlrR~WF{zypRqer?Zvfa#* zNZ$fNXjMAKb+SY`<;I5BF}caD4briS%RBaUVv&x6SF74t87J`|!<%K7mM7ter{bT! 
z0$oNjnv->7#lSOfP&5hT&3WbW(K6}?YE?DCvQZ+Mrn1vPBBXRcT6v5GQIIhPTvjT= zB^WD=bwfW*8T*a=#oGNSYo~)~v^;yPy>PrhfN^nIgmKay9#1qR?KsJTelQlY zos6U|5y7&BtE6;wpx_Rr- z=8Ye>zTfD^QJVQt_Vm`ntq0$1-rjh81HvrNcHRlu(-#Nbx;;vfjQoB1|d9h(cpo?{kUjbq#W4?rV&&@mZE?KACJV4egJvSzEJGWOLr(nkc3fv8p zo6{Bhj+0y5ylZf?XUcqd7}s*(J4-%2xaPsk%{}0)d_nR`?&R(|1kmbd7mI9i=DVm2 zHV=NxoR{EFzk_ag1mg)+f}>vh#8@-etlY|J&ycm;z`fj@BNxbO?IK1bq-oDcPix8m zo^OXA0&Rlq`89iZ29++)ItgbvpuDY>cP7d&a0}OR=K$$Basdv2i4hpB^+mgwvt26=vU@K4pbL}F3_7w|7)|GITJeatuwk^Q$uGjG{%rD3_#6s!}+xw!CZ^BlJ zDM@IpzL)OR0L|!eeCU7Sffoj9KYEQZRb{Ln^FfGojj`2%9~L{hI!i3i@;G#=UOo+7 z5r&NI6adf?Q(bGRAC~9#YJ2&>0}~ G%zpvG0#^b6 literal 0 HcmV?d00001 diff --git a/x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/fc_fuse_pass.cpython-37.pyc b/x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/fc_fuse_pass.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b354b48d4c33284c9f1f4bf7643cfee6f1dddae GIT binary patch literal 925 zcma)4JC74F5cXqtvqU*K2?`Jm?Sho$uA)HbbVx@RokWASz%pwmx3CXmZxD$>m23GA zQ1X}DQt=lM1v6g4MTp`Y&3M+1=X=lvcl_9m7>W}JcM3@nMQhT~ zf>Nwm%^F_tQ1d!!;v%L*$1jMARD4NPtm&3-X_4q;ONx<7UXpC|1NxAR`siFb2lMEx zJX<<#p*%3KB&=RkuGhB2Kkf#D;snAyfTT%56)Bja1y@Y*OHxDwWDyS_emrzV3_qEd zVh#`j+m{p?;J}$fJcn>MA@$@7-H@+A8W5$kj9G;BsSs6L^+F(Pr|bDQVsF64&)0d= z_PJZOE7{IvyLfz*zdAmbO4V8@{Xy5A$v+;Ou#k7AubOJDZEjY5XUhw5wF2y_uFdy( zPR+`W0OT%&LmGsxZ(oU;?l_fU54KSYT;8AuPxWL=KCumZOE*+e#Xd8eYQH9byxj(DmUS z%dkDm67;Z@jTXX>g=jjp1dD{92=RU?>pwdY&Ox+2gkF1C=(a^bi_h^6gi9c$EMh4S zYqKo%4|iTq|2hjU{^Rv@cw^yWA6@Ifa5;lHN7p(LAR5`q1vrMj1d0|Z|4)=ppAL>Z QyM@Y7=Y+zKPiQ*%4JE_sX8-^I literal 0 HcmV?d00001 diff --git a/x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/fc_fuser.cpython-37.pyc b/x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/fc_fuser.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e554fb95b69524b12e28479c4b16a5f43c45e10a GIT binary patch literal 4356 zcmcInTW=f372Z3S6lp6;e39!obecA%EmM-M_>x*do1)1}U=&Cn3I@Sq#TiPgESKxq 
zr7h7*G>IJKp-}wLhXRQaP-#)LFGc#=6bbq__BF8`{)M7w`<>Y(B}y{d0xgN1%b9bV zGv}N+^J=A1Qt*53?<>wAul68)n;T zSaNRog`X*}E6ilpJ%w3Z-_jd4Q{Pl-g(0{oH*0EYzt)ZTt8T<=S~~SpmocBeF5GZ! zGhx#)!DzLSJ;8rte0P}uY&j0*UW*vv|+uZMj^Bb2!X|DNB7<+B6$HjcO8Fxf;&Dn*tx{=oj z=37k%BQEOUrkH{T$MHpUq>5^(z3JQ#^^sa*i`>r;~6V&!ck<1kQJMRLH34(k_sqbKi`k4gv13v? zVI5~DGPsm|Qew}tQ>z;L&KM`-bb5jlX8Slj3qF7{6FvB_GY?4f=W>p-th#N&qs&BG zFR`;Xvb~fBIy2?YNSEiO9YC>bhuqTIU@FlGZ_h4`#Q2YPNm&1GmxS}(vF0y4(4hJE zq~`LZbFIG*t#@USfOWP7wwr9W(-Lo-1^)fq49>^}p zxST^=UhLjM09PN~zyIx5pAR3r`|a=Ed-TaykG4O1{PDe3^Wi67KK|lYk3PNk^}Fwv zs`(G#KfV9x_d5^&avveI)(IZ|@zcj2eg4fCzj-`(@9}T{{LPoYe0hvaG#iVTFISh* zUAk6}*4&UEw(9fX93j^BFD^+^;;>x3G_mNli1ku+QW{B=xxs6BTsT*pJKS4ci@}9* zNBxkc38uNiSAgI0yB@3ilvAU6EZiUpI}ty(v3v-RxhnEid-<)!x$45(lY2Oz(2D0q zhpIyhhbeOv*jf6jBZ;-!=e!{7#*=Cg?Ob(ZnXEil-9&%+%3O7Y$L=z8a{X`(gM*3z z?ImzWcXdKC_apxQu%ZrtwrIHoQ@fd}8@TlJD5lF@^O!gSoUA#D zQEIR9zzN(opC<;md2m(rUL0LiD_zfLPUyxl7eW1o-)Xvj^h$k1)0>%abQz(kDP62r zYBev)(Zz)!u|?fs$$69(I^7sv81?K>c z$7>DgLLL4SKT#k=RoLiyXEI&#DxtoDTBja&oZGJO+!db>>5lD{LRYx$=&if`xAzK2 z&D*Zu<-Ow*`xytWm?nu-QVhx7vL2ZrHzhkpmpBVXL#h}?M2kZ@{9aK~BGjE>g=Fn9qIiJ- zsuX^+R!Ix_v8F{rm$^zBJGI;}p^e zSk7~oSnfVG#BuuA(lTe(~Vl(gDA zc4+i8Wf=&?$&F|%YdtEf6J?+z8tMzJZ^UY%4>Va;V1hw6|o|w4#^?{MdT18gH zT2>X;%(Nn-k>AOWa1I>BMQSdfN%gQ3)+~7vsVU3mG>ZX_kVYut9Gh!AN{hjWLs}jW zorC#e+6z$M#?Bb$7myN>Un|)EH0}T7X#e|Bo8&$8Bfp?lCX$XSqW2nUFTN2k8Ls2Bp;sL}89}o=+Qqb5>ShZtwOX;I1O$xKhy2XmK zN`g`uh8{tn10Yj36jmM-Q1aS4YQL1=?AC2oysBu5V`sR7BCG6Baef4Nr5*K#5|>ag zstIIE_|Y-Eob8QGx3I5-O8Qb_q1b)uftD11t$n2Sr;;fo?wRa(v+RL4q9SIgp~sSV z9?kF{l>VNj=@~TXlovQH?#5k#vn(UOj%PhPdmi2GBXbgRob zUQp!!wQ`mpt`qjl5<5#c@VeBL!?YwbqP(BdsqEIsCz zT5_Crhjo1#mmP;vdA1_oh2ka170c9+E@>ryls38VJI=1UNY|9c1zpwhNqYvvmMmDk&nY$uNMbfK+U`cj)Xw&aJ@CMA?UD3nn8pv7$witn|pM3&SmsY#47 zO`Aez2GTGvGfZ1kQeXymhqq zKKp(4J@;5w4@^#GBl!8B$fK3?JCVpA`9}XFFgSs~Hh~c_qV>p16tA&*Y%{(Rj}jNJ zCpMES$<5SCO1w|h$DWEj5;2lS>a~cGGUJ!xD`_M8WF$Z4KuY9fK5C~Q?zYVbs%TI(YseUMgPRrpuK>BDNI?3y&hSM8c`$mT6!gJ#Eryj 
zk(Gp0zLGS?j5OY-UXLUqD`PMrV;_37(J`%7vtI3(57g?lhFP^}jkOw8()mhF{A(Wo z=tMfvUgT2jbc}i~N9iVH#ZTH}#`$&H4O6pXQ0bg+nfbV#YuY$;R4S|WYP(&j{3`Ol&m7%sbdI*Wjq}yUTD7r0H-Gfmf{e;sX|DKE;pn+S zO9bg?v(>3>*0xOR=vud3Yc`J7JZ~zelsgsB%-Pm?i;1NJ?HE8Lx^>Gi-)FUnZV+xz zQ$Nq?cg!IB3~}TF0&zNWI%>pgkzUkDT#Bwo8SKl6Xe2-x6O?oaB_k-A5K2~1vLTdl zK^Y67ObALMgfb~8;~|t?f-(_8*)1qRtEY@T#&j(TZ}$r7zRRe4`#L$W_UKlI&oo3>}ulO2z1;y6Pbxzh;@>^ zm~oSF^X1rucrT88&4$i!7G1n zGp3E(jXUsr=XL_TcMaI=MJqo*lhFcR_%leq8xny9L?x(b-7_HNu!suqLduaodU!k% zh({t2k69rF^(&+t^+{p0<^&bhFrY)!d@tcf3q0Z-qm|In8qnrNWxh|7j0vzpQ$Ljv z0S*w=tRhnAlOW>(tdLL)P#KN*>0oUnVO>(iBGpT|Yk5J;TvV&V!!HT^@&LXh@Otfg z{N*8X?iF~wwmkm(1pe57{NnmK3pe@@_azwz)72>hTupC5vM za0vdPA^3*}@Q;Y9!kQ78`hv*oqXK?Wz%OA&Laj>L`55vp>4=@m{+ESRx&Fx`vk$D0 zc~Z#v3V3vnNS$U400#QQ7;0cl)j+zJR=T7Wp03Azy2v9}1+ZEddcU;9qrzT6r45=o zhINr<7a7nhCF(g}_091lqMkmkzTVjB)YUYi=z8WxRK^vujVHE~qQiO7L==U_Cw=yW z$1wwsGJ!bCzJj$OBs}Glknu&qo8w|(8D{k|s%(Ur~Fo|sKQeDrR<6>bKV?C+XyO$^Jk3U`$zRZt` zQ0ScAgvzbV7owqbiwqiT+X=V-F{`LJKrgH7KA*g-0xM#+5uk=;bvI(RJ0z>nBP7&( z61*q?J0O3zFA8B2aYY|Kr?*>0LHY+?qT~>e^7M*ECXF3oa z*+mdnA))P)!02?K9XLSjN7a?D+s6~^zAuGRFN%n5yntKajH~_C zF0qRP4D90kcX@kzdtvRjU?)5QgdX1IVFa1()qp%bu?q!OXoRi+m0eTn2Z!>Pmgs8` zCCG{dR!F!Y^!*KZ^i^`Nuof_2?LI_)pNjnc-hRjl#tm{@Egn^Cp?qz_KGfxY)mg(N zV&|Yy}atPl99RuDsX^U zNvft^>*M)0sK5at0*dE5eLS*WV6pJ4zpf<}dIZ&9*F448D>^;SHN^dZyGEc-X4=g$ zFw#@kD`05%>)RP2gIz(c0KJC()@NtXC)H?(Te-~JVe=j6tjPHtbw1cqs zkg9KQmWzcw(A+zyb`I0v*82>vv$@#bbin81BF6U%D5EgUf8-OI?t|yw+s+El*>B}p z)n%W@#L6y)orQ?~8)D`a@aT?!eN@P9YOsI30*#mlh`V-G1>g7SVl_>P$l>l;==z5~ z>JJ1JSV8?^AJyxR(4zX|Klbtb6SQLL2pFM()58 zO2RLF5@z6$K5ZE_=T~8KdNZn$AQ>J2y1Qhr7IzPcqtMz*DhYQ939@1&bgKiB<(O44 zEwYGMNqsJa7USQxCq?J+&Sa505wjyc>)mr#p2Uj&F0H2oR>-7~{YlvK@7sHXJ^UX? 
zMxBLY$7=jq{8K26e+vCa_d)!#BzOJFl|R4#8)y6CpWl1s`rGeczx>k=-g@oYmG6D< z+xM?u{KfSvKe%@B_t)P3!w0|n;q`Z3`{Tt|vpMw>_Wzt~|MlwiU%q?o_g7lyI~&c$ zwf}tQgExNjr{DeTgR8H6@blmP=?~w%*PTu&=L)6bLhg9(&WFyen)rWFvvKDDeG&AN z&4xKZSOTFifL#W*IDjnyJ3oLe13TA`U6=!QVE{W1?BW2n0Bo3X3q=sSS*U&sfet|Gs% zI9qMC%!Wax;^LqY44R-74(HC5@mJ*EARog>5yN~XUHe)dn7+t((`v9uIm-dBYHOr| zYtRX+T5FrRCkWjF5(X~>wJ3)dnL}y{R*hM2n}f2cb5s(wO!6q_7W-nOSyVD9PU;WV z9NCnU3n5NwDJ<-#tZuNBNaPQ#Wx9qfB*`WDn2v|5a5@lXAIB0iGEmii1_tuZr#8%- zVXjrX^-k`Txlw(ohWcX-`WnO0ZmV70Y}L)&*;;2Kw^nafJGu61wQg3{s;eC^pRLvF zxz!C8E30;Xc4>Al*Jo95Z3b*6o^UOQz~t(w_BoU1mB9R7tI zlxfr~bG1`He;5+m9kXiWnrk`Bte>wn)^p894)SuV&COP~V}jJ}V32F$U)8g@N7r)a zn_Z}FZ_tP{rr8qQ&AQ?GcsK`t@PU9_+w4H95>)Yfb)1Qs%{^(FVz$v-?QWWlPL&;w zwlo^EPwT)iVusnN*6QuqT(JH5L(Wb!SU2QA?w44Fr9&B#1q#>hOCbk@Vy zi`J{tQ`}jvo}w-wrQqe8RQ&_nOR6kO!R>|S*fFCerDb%kw3g*?CuOc6YIvkRYOd%2 z-BU%fG_tHqYOO4Vm(tEb(4`pM`{^KRTa=bkaDS({U~O6LMqu@atS-zL9y3Z*^;!xK zyt1s!B&vNxxe#9bnpM>crraV{LUDL`$+VV-?JKn7A2YROwV^71%wDAD4+#x}r_ze@ z_^|0%-XR^T|CL6TV_6kS`D2!&p1%~H8kLsOrk3YaEtbPg-Dypf=X84JcMcvaL0H9E z!|Pd=V0lipoO1XcLYLLKLU~SBKV}TQkc{?&vOZ3EYD?AXTN<_c%5(Y(u)s*6?uJ}X zC{#sh)U{vhX7S23jW`Tn)0$n@TzPIecfYV9d{vQ~KM3x;a&AQD*PJS0OQU6SFG{&{ zfg8xBV#p1ou)zm7O*SU@`W)XCgLkJsUUx_~hJLf!*_g%6LDRzR8{~|R6{nL;UXUq4 z^g+xUT4P$HOde*AE^*CHEvf_U5M%!gmB;z;wXZq(x^wAkZl5K~;d@fesb4umb+HdI z<6{IyHc|v=$EedJm{XAb3})>(m_>&UMCFs`Fv? 
zF?v7bZcL|3$4D+z&gQKx=FNv+g8JsoscaN>E)L36nJA3z->TJ>Kg4ROZMK?LCwI1L zHE@&N-Z8rcWcIMosWC8 zXMIOpcZGVx_|zIWSUOHma14QCriwTU1_Gf#vGWv+Z;FdJ#^PyLWwmKFApN*Fozuw8 zrePi*QX`PPC}Rd22kaQ$M5$F>8$wTQX+3{5pW4I}hu)Q!i>m)nthgyz2yZR2rGR_* z1$fwQZ&-N^catuBRB2vF`_GAGKuY=)dC}_&?xQIBd7LNtD9QwY5$)iCgiFzbdW@l;hc-lqk`*Jthx9+jjD$YQ1YZxX`ro)WT~|~&baS=v)OVOTIVnLv1m#@Y6)Y;#RU034Lq%tsAH-< zX4Xxfy>5*uTl8RF8Q_J zDBSd|H;8B7v5`2l^F@UWJk(PC{ zQ|p+UDn#Ob$@wyQP7-{D;Bj)x#f8W4oUih&Ojf<)JmDiY42NARpV6j`3Ww%d(1S%i zSki-KJt#R(QH|_9dYk7wOGk(TAZ?W4rRe+VTirh`vG4sVh9us_wbnDQt-4S@%VeE{% z|G@*N-1+1azG9dR?0@X6e9N7WbBZRc5*P#~!5Tn*@~;)|{GPv-W;bs;WEvMlUNZ7o zdrWN(>MD*$crhG?-X_tXY5qBW#GwUJ1Mpq`5P==g)Lr{ z>@1FIT1wocQc;n!GPN~Hn77?wVP$WCsh_pDG800VTq~}W1#h_M4VS#(vNtTP+&olb zzufXl*0p)gH=g&63oFxFo^W0G>Z2D|4h_g$L{PHy>ep6yr z|J)S#cd27bCZY*3KLITXr6nOar-X(>&^i2<+HAx~zGIwg-M(eIy*owku#*M4aG`RaL~-ld09Gb<&H znKBmgbz1OEK*s^_YE4q2tOp7GIy8n}P#!Ij7D=rdvVNtv8cu+pH7#ex@J@O(x)!z9 z%!;RnzR*L7;h4V~%Qdle$A`N|Tjh-ue^U74nES;xx3Im)RXoFvC)i^b68H*-_|=$r zMm~xsXP^qF`2?GC~lo&SEGyrz8L_qF%S*JBZ1FO#*CPV z46$(+i?7rGHYIqIE*8&a&qVn38sFxeF+m+0LQQv)mm(K3y=V^{_|$r4CW5DCv;4LJ zg@FcO#*ak|KIs-YjZczZ$o7!T(`h5K9En9Lc#4<2S2G4a5sJ@r?gAe^m3}PJ$zB}? 
zEj<$vcs?U0@e}X@a*e4q_|;3|Q={2l3ZF>dvmNVYeiVBndSSdbjst}0lg>p1NbIOA z-pq;gTfCzaTQy7E(fuFqi+P7=XT-L#V$?S6th~eLcq=WxO5|G?aK}wPA!RG`tqZef z&D#0~Zj~`SRjanQ_Q`FX`s$F`$HZ5V`0wNMlfGKxoR9mH5~;q6%#HE%Ro z)a+#Pk9}+!ePtTI>6at&_xW^6 zQ1LZ0$wqe*-@x)~u-LciuWy-kzrJNwZ&PHKuq=cnva;W0#Gxg=!NqE#uWZVVBy(p^ g2N}AZT3smgEcL=|r;ka`@8>y*Xe8{ixE96#A0p(PI{*Lx literal 0 HcmV?d00001 diff --git a/x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/interpolate_bilinear_fuse_pass.cpython-37.pyc b/x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/interpolate_bilinear_fuse_pass.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..306f0f34dac5878106c9eeaba4b5707708e735e0 GIT binary patch literal 1029 zcma)4&2AGh5cbdRW{DEuz@btl1QM4mQkM3NP?bs`E>R^eXfKuF;l#`6$Z{RsCxuPlZ+~oF-0@3nBtct z3pxy0*k$$m|4D-={KNbPgE+eeA!dL=Ac84{94NSQ&=XL17feI0=$d?K>44(sBw-ej z`B;d&${QiDQm2dAS7PtL!4H#`Rg=0!wUpINR`X|v>D!}IsZ^nb(jRqEoAlF>X*bf^ zG8W@0BdI5WX)&S*r!mD*Ql*4+~jbp9HwJ zwAlOb_#er!LlzNO9Qy~LE&>y?fW^Gsnz7+sb=O-OdBz@(-wuQuLgGcJAtb!0KA6kko068t$J^m_)+`AOup~6^i?~{$TVYZs}O51p>Rj-)v)AFMw2A^ zwZ4g_9cqWWp-Yjo5t4Hy3^Sv8s2;r-?uO6CE=IbM>v28ye)wXvo#;Y`eW6QXJ$^1? zp5)JQ{+u`$Gf(QtZm67WB=i)$>mkzUL@%kYEtB;yIL!~`_FBs@NuORgYty?OM|0|U zCe@2u9rT$A>P(f(wMMO7F8AW?rdfsi%H{KhY1Ntyf?2J4t=)^(8tvXz1zv8K^K-dO ztQWJ4`jWX7Y)TVMtSC!UzK`Wr#cV^fv2wXuuUJ;O{Li63)(@{X+J~)9W3AFysx+2! z`NJ<1L>$Yyao(smTZbu*J+J!82NZG?sQN^TuMh2yfKEuqm z*31bgxf{P0X&VYpg;U`~I33o)smS_$!@`+WzH#Du4~WO``vv@3Ck`N?DQiNAV(4t> z%_;NW@E%5tkwbdChS=(fOX20Pp1guYmwhQcCg4N*$Mm$Pe+$!(>l0UE4u`F^5HPk4 zaGqq&pkU71FOmEyU`0IgqwN^$5xpLNpK@%+zR%V(+G$l0X6|7Zv~o9ag2M=ndt?!lP(g#O8E5m4UC+U*79r>?{u4A(E( zfcC;KfV1|4{r9n7RR5=${(hfdKEw8-hdDgZCn4DK7%Y$ZEPs%_dw{8@^$goh9(jo2 zg6m!hk0}ofaB(et*rNyW5q1K~TK4w}D^>47radIBBx+$J;7pzM@g@tSXZ?|lL$kQF zFVRha!(sLk;=>$roF#c~3*63tXG~@c;m@+)fx-B5EOD>Yi28*_fOCJ5dCUV#WnJPd z>4nypaY^WvlzOGQDMt=;U_J{zIn@6U>kYV)A%Mk&hD0f~a6qB-KB zVU8Sc*aJ?F$avqTq;B$T^7 zrOMPXkF_qg1LX{Olp_isvEQD~v! 
z{RN*EuAc!{S~wAPd%=@?Njnwqw8sN|`fx;HPhdoqo`Jl&X&F&qIdR3a{>6caf5qX`Ax+23D#4^m2GDI!58|g8OVi`Bp1HL;{`u=5f$Zk!A-1Bu3zB; zwn6X|y$;9j7GHJ)SO3zr2(JT_*CMXq%N(ART8Z_`&3nMP|I6GjBn0REIQ%p2^N+AV zN%)F~16d&ES3sHnQ+>TpbM6Dmy(hR=t*%5ZW;4LK|EoS~n#WRo^VkHmn~>UV?QR7J z5xr-aQblh9(by{IAQ#(#^86>PQhIy+GVY1xC{F~^bjM}1;4fq@+JI;w&TQ}3SnI*L z0C4U<>G4N!w6?(?+XAAcBs}Zk;N}~k1N^sntoCyh83t>a=9EuoH-><7|8qWxK{1?! z&Xc|vidZNK&wDsfghel)1N%wD< zEEH=pR%+JZg&`gQocq7w(I5F6kj2~Kf$csIh*1Q+m_y0KfwVr&qXpWypseF^U+>eP1q>}P^cCh%>%$Rq0iev`tgn~6IMt`;4tQ~g^y1F$PUs-^jDk|_ zA$A}~cFMV6IQBW|5eJ*?uCw1^Gi$WU8a1Tf$>t{P0TeA_Bdu!r^p5B+p%*Z;&`aW& zeK;b+mF0++v$~QL(F-}O1>)%R(urV-vlM3p-T^sLf;{LgHM6e87>ja3co{O7Tie4e zI2(4s%e$nPcXxM#Lx;2KJX4PAFY6ch#IXyy?3R9Z@f~P)yJxJrx$i)`+kFHll?FLbGe_3t6~zpwv*Pld_>D$M+FDIMyvn5%ucQ|a2|jY+xCFRHmBOqjS)ZQAvn66 z;;6w~zsAwvs!W<|GT5K%?^D(B&7F;RWVObPO~NBX)UQNz0B0Qr`!{P;6gh3cZ<{r$ z%^Fo$Uy$PlwH?f&{by^`fp}D*df@bK)~Hh5%^DT&cX%(08WOL+;^gG+gOD?vJE*xh zx#QHkHfvPf%^FqEDPyxn<=+i{95pJj9~8TIwIA#|v2a}nXO>#nIhlyFu&TEG+%w99 zPZ9gDuifX{*NR;_<*?7QyYn(wH8(X*-O8Qdq2Lae?hZbGQC*g$@%W|6i@##{OFrNI!-f{_hgSRL zD3*0YwgmM#9>w6}SmN3V-ME+dD?9gNjH}vWA4lF-2u`ZD=<-rN3*^<^FHf(~M?_56 z?bK<=^8ek+B=j6vnS`Ef`(|a5$L(fi(is7ii+_&FWZ+8M-6uy5)?QzxA`rH?IER)~`Rj`RYI3y!zISSATos-QV5%&D%HMyZrlCUr%fD74%Pl zH~#hYoB#Cwjo)6yzYeZ68#n&ty<0#0)gONIi(A)UyYe1L-9e>vFt;$dAIBle*0Lhb4uT0Fjg+Q#0qe@aXj+c@WY@>+R zTycb>?k--^6-7#yf$2rXKq1kX8H%)_BUlEdcQMeBULh2e`9Li66y>xFez%ujAQ+in zMVYY0VBagnJX2$_Z>j8elniFEsNJ{Nw=jB9MO2H!rb>nSr$v?)4)WSupg%W`nxYSO z4mi)tsiv5}&3J%dWIX2OJXjn#+s?}wd_H)F+t4tkADYb~G#hFvQ*l06(~Wgd&Yj)R zb(9QJso4?kBQ%PMoZ5oZ}f7h%fepC3LdN1H*s?mk0_T4^yf7z#;k0OrYCBeA8Y zMr{Hl(*??2x#O81F{~B3%sP-gss#@T!lW0>TA(nD#-bfDVa~W4;zE9Qes*D|I2*Jp z7QEe-XytS>KX;mC`nTLJs{2G}Y@Tjs(w93X|UN%q^tJDYG+t8|r24QDkA`_z!DeY(y?JJIW zQDmVcpTnj~6-o*LMG`N~g_(SgvJwRrqI-D6&QoZS&rw}&?B2^fxEO$dcX zDkldP5wIM+h!Bd}a?G6;s%|Vo3{n#H5WEKtBlDdwXifAjZ~0t6J;U!lL`Bt`n8k2Q z;GlUmq=Tn3Vj%Y``GwQDU=s;RuBJfB4?}{ia?PUQP-#>R80LPUb$fmWSzV#;ieoLO 
z`rh*xUzIcUzJi&uRZKE|mcy52kEQc7*+yrzwKko}wwv{u)t(M^5WWm;U}};cpm`5g z9@Kcw5x~`sMbnPxx$i(=FoBt;J%F?fqi(6wsL~*;)U%#hYWjjGUSjZJ7NBSHhi^c$ zP)s*8E0wEFvw`a8BJV8}aJ8u$i@9KPz_B1A355n!9lmCg0kjhc7+*tct|~@~R%}ec zXP9dFyjX`=)S?zVE;!8={U-gHPOYxv9)LE!q~FLMtv9hYT90NGll7^=(D&jT`}G~33_ir5LphtigpZ3K z68zcfCB^Dyt*2UMZ8eKrkWW_UjETP}glxK?!X`B{QS*~{Gw(&y3$Lw@2?~r+dkaA7 zbF`pv@UPc+1v2Rrr1hQZONXbPg@0oDx}-n z0ql)A)pW1kCS7zORW)AfROXOPYCc2F1B50N zt6BDg1j3~WA$yvd3^fm-Ii88S{*jK#D@eh9nD7pwkqXr;R7&kb1kF-&n3^0lc{HPV z&DaG(o1up8|Hwp?l$?6V+jG=gq-LI)1vJuxhG8Ee&?D4*mYSo~EK>6*HOHv=JT;F| zL$^fOU!>-7YL26koR-`66ZpjUbJ$;^&M#B*6*MB*63-s1A=h46A7>vg8|_-#Sj|LC za;r&h6?oq{c}#kR&!z(S?KUdO`}4pRBziS>5~{i z*x=hVHDqGSF|LOcq@W&V>{CQJ!K$j=v;7>wqS(snJ%{Pcj zWGesBt*qC`Y|6)86wh{v5X2zjos@l=$mot0srqWCZof#dZ=uP=m6D_MS2(kFnFzm) zW_=r5<8B|;?+uuTJbR7xUBMd*?{pKb)~)qDfqa(om{Mp?l5RX-F$*-Es2PSg3YZ(B z=Zv*p{CuU}G3*M^d&#ocMD!xYOWq;ei_r}*>l&h-+VmX7;= zdU0OLGWVD?8JhcvZ5m~vmSi^}MU(~ASZzskWk3Ao@&8VK&{U0?{xNmqJAiwHcoeuP?tGQGYZCfTc_i|_g%ja(UwZm^X?hQN<-))Ah{Gp{7# zNS9!_{&6`{ZVI2%lV7$=<34%0sGJ&=Rb-pWEA_7OaEj$cpcjo#rMnxqL8&gpeNq|` zgk<&Sxq>LE{>*SA6$3T>623b!h`Z8T_>bg_GcL~y;k29?RPNp0Sog~C<|A=YpT&p} z_VDcGj9t=^AoK%!BGgVsN6k%}bDZ)j#lJt(Y20}uqH@a3MHiy-fuiHaT*N}nQGRYh zvcPx88|mSWNRg$A-pxw?Jng0Mk5%gNS`cKdtGSSU=Q6!>g=@W6F6p7LNXqS4;^MoW z`gmqSZ=76vls*KZc&Q#Qc7OEf^QeqaesJe4_sJVo5fjQLl?#J$B`*mbDU(UE8_CT% zRg7(K{@?way8htp6}qZ1>@?{`C8IcgsUl$g2byl^I_^)z?TL|#G29Fqy&mD)7{j=W zF^>BP@s4{zqun@eS&Uwfb@}d!0KA9?FQ((u**MYS_5v2-mvOb(vub^2Hbe&mrbu$InJ6a0m%O5gvx((ni2z91x@df5C|Q5dujS(-RbK z@Qt69&w@X}fs;gtladELCG-Ti3BJhzzA1-q;$n(58FL_!>q$K=zM{xlZ2@(v8!zLc z+UrWUD2m48C+r05HL1d9)~TVwg1Jf!m7CS+J;xbhwg|pP%>^`>lqW6d9N$Zbs;c=m zp-`f5dBl8+kjKR7`^N-*NA>r{#8I9yAue)&IGaDAhDs}aDPjHyus>23P;oJYxpf>r zv`?d%b3FVBo=@qSKB-=spGS;;U#8=(UEHrrRdU>=n=Y48E$QG-!sXt0xlC16(ZiS1 zns1VVKc$A$>P^VIa;rwYUM{agC;Jw|;1t50K~hA~iqy2A z6vwP$H7|G=^D3(2BBn&g&xndtd_h#K>5lJck?3Scih)XAkZkZB>X3~3=v+Dn{ph)N zOKJ4emD3jHV*~5L>P6`~ZD+W9-xv%|A>4gPniN!#f+<>X#T36FMbzUKagXE=E|o?! 
zf7|w0Vh&&efo2p+VZ)w7Jb`dGA$8<4-I6asDe$J#j9Db$Qz6Qx?1VrOPFM4<#NL1( zKW2H|bh%qKYuU_Yvv_orzdAmbN>y4Y{a#nC$v+&Mu#mT=E9-KjZEn_GYiCRGcLms$ zZIk~)c4F3c07!=r4!IDzIlQ8AvS&978(4ys*+NeT-h(;6R`7E%{g)Q?2j8&>=RV z3EkYiN;T}6W(iu_$XW~GheFh?T7hH2kA!%)lGX1&5%xj8y#)m?0iyX9Q7t~!I}k2` zl(LAWJgiN#)IZo;Klx)c_<6 literal 0 HcmV?d00001 diff --git a/x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/reshape_fuser.cpython-37.pyc b/x2paddle/optimizer/pytorch_optimizer/fusion/__pycache__/reshape_fuser.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6aacacc23b30721a8e66c5ae6b8a3ccd75e8917e GIT binary patch literal 2292 zcmZ`)-)|H}9G{upz1v=|r9z7-iSd*~Ia2Q0(iSC!2oiZQjS&;gDVZ!c+jh&|-ZHbR zy~{NL+Q@^!KKLdLY1J5gG59J5{*QeHTKX>-AN+o2_gZ?$Zsxn6nfcClzrW|=z(9e( zbMfD6`~sjSIO#qnfDd3OB!UR)lNyCD^I5~F85GwH-)vYltC6ea^xX9GpOMQ%SR%JZ zM9wuQC}8Nv|e8mXD-&uf;*3mfLSJH#ZlJgCr@6hC#<|GpsI8hT1jN zW*B)5ukFfe*ovC6zR34i=40hGgQ`re;xKh(C2UCyDjK2~5JKA{FWOz{s<-C=zzIuaiK1hL<I zG}ObvAY>p#42st_OmSE{{0KOFIDP`kl(yH`pFe)EyK(dRueY}EJ>I_i)6S1;na$7c zJ=%Hr!}k5PCpT{wO4-xh`}6kpZ(Gm)UI)i5HiKt>+~4{B!PAF7?`+=M`Q@*tkG?y* zcLEPCPs~i0&Xv3%IstI1+`Bmgn_n(ZOushg#xnZj~$5qwb=6o51lA-Wl)**n{~%m=PTWVZ99}+qXJ$?EH4P-sC_82 z;FO-88keXf)NW4CRg!PU5%8&YpFZh(>ZLOiy+S6TWYY8F*>@uQPVS=k_VEAtuvg0I zGLwhUY)dwqAVjYr)Wkc@{v2Y?6+# zMWOtRO&T$v#ulW9prho~tGau!If-hP&7>HI!in6>RwHbNiHT85%t z6hcQrK0&fTq#I6DUvyQkGlskX8c)On_nTDcEc{4eMEAouAeMFUr$Z*-s?kCIWcVss2>%?ywU$iUSVOYa-${_+Xr*fh7uC35xHS8OG> zlJDd@)^#Q?b?^-9N|Q=LKkAvpO!~DF~Gw~VSl&Bld=RdFThYkKnN|uTY$GnhpA25 zqX*phVie0Z-a1(LoF@g&8%+`Wh>M)xh#f!O(f5LU6C}%bk(7WW1KG_~cYUApeTASz zumMTzEBanT7bpyoO^b$o+_v;8+^SX(H^NrJf-srtUR#yDOA79zo@=X;RUA*#NlJ`= yZDj9gy(!&FD4URg4Qb%-F)5P62N~kX^R)*1DyJxIgtMs&&SQV*YE6fzZTttH<6u$% literal 0 HcmV?d00001 diff --git a/x2paddle/optimizer/pytorch_optimizer/fusion/adaptive_pool2d_fuse_pass.py b/x2paddle/optimizer/pytorch_optimizer/fusion/adaptive_pool2d_fuse_pass.py new file mode 100644 index 0000000..737ecef --- /dev/null +++ 
b/x2paddle/optimizer/pytorch_optimizer/fusion/adaptive_pool2d_fuse_pass.py @@ -0,0 +1,33 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from x2paddle.optimizer.pytorch_optimizer.pass_ import Pass +from x2paddle.optimizer.pytorch_optimizer.fusion import AdaptivePool2dFuser +from x2paddle.optimizer.pytorch_optimizer.pass_manager import pass_register + + +@pass_register +class AdaptivePool2dFusePass(Pass): + name = "adaptive_pool2d_fuse_pass" + + def __init__(self): + Pass.__init__(self) + + def apply(self, graph): + fuser = AdaptivePool2dFuser() + fuser.operate(graph, match_kind="topo") + + +# 用于注册 +adaptive_pool2d_fuse_pass = AdaptivePool2dFusePass() diff --git a/x2paddle/optimizer/pytorch_optimizer/fusion/adaptive_pool2d_fuser.py b/x2paddle/optimizer/pytorch_optimizer/fusion/adaptive_pool2d_fuser.py new file mode 100644 index 0000000..93085ea --- /dev/null +++ b/x2paddle/optimizer/pytorch_optimizer/fusion/adaptive_pool2d_fuser.py @@ -0,0 +1,133 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np +from x2paddle.optimizer.pytorch_optimizer.pattern_matcher import FuseBase +from x2paddle.core.program import PaddleGraph, PaddleLayer +from x2paddle.core.util import * + + +class AdaptivePool2dFuser(FuseBase): + def __init__(self): + super(AdaptivePool2dFuser, self).__init__(graph_type="dygraph") + + def build_pattern(self): + """ 描述需要替换的adaptive pool2d图结构。 + adaptive pool2d层模式python实现代码示例: + x68 = fluid.layers.shape(input=x60) + x69 = len(x68) + x70 = x69 <= 2 + if x70 : + raise RaiseException('Exception') + x73 = [] + x74 = x68[-2: 2147483647: 1] + x75 = len(x74) + x76 = [2, x75] + x77 = min(x76) + for _x79 in range(x77): + x80 = [6, 6][_x79] + x73.append(x80) + x81 = fluid.layers.adaptive_pool2d(input=x60, pool_size=x73, pool_type='avg') + """ + + def gen_name(id): + return "x" + str(id) + + self.pattern.add_layer( + "fluid.layers.shape", + inputs={'input': "pool-input-0"}, + outputs=[gen_name(1)]) + self.pattern.add_layer( + "prim.len", inputs={"input": gen_name(1)}, outputs=[gen_name(6)]) + self.pattern.add_layer( + "prim.le", inputs={"x": gen_name(6)}, outputs=[gen_name(8)], y=2) + self.pattern.add_layer("prim.if", {'input': gen_name(8)}, [gen_name(9)]) + if_layer = self.pattern.layers[list(self.pattern.layers.keys())[-1]] + pattern_block0 = PaddleGraph(if_layer, graph_type="dygraph") + pattern_block0.add_layer( + "prim.exception", + inputs={}, + outputs=[gen_name(9)], + input="Exception") + if_layer.add_block(pattern_block0) + pattern_block1 = PaddleGraph(if_layer, graph_type="dygraph") + if_layer.add_block(pattern_block1) + 
self.pattern.add_layer("prim.list", inputs={}, outputs=[gen_name(10)]) + self.pattern.add_layer( + "prim.slice", + inputs={"input": gen_name(1), }, + outputs=[gen_name(12)], + start=-1, + end=100, + step=1) + self.pattern.add_layer( + "prim.len", inputs={"input": gen_name(12)}, outputs=[gen_name(14)]) + self.pattern.add_layer( + "prim.list", + inputs={"input1": gen_name(14)}, + outputs=[gen_name(15)], + input0=2) + self.pattern.add_layer( + "prim.min", inputs={"input": gen_name(15)}, outputs=[gen_name(16)]) + self.pattern.add_layer("prim.loop", {'input': gen_name(16)}, + [gen_name(17), gen_name(18)]) + loop_layer = self.pattern.layers[list(self.pattern.layers.keys())[-1]] + pattern_block = PaddleGraph(loop_layer, graph_type="dygraph") + pattern_block.add_layer( + "prim.getitem", + inputs={"index": gen_name(18)}, + outputs=[gen_name(19)], + list=[6, 6]) + pattern_block.add_layer( + "prim.append", + inputs={"list": gen_name(10), + "index": gen_name(19)}, + outputs=[gen_name(20)]) + loop_layer.inputs["input-0"] = gen_name(10) + loop_layer.add_block(pattern_block) + pool_attrs = {'pool_type': string("avg")} + self.pattern.add_layer( + "fluid.layers.adaptive_pool2d", + inputs={'input': "pool-input-0", + "pool_size": gen_name(10)}, + outputs=[gen_name(21)], + **pool_attrs) + self.pattern.build(inputs={"input-0": "pool-input-0", }) + + def insert_new_layer(self, graph, parameters, matches): + parameters = graph.parameters + new_layer = self.gen_new_layer(parameters, matches) + new_layer_id = list(matches.keys())[0] + graph.layers[new_layer_id] = new_layer + matches.pop(new_layer_id) + + def gen_new_layer(self, parameters, matches): + layers_id = list(matches.keys()) + layer = matches[layers_id[11]] + pool_size = layer.attrs["list"] + layer = matches[layers_id[0]] + input_name = layer.inputs["input"] + layer = matches[layers_id[-1]] + output_name = layer.outputs[0] + pool_type = layer.attrs["pool_type"] + attrs = dict() + attrs["pool_size"] = pool_size + attrs["pool_type"] 
= pool_type + new_layer = PaddleLayer( + layers_id[0], + "fluid.layers.adaptive_pool2d", + inputs={"input": input_name}, + outputs=[output_name], + **attrs) + return new_layer diff --git a/x2paddle/optimizer/pytorch_optimizer/fusion/batchnorm2d_fuse_pass.py b/x2paddle/optimizer/pytorch_optimizer/fusion/batchnorm2d_fuse_pass.py new file mode 100644 index 0000000..1af1b88 --- /dev/null +++ b/x2paddle/optimizer/pytorch_optimizer/fusion/batchnorm2d_fuse_pass.py @@ -0,0 +1,33 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from x2paddle.optimizer.pytorch_optimizer.pass_ import Pass +from x2paddle.optimizer.pytorch_optimizer.fusion import BatchNorm2dFuser +from x2paddle.optimizer.pytorch_optimizer.pass_manager import pass_register + + +@pass_register +class BatchNorm2dFusePass(Pass): + name = "batchnorm2d_fuse_pass" + + def __init__(self): + Pass.__init__(self) + + def apply(self, graph): + fuser = BatchNorm2dFuser() + fuser.operate(graph, match_kind="topo") + + +# 用于注册 +batchnorm2d_fuse_pass = BatchNorm2dFusePass() diff --git a/x2paddle/optimizer/pytorch_optimizer/fusion/batchnorm2d_fuser.py b/x2paddle/optimizer/pytorch_optimizer/fusion/batchnorm2d_fuser.py new file mode 100644 index 0000000..6b6dab4 --- /dev/null +++ b/x2paddle/optimizer/pytorch_optimizer/fusion/batchnorm2d_fuser.py @@ -0,0 +1,158 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np +from x2paddle.optimizer.pytorch_optimizer.pattern_matcher import FuseBase +from x2paddle.core.program import PaddleGraph, PaddleLayer +from x2paddle.core.util import * + + +class BatchNorm2dFuser(FuseBase): + def __init__(self): + super(BatchNorm2dFuser, self).__init__(graph_type="dygraph") + + def build_pattern(self): + """ 描述需要替换的batchnorm2d图结构。 + batchnorm2d层模式python实现代码示例: + x336 = fluid.layers.shape(input=x334) + x336 = len(x336) + x337 = x336 != 4 + if x337 : + raise RaiseException('Exception') + if False : + x351 = fluid.layers.shape(input=x334) + x352 = x351[0] + x353 = len(x351) + x354 = x353 - 2 + x357 = x352 + for _x356 in range(x354): + x358 = _x356 + 2 + x359 = x351[x358] + x360 = x357 * x359 + x355 = x360 + x361 = x355 == 1 + if x361 : + raise RaiseException('Exception') + x364 = self.batchnorm7(x334) + """ + + def gen_name(id): + return "x" + str(id) + + self.pattern.add_layer( + "fluid.layers.shape", + inputs={'input': "bn-input-0"}, + outputs=[gen_name(0)]) + self.pattern.add_layer( + "prim.len", inputs={'input': gen_name(0)}, outputs=[gen_name(0)]) + self.pattern.add_layer( + "prim.ne", inputs={"x": gen_name(0)}, outputs=[gen_name(1)], y=4) + self.pattern.add_layer("prim.if", {'input': gen_name(1)}, [gen_name(2)]) + if_layer1 = self.pattern.layers[list(self.pattern.layers.keys())[-1]] + pattern_block0 = PaddleGraph(if_layer1, graph_type="dygraph") + pattern_block0.add_layer( + 
"prim.exception", + inputs={}, + outputs=[gen_name(3)], + input="Exception") + if_layer1.add_block(pattern_block0) + pattern_block1 = PaddleGraph(if_layer1, graph_type="dygraph") + if_layer1.add_block(pattern_block1) + self.pattern.add_layer("prim.if", {}, [gen_name(4)], input=False) + if_layer2 = self.pattern.layers[list(self.pattern.layers.keys())[-1]] + pattern_block0 = PaddleGraph(if_layer2, graph_type="dygraph") + pattern_block0.add_layer( + "fluid.layers.shape", + inputs={'input': "bn-input-0"}, + outputs=[gen_name(5)]) + pattern_block0.add_layer( + "prim.getitem", + inputs={"list": gen_name(5)}, + outputs=[gen_name(6)], + index=0) + pattern_block0.add_layer( + "prim.len", inputs={"input": gen_name(5)}, outputs=[gen_name(7)]) + pattern_block0.add_layer( + "prim.sub", inputs={"x": gen_name(7)}, outputs=[gen_name(8)], y=2) + pattern_block0.add_layer( + "prim.equal", inputs={"input": gen_name(6)}, outputs=[gen_name(9)]) + pattern_block0.add_layer( + "prim.loop", + inputs={"input": gen_name(8)}, + outputs=[gen_name(8.1), gen_name(10)]) + loop_layer = pattern_block0.layers[list(pattern_block0.layers.keys())[ + -1]] + pattern_block0_block0 = PaddleGraph(loop_layer, graph_type="dygraph") + pattern_block0_block0.add_layer( + "prim.add", inputs={"x": gen_name(10)}, outputs=[gen_name(11)], y=2) + pattern_block0_block0.add_layer( + "prim.getitem", + inputs={"list": gen_name(5), + "index": gen_name(11)}, + outputs=[gen_name(12)]) + pattern_block0_block0.add_layer( + "prim.mul", + inputs={"x": gen_name(9), + "y": gen_name(12)}, + outputs=[gen_name(13)]) + pattern_block0_block0.add_layer( + "prim.equal", + inputs={"input": gen_name(13)}, + outputs=[gen_name(8.1)]) + loop_layer.inputs["input-1"] = gen_name(5) + loop_layer.inputs["input-2"] = gen_name(9) + loop_layer.add_block(pattern_block0_block0) + pattern_block0.add_layer( + "prim.eq", inputs={"x": gen_name(8.1)}, outputs=[gen_name(14)], y=1) + pattern_block0.add_layer( + "prim.if", inputs={"input": gen_name(14)}, 
outputs=[gen_name(15)]) + if_layer21 = pattern_block0.layers[list(pattern_block0.layers.keys())[ + -1]] + pattern_block0_block0 = PaddleGraph(if_layer21, graph_type="dygraph") + pattern_block0_block0.add_layer( + "prim.exception", + inputs={}, + outputs=[gen_name(15)], + input="Exception") + if_layer21.add_block(pattern_block0_block0) + pattern_block0_block1 = PaddleGraph(if_layer21, graph_type="dygraph") + if_layer21.add_block(pattern_block0_block1) + if_layer2.add_block(pattern_block0) + pattern_block1 = PaddleGraph(if_layer2, graph_type="dygraph") + if_layer2.add_block(pattern_block1) + if_layer2.inputs["input-0"] = "bn-input-0" + self.pattern.add_layer( + "paddle.nn.BatchNorm", + inputs={"input": "bn-input-0"}, + outputs=[gen_name(16), gen_name(17)], + is_test=True, + num_channels=160, + momentum=0.1, + epsilon=0.001) + self.pattern.build(inputs={"input-0": "bn-input-0"}) + + def insert_new_layer(self, graph, parameters, matches): + new_layer = self.gen_new_layer(parameters, matches) + new_layer_id = list(matches.keys())[0] + graph.layers[new_layer_id] = new_layer + matches.pop(new_layer_id) + +# for layer in matches.values(): +# print(layer.outputs) +# print("-------") + + def gen_new_layer(self, parameters, matches): + layers_id = list(matches.keys()) + layer = matches[layers_id[-1]] + return layer diff --git a/x2paddle/optimizer/pytorch_optimizer/fusion/constant_fuse_pass.py b/x2paddle/optimizer/pytorch_optimizer/fusion/constant_fuse_pass.py new file mode 100644 index 0000000..c006284 --- /dev/null +++ b/x2paddle/optimizer/pytorch_optimizer/fusion/constant_fuse_pass.py @@ -0,0 +1,33 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from x2paddle.optimizer.pytorch_optimizer.pass_ import Pass +from x2paddle.optimizer.pytorch_optimizer.fusion import ConstantFuser +from x2paddle.optimizer.pytorch_optimizer.pass_manager import pass_register + + +@pass_register +class ConstantFusePass(Pass): + name = "constant_fuse_pass" + + def __init__(self): + Pass.__init__(self) + + def apply(self, graph): + fuser = ConstantFuser() + fuser.operate(graph, match_kind="topo") + + +# 用于注册 +constant_fuse_pass = ConstantFusePass() diff --git a/x2paddle/optimizer/pytorch_optimizer/fusion/constant_fuser.py b/x2paddle/optimizer/pytorch_optimizer/fusion/constant_fuser.py new file mode 100644 index 0000000..f036212 --- /dev/null +++ b/x2paddle/optimizer/pytorch_optimizer/fusion/constant_fuser.py @@ -0,0 +1,63 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +import numpy as np +from x2paddle.optimizer.pytorch_optimizer.pattern_matcher import FuseBase +from x2paddle.core.program import PaddleGraph, PaddleLayer +from x2paddle.core.util import * + + +class ConstantFuser(FuseBase): + def __init__(self): + super(ConstantFuser, self).__init__(graph_type="dygraph") + + def build_pattern(self): + """ 描述需要替换的constant图结构。 + constant层模式python实现代码示例: + x3 = 10 + for _x70 in range(x3): + ... + """ + self.pattern.add_layer( + "prim.constant", inputs={}, outputs=["x1"], value=2) + self.pattern.build() + self.pattern.outputs = ["x1"] + + def insert_new_layer(self, graph, parameters, matches): + def replace_value(layer_connect, match_name, match_value): + for k, v in layer_connect.inputs.items(): + if v == match_name: + layer_connect.inputs.pop(k) + layer_connect.attrs[k] = match_value + break + for k, v in layer_connect.attrs.items(): + if v == match_name: + layer_connect.attrs[k] = match_value + break + if layer_connect.kernel == "prim.loop" or \ + layer_connect.kernel == "prim.if": + for block in layer_connect.blocks: + for b_layer_id, b_layer in block.layers.items(): + if block.edges_in.get(b_layer_id, 0) != 0 and \ + -1 in block.edges_in[b_layer_id]: + replace_value(b_layer, match_name, match_value) + + layer_id = list(matches.keys())[0] + layer = list(matches.values())[0] + layer_output_name = layer.outputs[0] + layer_value = layer.attrs["value"] + if graph.edges_out.get(layer_id, 0) != 0: + for layer_id_out in graph.edges_out[layer_id]: + layer_connect = graph.layers[layer_id_out] + replace_value(layer_connect, layer_output_name, layer_value) diff --git a/x2paddle/optimizer/pytorch_optimizer/fusion/dropout_fuse_pass.py b/x2paddle/optimizer/pytorch_optimizer/fusion/dropout_fuse_pass.py new file mode 100644 index 0000000..5c4cce4 --- /dev/null +++ b/x2paddle/optimizer/pytorch_optimizer/fusion/dropout_fuse_pass.py @@ -0,0 +1,33 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from x2paddle.optimizer.pytorch_optimizer.pass_ import Pass +from x2paddle.optimizer.pytorch_optimizer.fusion import DropoutFuser +from x2paddle.optimizer.pytorch_optimizer.pass_manager import pass_register + + +@pass_register +class DropoutFusePass(Pass): + name = "dropout_fuse_pass" + + def __init__(self): + Pass.__init__(self) + + def apply(self, graph): + fuser = DropoutFuser() + fuser.operate(graph, match_kind="topo") + + +# 用于注册 +dropout_fuse_pass = DropoutFusePass() diff --git a/x2paddle/optimizer/pytorch_optimizer/fusion/dropout_fuser.py b/x2paddle/optimizer/pytorch_optimizer/fusion/dropout_fuser.py new file mode 100644 index 0000000..bfece3f --- /dev/null +++ b/x2paddle/optimizer/pytorch_optimizer/fusion/dropout_fuser.py @@ -0,0 +1,60 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +import numpy as np +from x2paddle.optimizer.pytorch_optimizer.pattern_matcher import FuseBase +from x2paddle.core.program import PaddleGraph, PaddleLayer +from x2paddle.core.util import * + + +class DropoutFuser(FuseBase): + def __init__(self): + super(DropoutFuser, self).__init__(graph_type="dygraph") + + def build_pattern(self): + """ 描述需要替换的constant图结构。 + constant层模式python实现代码示例: + x3 = 10 + for _x70 in range(x3): + ... + """ + self.pattern.add_layer( + "paddle.nn.Dropout", + inputs={"input": "dropout-input-0"}, + outputs=["dropout0", "x1"]) + self.pattern.build(inputs={"input-0": "dropout-input-0"}) + self.pattern.outputs = ["dropout0", "x1"] + + def insert_new_layer(self, graph, parameters, matches): + def replace_value(layer_connect, match_name, match_input): + for k, v in layer_connect.inputs.items(): + if v == match_name: + layer_connect.inputs[k] = match_input + break + if layer_connect.kernel == "prim.loop" or \ + layer_connect.kernel == "prim.if": + for block in layer_connect.blocks: + for b_layer_id, b_layer in block.layers.items(): + if block.edges_in.get(b_layer_id, 0) != 0 and \ + -1 in block.edges_in[b_layer_id]: + replace_value(b_layer, match_name, match_input) + + layer_id = list(matches.keys())[0] + layer = list(matches.values())[0] + layer_output_name = layer.outputs[1] + layer_input = layer.inputs["input"] + if graph.edges_out.get(layer_id, 0) != 0: + for layer_id_out in graph.edges_out[layer_id]: + layer_connect = graph.layers[layer_id_out] + replace_value(layer_connect, layer_output_name, layer_input) diff --git a/x2paddle/optimizer/pytorch_optimizer/fusion/fc_fuse_pass.py b/x2paddle/optimizer/pytorch_optimizer/fusion/fc_fuse_pass.py new file mode 100644 index 0000000..c94b67b --- /dev/null +++ b/x2paddle/optimizer/pytorch_optimizer/fusion/fc_fuse_pass.py @@ -0,0 +1,33 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from x2paddle.optimizer.pytorch_optimizer.pass_ import Pass +from x2paddle.optimizer.pytorch_optimizer.fusion import FcFuser +from x2paddle.optimizer.pytorch_optimizer.pass_manager import pass_register + + +@pass_register +class FcFusePass(Pass): + name = "fc_fuse_pass" + + def __init__(self): + Pass.__init__(self) + + def apply(self, graph): + fuser = FcFuser() + fuser.operate(graph, match_kind="topo") + + +# 用于注册 +fc_fuse_pass = FcFusePass() diff --git a/x2paddle/optimizer/pytorch_optimizer/fusion/fc_fuser.py b/x2paddle/optimizer/pytorch_optimizer/fusion/fc_fuser.py new file mode 100644 index 0000000..079ab6f --- /dev/null +++ b/x2paddle/optimizer/pytorch_optimizer/fusion/fc_fuser.py @@ -0,0 +1,158 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import numpy as np +from x2paddle.optimizer.pytorch_optimizer.pattern_matcher import FuseBase +from x2paddle.core.program import PaddleGraph, PaddleLayer +from x2paddle.core.util import * + + +class FcFuser(FuseBase): + def __init__(self): + self.linear_index = 0 + super(FcFuser, self).__init__(graph_type="dygraph") + + def build_pattern(self): + """ 描述需要替换的fc图结构。 + fc层模式python实现代码示例: + x133 = x128.shape + x133 = len(x133) + x134 = x133 == 2 + if x134 : + classifier_6_weight = self.classifier_6_weight + x136 = fluid.layers.transpose(x=classifier_6_weight, perm=[1, 0]) + classifier_6_bias = self.classifier_6_bias + x137 = paddle.addmm(input=classifier_6_bias, x=x128, y=x136, beta=1, alpha=1) + x135 = x137 + else: + classifier_6_weight = self.classifier_6_weight + x138 = fluid.layers.transpose(x=classifier_6_weight, perm=[1, 0]) + x139 = fluid.layers.matmul(x=x128, y=x138) + classifier_6_bias = self.classifier_6_bias + x140 = x139 + 1 * classifier_6_bias + x135 = x140 + """ + + def gen_name(id): + return "x" + str(id) + + self.pattern.add_layer( + "fluid.layers.shape", + inputs={'input': "fc-input-0"}, + outputs=[gen_name(2)]) + self.pattern.add_layer( + "prim.len", inputs={'input': gen_name(2)}, outputs=[gen_name(2)]) + self.pattern.add_layer( + "prim.eq", + inputs={"eq0": gen_name(2)}, + outputs=[gen_name(3)], + eq1=2) + self.pattern.add_layer("prim.if", {'input': gen_name(3)}, [gen_name(4)]) + self.pattern.outputs.append(gen_name(4)) + if_layer1 = self.pattern.layers[list(self.pattern.layers.keys())[-1]] + pattern_block0 = PaddleGraph(if_layer1, graph_type="dygraph") + pattern_block0.add_layer( + "fluid.dygraph.base.to_variable", + inputs={}, + outputs=[gen_name(5)], + value="params[{}]".format(string(gen_name(5)))) + pattern_block0.add_layer( + "fluid.layers.transpose", + inputs={"x": gen_name(5)}, + outputs=[gen_name(6)], + perm=[1, 0]) + pattern_block0.add_layer( + "fluid.dygraph.base.to_variable", + inputs={}, + outputs=[gen_name(7)], + 
value="params[{}]".format(string(gen_name(7)))) + pattern_block0.add_layer( + "paddle.addmm", + inputs={"input": gen_name(7), + "x": "fc-input-0", + "y": gen_name(6)}, + outputs=[gen_name(8)], + beta=1, + alpha=1) + if_layer1.inputs["input-0"] = "fc-input-0" + self.pattern.inputs.append("fc-input-0") + pattern_block0.add_layer( + "prim.equal", inputs={'input': gen_name(8)}, outputs=[gen_name(4)]) + if_layer1.add_block(pattern_block0) + pattern_block1 = PaddleGraph(if_layer1, graph_type="dygraph") + pattern_block1.add_layer( + "fluid.dygraph.base.to_variable", + inputs={}, + outputs=[gen_name(5)], + value="params[{}]".format(string(gen_name(5)))) + pattern_block1.add_layer( + "fluid.layers.transpose", + inputs={"x": gen_name(5)}, + outputs=[gen_name(6)], + perm=[1, 0]) + pattern_block1.add_layer( + "paddle.matmul", + inputs={"x": "fc-input-0", + "y": gen_name(6)}, + outputs=[gen_name(9)]) + if_layer1.inputs["input-1"] = "fc-input-0" + pattern_block1.add_layer( + "fluid.dygraph.base.to_variable", + inputs={}, + outputs=[gen_name(12)], + value="params[{}]".format(string(gen_name(12)))) + pattern_block1.add_layer( + "prim.add_", + inputs={"x": gen_name(9), + "y": gen_name(12)}, + outputs=[gen_name(13)], + alpha=1) + pattern_block1.add_layer( + "prim.equal", inputs={'input': gen_name(13)}, + outputs=[gen_name(4)]) + if_layer1.add_block(pattern_block1) + self.pattern.build(inputs={"input-0": "fc-input-0"}) + + def insert_new_layer(self, graph, parameters, matches): + new_layer = self.gen_new_layer(parameters, matches) + new_layer_id = list(matches.keys())[0] + graph.layers[new_layer_id] = new_layer + matches.pop(new_layer_id) + + def gen_new_layer(self, parameters, matches): + layers_id = list(matches.keys()) + layer = matches[layers_id[0]] + input_name = layer.inputs["input"] + layer = matches[layers_id[3]] + output_name = layer.outputs[0] + layer = matches[layers_id[4]] + weight_name = layer.attrs["value"][8:-2] + layer = matches[layers_id[6]] + bias_name = 
layer.attrs["value"][8:-2] + attrs = dict() + attrs["in_features"] = parameters[weight_name].shape[1] + attrs["out_features"] = parameters[weight_name].shape[0] + linear_name = "linear{}".format(self.linear_index) + self.linear_index += 1 + parameters["{}.weight".format(linear_name)] = parameters[ + weight_name].transpose((1, 0)) + parameters["{}.bias".format(linear_name)] = np.squeeze(parameters[ + bias_name]) + new_layer = PaddleLayer( + layers_id[0], + "paddle.nn.Linear", + inputs={"input": input_name}, + outputs=[linear_name, output_name], + **attrs) + return new_layer diff --git a/x2paddle/optimizer/pytorch_optimizer/fusion/interpolate_bilinear_fuse_pass.py b/x2paddle/optimizer/pytorch_optimizer/fusion/interpolate_bilinear_fuse_pass.py new file mode 100644 index 0000000..328e055 --- /dev/null +++ b/x2paddle/optimizer/pytorch_optimizer/fusion/interpolate_bilinear_fuse_pass.py @@ -0,0 +1,33 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from x2paddle.optimizer.pytorch_optimizer.pass_ import Pass +from x2paddle.optimizer.pytorch_optimizer.fusion import InterpolateBilinearFuser +from x2paddle.optimizer.pytorch_optimizer.pass_manager import pass_register + + +@pass_register +class InterpolateBilinearFusePass(Pass): + name = "interpolate_bilinear_fuse_pass" + + def __init__(self): + Pass.__init__(self) + + def apply(self, graph): + fuser = InterpolateBilinearFuser() + fuser.operate(graph, match_kind="topo") + + +# 用于注册 +interpolate_bilinear_fuse_pass = InterpolateBilinearFusePass() diff --git a/x2paddle/optimizer/pytorch_optimizer/fusion/interpolate_bilinear_fuser.py b/x2paddle/optimizer/pytorch_optimizer/fusion/interpolate_bilinear_fuser.py new file mode 100644 index 0000000..a39ffb4 --- /dev/null +++ b/x2paddle/optimizer/pytorch_optimizer/fusion/interpolate_bilinear_fuser.py @@ -0,0 +1,1552 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import numpy as np +from x2paddle.optimizer.pytorch_optimizer.pattern_matcher import FuseBase +from x2paddle.core.program import PaddleGraph, PaddleLayer +from x2paddle.core.util import * + + +class InterpolateBilinearFuser(FuseBase): + def __init__(self): + super(InterpolateBilinearFuser, self).__init__(graph_type="dygraph") + import torch + torch_version = torch.__version__ + torch_version_part = torch_version.split(".") + if int(torch_version_part[0]) == 1 and int(torch_version_part[1]) > 5: + self.version_gt_150 = True + else: + self.version_gt_150 = False + + def build_pattern(self): + """ 描述需要替换的双线性插值图结构。 + interpolate_bilinear层模式python实现代码示例: + x3016 = fluid.layers.shape(input=x3005) + x3016 = len(x3016) + x3017 = x3016 - 2 + x3018 = [] + for _x3020 in range(x3017): + x3018.append(None) + x3021 = (x3005, x8, None, None) + x3022 = fluid.layers.shape(input=x3005) + x3022 = len(x3022) + x3023 = x3022 == 3 + if x3023 : + raise RaiseException('Exception') + x3024 = None + else: + x3026 = fluid.layers.shape(input=x3005) + x3026 = len(x3026) + x3027 = x3026 == 4 + if x3027 : + x3044, x3045, x3046, x3047 = x3021 + x3048 = x3045 is None + if x3048 : + x3051 = x3046 is None + x3049 = x3051 + x3050 = x3045 + else: + x3052 = x3045 + x3049 = False + x3050 = x3052 + if x3049 : + raise RaiseException('Exception') + x3055 = x3050 is not None + if x3055 : + x3058 = x3050 + x3059 = x3046 is not None + x3056 = x3059 + x3057 = x3058 + else: + x3056 = False + x3057 = x3050 + if x3056 : + raise RaiseException('Exception') + x3060 = None + x3061 = None + else: + x3060 = x3046 + x3061 = x3057 + x3063 = x3060 is not None + if x3063 : + x3065 = x3060 + x3066 = len(x3065) + x3067 = x3066 != 2 + if x3067 : + raise RaiseException('Exception') + x3064 = x3065 + else: + x3064 = x3060 + x3070 = x3061 is not None + if x3070 : + x3072 = x3061 + x3071 = x3072 + else: + x3071 = None + if x3070 : + x3073 = x3071 + else: + x3074 = x3064 is not None + if x3074 : + x3076 = x3064 + x3075 = x3076 
+ else: + raise RaiseException('Exception') + x3075 = None + x3078 = x3047 is None + if x3078 : + x3080 = len(x3075) + x3081 = x3080 > 0 + x3086 = 0 + for x3083 in range(2147483647): + x3087 = x3075[x3086] + x3088 = math.floor(x3087) + x3089 = x3088 != x3087 + if x3089 : + x3090 = False + x3091 = x3089 + else: + x3090 = None + x3091 = None + if x3089 : + x3092 = x3090 + x3093 = x3091 + else: + x3092 = True + x3093 = x3089 + x3094 = x3086 + 1 + x3095 = x3094 < x3080 + x3096 = x3095 and x3092 + x3082 = x3093 + x3083 = x3094 + if x3082 : + import warnings + warnings.warn('The default behavior for interpolate/upsample with float scale_factor will change in 1.6.0 to align with other frameworks/libraries, and use scale_factor directly, instead of relying on the computed output size. If you wish to keep the old behavior, please set recompute_scale_factor=True. See the documentation of nn.Upsample for details. ', stacklevel=2) + x3099 = [] + for _x3101 in range(2): + x3102 = _x3101 + 2 + x3103 = fluid.layers.shape(x3044)[x3102] + x3104 = float(x3103) + x3105 = x3075[_x3101] + x3106 = x3104 * x3105 + x3107 = math.floor(x3106) + x3099.append(x3107) + x3073 = x3099 + x3108 = x3018[0] + x3109 = x3018[1] + x3073_isinstance = isinstance(x3073, paddle.fluid.Variable) + if x3073_isinstance : + x3073 = x3073.numpy().tolist() + assert x3108 == x3109, 'The x3108 must be x3109!' 
+ x3110 = paddle.nn.functional.interpolate(x=x3005, size=x3073, scale_factor=x3108, align_corners=False, align_mode=0) + x3028 = x3110 + else: + x3111 = fluid.layers.shape(input=x3005) + x3111 = len(x3111) + x3112 = x3111 == 5 + if x3112 : + raise RaiseException('Exception') + else: + raise RaiseException('Exception') + x3028 = None + x3024 = x3028 + """ + + def gen_name(id): + return "x" + str(id) + + if self.version_gt_150: + self.pattern.add_layer( + "fluid.layers.shape", + inputs={"input": "interpolate-input-0"}, + outputs=[gen_name(9)]) + self.pattern.add_layer( + "prim.len", + inputs={"input": gen_name(9)}, + outputs=[gen_name(9)]) + self.pattern.add_layer( + "prim.sub", + inputs={"x": gen_name(9)}, + outputs=[gen_name(10)], + y=2) + self.pattern.add_layer( + "prim.list", inputs={}, outputs=[gen_name(11)]) + self.pattern.add_layer( + "prim.loop", + inputs={"input": gen_name(10)}, + outputs=[gen_name(12.1), gen_name(12.2)]) + loop_layer = self.pattern.layers[list(self.pattern.layers.keys())[ + -1]] + pattern_block = PaddleGraph(loop_layer, graph_type="dygraph") + pattern_block.add_layer( + "prim.append", + inputs={"list": gen_name(11)}, + outputs=[], + element=None) + loop_layer.inputs["input-0"] = gen_name(11) + loop_layer.add_block(pattern_block) + self.pattern.add_layer( + "prim.tuple", + inputs={ + "input0": "interpolate-input-0", + "input1": "interpolate-input-1", + }, + outputs=[gen_name(13)], + input2=None, + input3=None) + self.pattern.add_layer( + "fluid.layers.shape", + inputs={"input": "interpolate-input-0"}, + outputs=[gen_name(14)]) + self.pattern.add_layer( + "prim.len", + inputs={"input": gen_name(14)}, + outputs=[gen_name(14)]) + self.pattern.add_layer( + "prim.eq", + inputs={"x": gen_name(14)}, + outputs=[gen_name(15)], + y=3) + self.pattern.add_layer( + "prim.if", + inputs={"input": gen_name(15)}, + outputs=[gen_name(16)]) + if_layer1 = self.pattern.layers[list(self.pattern.layers.keys())[ + -1]] + pattern_block = PaddleGraph(if_layer1, 
graph_type="dygraph") + pattern_block.add_layer( + "prim.exception", + inputs={}, + outputs=[gen_name(17)], + input="Exception") + pattern_block.add_layer( + "prim.equal", inputs={}, outputs=[gen_name(16)], input=None) + if_layer1.add_block(pattern_block) + pattern_block = PaddleGraph(if_layer1, graph_type="dygraph") + pattern_block.add_layer( + "fluid.layers.shape", + inputs={"input": "interpolate-input-0"}, + outputs=[gen_name(18)]) + pattern_block.add_layer( + "prim.len", + inputs={"input": gen_name(18)}, + outputs=[gen_name(18)]) + pattern_block.add_layer( + "prim.eq", + inputs={"x": gen_name(18)}, + outputs=[gen_name(19)], + y=4) + pattern_block.add_layer( + "prim.if", + inputs={"input": gen_name(19)}, + outputs=[gen_name(20)]) + if_layer2 = pattern_block.layers[list(pattern_block.layers.keys())[ + -1]] + pattern_block_block = PaddleGraph(if_layer2, graph_type="dygraph") + pattern_block_block.add_layer( + "prim.tuple_unpack", + inputs={"input": gen_name(13)}, + outputs=[ + gen_name(34), gen_name(35), gen_name(36), gen_name(37) + ]) + pattern_block_block.add_layer( + "prim.is", + inputs={"x": gen_name(35)}, + outputs=[gen_name(38)], + y=None) + pattern_block_block.add_layer( + "prim.if", + inputs={"input": gen_name(38)}, + outputs=[gen_name(39), gen_name(40)]) + if_layer3 = pattern_block_block.layers[list( + pattern_block_block.layers.keys())[-1]] + pattern_block_block_block = PaddleGraph( + if_layer3, graph_type="dygraph") + pattern_block_block_block.add_layer( + "prim.is", + inputs={"x": gen_name(36)}, + outputs=[gen_name(41)], + y=None) + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(41)}, + outputs=[gen_name(39)]) + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(35)}, + outputs=[gen_name(40)]) + if_layer3.add_block(pattern_block_block_block) + pattern_block_block_block = PaddleGraph( + if_layer3, graph_type="dygraph") + pattern_block_block_block.add_layer( + "prim.equal", + 
inputs={"input": gen_name(35)}, + outputs=[gen_name(42)]) + pattern_block_block_block.add_layer( + "prim.equal", inputs={}, outputs=[gen_name(39)], input=False) + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(35)}, + outputs=[gen_name(40)]) + if_layer3.add_block(pattern_block_block_block) + if_layer3.inputs.update({ + "input-0": gen_name(36), + 'input-1': gen_name(35), + 'input-2': gen_name(35), + }) + pattern_block_block.add_layer( + "prim.if", + inputs={"input": gen_name(39)}, + outputs=[gen_name(43)]) + if_layer4 = pattern_block_block.layers[list( + pattern_block_block.layers.keys())[-1]] + pattern_block_block_block = PaddleGraph( + if_layer4, graph_type="dygraph") + pattern_block_block_block.add_layer( + "prim.exception", + inputs={}, + outputs=[gen_name(44)], + input="Exception") + if_layer4.add_block(pattern_block_block_block) + pattern_block_block_block = PaddleGraph( + if_layer4, graph_type="dygraph") + if_layer4.add_block(pattern_block_block_block) + pattern_block_block.add_layer( + "prim.isnot", + inputs={"x": gen_name(40)}, + outputs=[gen_name(45)], + y=None) + pattern_block_block.add_layer( + "prim.if", + inputs={"input": gen_name(45)}, + outputs=[gen_name(46), gen_name(47)]) + if_layer5 = pattern_block_block.layers[list( + pattern_block_block.layers.keys())[-1]] + pattern_block_block_block = PaddleGraph( + if_layer5, graph_type="dygraph") + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(40)}, + outputs=[gen_name(48)]) + pattern_block_block_block.add_layer( + "prim.isnot", + inputs={"x": gen_name(36)}, + outputs=[gen_name(49)], + y=None) + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(49)}, + outputs=[gen_name(46)]) + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(48)}, + outputs=[gen_name(47)]) + if_layer5.add_block(pattern_block_block_block) + pattern_block_block_block = PaddleGraph( + if_layer5, 
graph_type="dygraph") + pattern_block_block_block.add_layer( + "prim.equal", inputs={}, outputs=[gen_name(46)], input=False) + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(40)}, + outputs=[gen_name(47)]) + if_layer5.add_block(pattern_block_block_block) + if_layer5.inputs.update({ + "input-0": gen_name(40), + "input-1": gen_name(36), + "input-3": gen_name(40) + }) + pattern_block_block.add_layer( + "prim.if", + inputs={"input": gen_name(46)}, + outputs=[gen_name(50), gen_name(51)]) + if_layer6 = pattern_block_block.layers[list( + pattern_block_block.layers.keys())[-1]] + pattern_block_block_block = PaddleGraph( + if_layer6, graph_type="dygraph") + pattern_block_block_block.add_layer( + "prim.exception", + inputs={}, + outputs=[gen_name(52)], + input="Exception") + pattern_block_block_block.add_layer( + "prim.equal", inputs={}, outputs=[gen_name(50)], input=None) + pattern_block_block_block.add_layer( + "prim.equal", inputs={}, outputs=[gen_name(51)], input=None) + if_layer6.add_block(pattern_block_block_block) + pattern_block_block_block = PaddleGraph( + if_layer6, graph_type="dygraph") + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(36)}, + outputs=[gen_name(50)]) + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(47)}, + outputs=[gen_name(51)]) + if_layer6.add_block(pattern_block_block_block) + if_layer6.inputs.update({ + "input-0": gen_name(36), + "input-1": gen_name(47) + }) + pattern_block_block.add_layer( + "prim.isnot", + inputs={"x": gen_name(50)}, + outputs=[gen_name(53)], + y=None) + pattern_block_block.add_layer( + "prim.if", + inputs={"input": gen_name(53)}, + outputs=[gen_name(54)]) + if_layer7 = pattern_block_block.layers[list( + pattern_block_block.layers.keys())[-1]] + pattern_block_block_block = PaddleGraph( + if_layer7, graph_type="dygraph") + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(50)}, + 
outputs=[gen_name(55)]) + pattern_block_block_block.add_layer( + "prim.len", + inputs={"input": gen_name(55)}, + outputs=[gen_name(56)]) + pattern_block_block_block.add_layer( + "prim.ne", + inputs={"x": gen_name(56)}, + outputs=[gen_name(57)], + y=2) + pattern_block_block_block.add_layer( + "prim.if", + inputs={"input": gen_name(57)}, + outputs=[gen_name(58)]) + if_layer8 = pattern_block_block_block.layers[list( + pattern_block_block_block.layers.keys())[-1]] + pattern_block_block_block_block = PaddleGraph( + if_layer8, graph_type="dygraph") + pattern_block_block_block_block.add_layer( + "prim.exception", + inputs={}, + outputs=[gen_name(59)], + input="Exception") + if_layer8.add_block(pattern_block_block_block_block) + pattern_block_block_block_block = PaddleGraph( + if_layer8, graph_type="dygraph") + if_layer8.add_block(pattern_block_block_block_block) + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(55)}, + outputs=[gen_name(54)]) + if_layer7.add_block(pattern_block_block_block) + pattern_block_block_block = PaddleGraph( + if_layer7, graph_type="dygraph") + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(50)}, + outputs=[gen_name(54)]) + if_layer7.add_block(pattern_block_block_block) + if_layer7.inputs.update({ + "input-0": gen_name(50), + "input-1": gen_name(50) + }) + pattern_block_block.add_layer( + "prim.isnot", + inputs={"x": gen_name(51)}, + outputs=[gen_name(60)], + y=None) + pattern_block_block.add_layer( + "prim.if", + inputs={"input": gen_name(60)}, + outputs=[gen_name(61)]) + if_layer9 = pattern_block_block.layers[list( + pattern_block_block.layers.keys())[-1]] + pattern_block_block_block = PaddleGraph( + if_layer9, graph_type="dygraph") + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(51)}, + outputs=[gen_name(62)]) + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(62)}, + outputs=[gen_name(61)]) + 
if_layer9.add_block(pattern_block_block_block) + pattern_block_block_block = PaddleGraph( + if_layer9, graph_type="dygraph") + pattern_block_block_block.add_layer( + "prim.isnot", + inputs={"x": gen_name(54)}, + outputs=[gen_name(64)], + y=None) + pattern_block_block_block.add_layer( + "prim.if", + inputs={"input": gen_name(64)}, + outputs=[gen_name(65)]) + if_layer11 = pattern_block_block_block.layers[list( + pattern_block_block_block.layers.keys())[-1]] + pattern_block_block_block_block = PaddleGraph( + if_layer11, graph_type="dygraph") + pattern_block_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(54)}, + outputs=[gen_name(66)]) + pattern_block_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(66)}, + outputs=[gen_name(65)]) + if_layer11.add_block(pattern_block_block_block_block) + pattern_block_block_block_block = PaddleGraph( + if_layer11, graph_type="dygraph") + pattern_block_block_block_block.add_layer( + "prim.exception", + inputs={}, + outputs=[gen_name(67)], + input="Exception") + pattern_block_block_block_block.add_layer( + "prim.equal", inputs={}, outputs=[gen_name(65)], input=None) + if_layer11.add_block(pattern_block_block_block_block) + if_layer11.inputs.update({"input-0": gen_name(54), }) + pattern_block_block_block.add_layer( + "prim.is", + inputs={"x": gen_name(37)}, + outputs=[gen_name(68)], + y=None) + pattern_block_block_block.add_layer( + "prim.if", + inputs={"input": gen_name(68)}, + outputs=[gen_name(69)]) + if_layer12 = pattern_block_block_block.layers[list( + pattern_block_block_block.layers.keys())[-1]] + pattern_block_block_block_block = PaddleGraph( + if_layer12, graph_type="dygraph") + pattern_block_block_block_block.add_layer( + "prim.len", + inputs={"input": gen_name(65)}, + outputs=[gen_name(70)]) + pattern_block_block_block_block.add_layer( + "prim.gt", + inputs={"x": gen_name(70)}, + outputs=[gen_name(71)], + y=0) + pattern_block_block_block_block.add_layer( + "prim.equal", 
inputs={}, outputs=[gen_name(72)], input=0) + pattern_block_block_block_block.add_layer( + "prim.loop", + inputs={}, + outputs=[gen_name(74), gen_name(75), gen_name(76.1)], + input=2147483647) + loop_layer = pattern_block_block_block_block.layers[list( + pattern_block_block_block_block.layers.keys())[-1]] + pattern_loop_block = PaddleGraph(loop_layer, graph_type="dygraph") + pattern_loop_block.add_layer( + "prim.getitem", + inputs={"list": gen_name(65), + "element": gen_name(72)}, + outputs=[gen_name(74.1)]) + pattern_loop_block.add_layer( + "prim.floor", + inputs={"input": gen_name(74.1)}, + outputs=[gen_name(75.1)]) + pattern_loop_block.add_layer( + "prim.ne", + inputs={"x": gen_name(75.1), + "y": gen_name(74.1)}, + outputs=[gen_name(76)]) + pattern_loop_block.add_layer( + "prim.if", + inputs={"input": gen_name(76)}, + outputs=[gen_name(77)]) + if_layer13 = pattern_loop_block.layers[list( + pattern_loop_block.layers.keys())[-1]] + pattern_loop_block_block = PaddleGraph( + if_layer13, graph_type="dygraph") + pattern_loop_block_block.add_layer( + "prim.equal", inputs={}, outputs=[gen_name(77)], input=False) + if_layer13.add_block(pattern_loop_block_block) + pattern_loop_block_block = PaddleGraph( + if_layer13, graph_type="dygraph") + pattern_loop_block_block.add_layer( + "prim.equal", inputs={}, outputs=[gen_name(77)], input=True) + if_layer13.add_block(pattern_loop_block_block) + pattern_loop_block.add_layer( + "prim.add", + inputs={"x": gen_name(72)}, + outputs=[gen_name(81)], + y=1) + pattern_loop_block.add_layer( + "prim.lt", + inputs={"x": gen_name(81), + "y": gen_name(70)}, + outputs=[gen_name(82)]) + pattern_loop_block.add_layer( + "prim.and", + inputs={"x": gen_name(82), + "y": gen_name(77)}, + outputs=[gen_name(83)]) + pattern_loop_block.add_layer( + "prim.equal", + inputs={"input": gen_name(76)}, + outputs=[gen_name(74)]) + pattern_loop_block.add_layer( + "prim.equal", + inputs={"input": gen_name(81)}, + outputs=[gen_name(75)]) + 
loop_layer.add_block(pattern_loop_block) + loop_layer.inputs.update({ + "input-0": gen_name(65), + "input-1": gen_name(72), + "input-2": gen_name(72), + "input-3": gen_name(70) + }) + pattern_block_block_block_block.add_layer( + "prim.if", + inputs={"input": gen_name(74)}, + outputs=[gen_name(84)]) + if_layer15 = pattern_block_block_block_block.layers[list( + pattern_block_block_block_block.layers.keys())[-1]] + pattern_block_block_block_block_block = PaddleGraph( + if_layer15, graph_type="dygraph") + pattern_block_block_block_block_block.add_layer( + "prim.warnings", + inputs={}, + outputs=[gen_name(85)], + stacklevel=2, + input="...") + if_layer15.add_block(pattern_block_block_block_block_block) + pattern_block_block_block_block_block = PaddleGraph( + if_layer15, graph_type="dygraph") + if_layer15.add_block(pattern_block_block_block_block_block) + if_layer12.add_block(pattern_block_block_block_block) + pattern_block_block_block_block = PaddleGraph( + if_layer12, graph_type="dygraph") + if_layer12.add_block(pattern_block_block_block_block) + if_layer12.inputs.update({ + "input-0": gen_name(65), + "input-1": gen_name(65), + }) + pattern_block_block_block.add_layer( + "prim.list", inputs={}, outputs=[gen_name(86)]) + pattern_block_block_block.add_layer( + "prim.loop", + inputs={}, + outputs=[gen_name(87), gen_name(88)], + input=2) + loop_layer = pattern_block_block_block.layers[list( + pattern_block_block_block.layers.keys())[-1]] + pattern_loop_block = PaddleGraph(loop_layer, graph_type="dygraph") + pattern_loop_block.add_layer( + "prim.add", + inputs={"x": gen_name(88)}, + outputs=[gen_name(89)], + y=2) + pattern_loop_block.add_layer( + "prim.shape_dim", + inputs={"input": gen_name(34), + "dim": gen_name(89)}, + outputs=[gen_name(90)]) + pattern_loop_block.add_layer( + "prim.float", + inputs={"input": gen_name(90)}, + outputs=[gen_name(91)]) + pattern_loop_block.add_layer( + "prim.getitem", + inputs={"list": gen_name(65), + "element": gen_name(88)}, + 
outputs=[gen_name(92)]) + pattern_loop_block.add_layer( + "prim.mul", + inputs={"x": gen_name(91), + "y": gen_name(92)}, + outputs=[gen_name(93)]) + pattern_loop_block.add_layer( + "prim.floor", + inputs={"input": gen_name(93)}, + outputs=[gen_name(94)]) + pattern_loop_block.add_layer( + "prim.append", + inputs={"list": gen_name(86), + "element": gen_name(94)}, + outputs=[]) + loop_layer.add_block(pattern_loop_block) + loop_layer.inputs.update({ + "input-0": gen_name(34), + "input-1": gen_name(65), + "input-2": gen_name(86) + }) + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(86)}, + outputs=[gen_name(61)]) + if_layer9.add_block(pattern_block_block_block) + if_layer9.inputs.update({ + "input-0": gen_name(51), + "input-1": gen_name(54), + "input-2": gen_name(54), + "input-3": gen_name(37), + "input-4": gen_name(34) + }) + pattern_block_block.add_layer( + "prim.getitem", + inputs={"list": gen_name(11)}, + outputs=[gen_name(95)], + element=0) + pattern_block_block.add_layer( + "prim.getitem", + inputs={"list": gen_name(11)}, + outputs=[gen_name(96)], + element=1) + pattern_block_block.add_layer( + "prim.isinstance", + inputs={"input": gen_name(61)}, + outputs=["interpolate-input-0_isinstance"], + cls="paddle.fluid.Variable") + pattern_block_block.add_layer( + "prim.if", {"input": "interpolate-input-0_isinstance"}, + outputs=["interpolate-input-0_if1"]) + if_layer_isinstance = pattern_block_block.layers[list( + pattern_block_block.layers.keys())[-1]] + pattern_block_block_block = PaddleGraph( + if_layer_isinstance, graph_type="dygraph") + pattern_block_block_block.add_layer( + "prim.var2list", + inputs={"input": gen_name(61)}, + outputs=[gen_name(61)]) + if_layer_isinstance.add_block(pattern_block_block_block) + pattern_block_block_block = PaddleGraph( + if_layer_isinstance, graph_type="dygraph") + if_layer_isinstance.add_block(pattern_block_block_block) + if_layer_isinstance.inputs["input-0"] = gen_name(61) + 
pattern_block_block.add_layer( + "prim.assert", + inputs={"key": gen_name(95), + "value": gen_name(96)}, + outputs=[gen_name(97) + "_assert"], + type="eq") + pattern_block_block.add_layer( + "paddle.nn.functional.interpolate", + inputs={ + "input": "interpolate-input-0", + "size": gen_name(61), + "scale_factor": gen_name(95) + }, + outputs=[gen_name(97)], + align_corners=False, + align_mode=0) + pattern_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(97)}, + outputs=[gen_name(20)]) + if_layer2.add_block(pattern_block_block) + pattern_block_block = PaddleGraph(if_layer2, graph_type="dygraph") + pattern_block_block.add_layer( + "fluid.layers.shape", + inputs={"input": "interpolate-input-0"}, + outputs=[gen_name(98)]) + pattern_block_block.add_layer( + "prim.len", + inputs={"input": gen_name(98)}, + outputs=[gen_name(98)]) + pattern_block_block.add_layer( + "prim.eq", + inputs={"x": gen_name(98)}, + outputs=[gen_name(99)], + y=5) + pattern_block_block.add_layer( + "prim.if", + inputs={"input": gen_name(99)}, + outputs=[gen_name(100)]) + if_layer16 = pattern_block_block.layers[list( + pattern_block_block.layers.keys())[-1]] + pattern_block_block_block = PaddleGraph( + if_layer16, graph_type="dygraph") + pattern_block_block_block.add_layer( + "prim.exception", + inputs={}, + outputs=[gen_name(101)], + input="Exception") + if_layer16.add_block(pattern_block_block_block) + pattern_block_block_block = PaddleGraph( + if_layer16, graph_type="dygraph") + pattern_block_block_block.add_layer( + "prim.exception", + inputs={}, + outputs=[gen_name(102)], + input="Exception") + if_layer16.add_block(pattern_block_block_block) + pattern_block_block.add_layer( + "prim.equal", inputs={}, outputs=[gen_name(20)], input=None) + if_layer2.add_block(pattern_block_block) + if_layer2.inputs.update({ + "input-0": gen_name(13), + "input-1": gen_name(13), + "input-2": "interpolate-input-0", + "input-3": gen_name(11), + "input-5": gen_name(11), + }) + pattern_block.add_layer( + 
"prim.equal", + inputs={"input": gen_name(20)}, + outputs=[gen_name(16)]) + if_layer1.add_block(pattern_block) + if_layer1.inputs.update({ + "input-2": "interpolate-input-0", + "input-4": gen_name(13), + "input-7": gen_name(11), + "input-9": gen_name(11), + "input-11": "interpolate-input-0", + "input-12": "interpolate-input-0", + }) + self.pattern.build(inputs={ + "input-0": "interpolate-input-0", + "input-1": "interpolate-input-1" + }) + else: + self.pattern.add_layer( + "fluid.layers.shape", + inputs={"input": "interpolate-input-0"}, + outputs=[gen_name(9)]) + self.pattern.add_layer( + "prim.len", + inputs={"input": gen_name(9)}, + outputs=[gen_name(9)]) + self.pattern.add_layer( + "prim.sub", + inputs={"x": gen_name(9)}, + outputs=[gen_name(10)], + y=2) + self.pattern.add_layer( + "prim.list", inputs={}, outputs=[gen_name(11)]) + self.pattern.add_layer( + "prim.loop", + inputs={"input": gen_name(10)}, + outputs=[gen_name(12.1), gen_name(12.2)]) + loop_layer = self.pattern.layers[list(self.pattern.layers.keys())[ + -1]] + pattern_block = PaddleGraph(loop_layer, graph_type="dygraph") + pattern_block.add_layer( + "prim.append", + inputs={"list": gen_name(11)}, + outputs=[], + element=None) + loop_layer.inputs["input-0"] = gen_name(11) + loop_layer.add_block(pattern_block) + self.pattern.add_layer( + "prim.tuple", + inputs={ + "input0": "interpolate-input-0", + "input1": "interpolate-input-1", + }, + outputs=[gen_name(13)], + input2=None, + input3=None) + self.pattern.add_layer( + "fluid.layers.shape", + inputs={"input": "interpolate-input-0"}, + outputs=[gen_name(14)]) + self.pattern.add_layer( + "prim.len", + inputs={"input": gen_name(14)}, + outputs=[gen_name(14)]) + self.pattern.add_layer( + "prim.eq", + inputs={"x": gen_name(14)}, + outputs=[gen_name(15)], + y=3) + self.pattern.add_layer( + "prim.if", + inputs={"input": gen_name(15)}, + outputs=[gen_name(16)]) + if_layer1 = self.pattern.layers[list(self.pattern.layers.keys())[ + -1]] + pattern_block = 
PaddleGraph(if_layer1, graph_type="dygraph") + pattern_block.add_layer( + "prim.exception", + inputs={}, + outputs=[gen_name(17)], + input="Exception") + pattern_block.add_layer( + "prim.equal", inputs={}, outputs=[gen_name(16)], input=None) + if_layer1.add_block(pattern_block) + pattern_block = PaddleGraph(if_layer1, graph_type="dygraph") + pattern_block.add_layer( + "fluid.layers.shape", + inputs={"input": "interpolate-input-0"}, + outputs=[gen_name(18)]) + pattern_block.add_layer( + "prim.len", + inputs={"input": gen_name(18)}, + outputs=[gen_name(18)]) + pattern_block.add_layer( + "prim.eq", + inputs={"x": gen_name(18)}, + outputs=[gen_name(19)], + y=4) + pattern_block.add_layer( + "prim.if", + inputs={"input": gen_name(19)}, + outputs=[gen_name(20)]) + if_layer2 = pattern_block.layers[list(pattern_block.layers.keys())[ + -1]] + pattern_block_block = PaddleGraph(if_layer2, graph_type="dygraph") + pattern_block_block.add_layer( + "prim.tuple_unpack", + inputs={"input": gen_name(13)}, + outputs=[ + gen_name(34), gen_name(35), gen_name(36), gen_name(37) + ]) + pattern_block_block.add_layer( + "prim.is", + inputs={"x": gen_name(35)}, + outputs=[gen_name(38)], + y=None) + pattern_block_block.add_layer( + "prim.if", + inputs={"input": gen_name(38)}, + outputs=[gen_name(39), gen_name(40)]) + if_layer3 = pattern_block_block.layers[list( + pattern_block_block.layers.keys())[-1]] + pattern_block_block_block = PaddleGraph( + if_layer3, graph_type="dygraph") + pattern_block_block_block.add_layer( + "prim.is", + inputs={"x": gen_name(36)}, + outputs=[gen_name(41)], + y=None) + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(41)}, + outputs=[gen_name(39)]) + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(35)}, + outputs=[gen_name(40)]) + if_layer3.add_block(pattern_block_block_block) + pattern_block_block_block = PaddleGraph( + if_layer3, graph_type="dygraph") + pattern_block_block_block.add_layer( + 
"prim.equal", + inputs={"input": gen_name(35)}, + outputs=[gen_name(42)]) + pattern_block_block_block.add_layer( + "prim.equal", inputs={}, outputs=[gen_name(39)], input=False) + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(35)}, + outputs=[gen_name(40)]) + if_layer3.add_block(pattern_block_block_block) + if_layer3.inputs.update({ + "input-0": gen_name(36), + 'input-1': gen_name(35), + 'input-2': gen_name(35), + }) + pattern_block_block.add_layer( + "prim.if", + inputs={"input": gen_name(39)}, + outputs=[gen_name(43)]) + if_layer4 = pattern_block_block.layers[list( + pattern_block_block.layers.keys())[-1]] + pattern_block_block_block = PaddleGraph( + if_layer4, graph_type="dygraph") + pattern_block_block_block.add_layer( + "prim.exception", + inputs={}, + outputs=[gen_name(44)], + input="Exception") + if_layer4.add_block(pattern_block_block_block) + pattern_block_block_block = PaddleGraph( + if_layer4, graph_type="dygraph") + if_layer4.add_block(pattern_block_block_block) + pattern_block_block.add_layer( + "prim.isnot", + inputs={"x": gen_name(40)}, + outputs=[gen_name(45)], + y=None) + pattern_block_block.add_layer( + "prim.if", + inputs={"input": gen_name(45)}, + outputs=[gen_name(46), gen_name(47)]) + if_layer5 = pattern_block_block.layers[list( + pattern_block_block.layers.keys())[-1]] + pattern_block_block_block = PaddleGraph( + if_layer5, graph_type="dygraph") + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(40)}, + outputs=[gen_name(48)]) + pattern_block_block_block.add_layer( + "prim.isnot", + inputs={"x": gen_name(36)}, + outputs=[gen_name(49)], + y=None) + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(49)}, + outputs=[gen_name(46)]) + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(48)}, + outputs=[gen_name(47)]) + if_layer5.add_block(pattern_block_block_block) + pattern_block_block_block = PaddleGraph( + if_layer5, 
graph_type="dygraph") + pattern_block_block_block.add_layer( + "prim.equal", inputs={}, outputs=[gen_name(46)], input=False) + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(40)}, + outputs=[gen_name(47)]) + if_layer5.add_block(pattern_block_block_block) + if_layer5.inputs.update({ + "input-0": gen_name(40), + "input-1": gen_name(36), + "input-3": gen_name(40) + }) + pattern_block_block.add_layer( + "prim.if", + inputs={"input": gen_name(46)}, + outputs=[gen_name(50), gen_name(51)]) + if_layer6 = pattern_block_block.layers[list( + pattern_block_block.layers.keys())[-1]] + pattern_block_block_block = PaddleGraph( + if_layer6, graph_type="dygraph") + pattern_block_block_block.add_layer( + "prim.exception", + inputs={}, + outputs=[gen_name(52)], + input="Exception") + pattern_block_block_block.add_layer( + "prim.equal", inputs={}, outputs=[gen_name(50)], input=None) + pattern_block_block_block.add_layer( + "prim.equal", inputs={}, outputs=[gen_name(51)], input=None) + if_layer6.add_block(pattern_block_block_block) + pattern_block_block_block = PaddleGraph( + if_layer6, graph_type="dygraph") + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(36)}, + outputs=[gen_name(50)]) + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(47)}, + outputs=[gen_name(51)]) + if_layer6.add_block(pattern_block_block_block) + if_layer6.inputs.update({ + "input-0": gen_name(36), + "input-1": gen_name(47) + }) + pattern_block_block.add_layer( + "prim.isnot", + inputs={"x": gen_name(50)}, + outputs=[gen_name(53)], + y=None) + pattern_block_block.add_layer( + "prim.if", + inputs={"input": gen_name(53)}, + outputs=[gen_name(54)]) + if_layer7 = pattern_block_block.layers[list( + pattern_block_block.layers.keys())[-1]] + pattern_block_block_block = PaddleGraph( + if_layer7, graph_type="dygraph") + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(50)}, + 
outputs=[gen_name(55)]) + pattern_block_block_block.add_layer( + "prim.len", + inputs={"input": gen_name(55)}, + outputs=[gen_name(56)]) + pattern_block_block_block.add_layer( + "prim.ne", + inputs={"x": gen_name(56)}, + outputs=[gen_name(57)], + y=2) + pattern_block_block_block.add_layer( + "prim.if", + inputs={"input": gen_name(57)}, + outputs=[gen_name(58)]) + if_layer8 = pattern_block_block_block.layers[list( + pattern_block_block_block.layers.keys())[-1]] + pattern_block_block_block_block = PaddleGraph( + if_layer8, graph_type="dygraph") + pattern_block_block_block_block.add_layer( + "prim.exception", + inputs={}, + outputs=[gen_name(59)], + input="Exception") + if_layer8.add_block(pattern_block_block_block_block) + pattern_block_block_block_block = PaddleGraph( + if_layer8, graph_type="dygraph") + if_layer8.add_block(pattern_block_block_block_block) + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(55)}, + outputs=[gen_name(54)]) + if_layer7.add_block(pattern_block_block_block) + pattern_block_block_block = PaddleGraph( + if_layer7, graph_type="dygraph") + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(50)}, + outputs=[gen_name(54)]) + if_layer7.add_block(pattern_block_block_block) + if_layer7.inputs.update({ + "input-0": gen_name(50), + "input-1": gen_name(50) + }) + pattern_block_block.add_layer( + "prim.isnot", + inputs={"x": gen_name(51)}, + outputs=[gen_name(60)], + y=None) + pattern_block_block.add_layer( + "prim.if", + inputs={"input": gen_name(60)}, + outputs=[gen_name(61)]) + if_layer9 = pattern_block_block.layers[list( + pattern_block_block.layers.keys())[-1]] + pattern_block_block_block = PaddleGraph( + if_layer9, graph_type="dygraph") + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(51)}, + outputs=[gen_name(62)]) + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(62)}, + outputs=[gen_name(61)]) + 
if_layer9.add_block(pattern_block_block_block) + pattern_block_block_block = PaddleGraph( + if_layer9, graph_type="dygraph") + pattern_block_block_block.add_layer( + "prim.equal", inputs={}, outputs=[gen_name(61)], input=None) + if_layer9.add_block(pattern_block_block_block) + if_layer9.inputs.update({"input-0": gen_name(51)}) + pattern_block_block.add_layer( + "prim.if", + inputs={"input": gen_name(60)}, + outputs=[gen_name(63)]) + if_layer10 = pattern_block_block.layers[list( + pattern_block_block.layers.keys())[-1]] + pattern_block_block_block = PaddleGraph( + if_layer10, graph_type="dygraph") + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(61)}, + outputs=[gen_name(63)]) + if_layer10.add_block(pattern_block_block_block) + pattern_block_block_block = PaddleGraph( + if_layer10, graph_type="dygraph") + pattern_block_block_block.add_layer( + "prim.isnot", + inputs={"x": gen_name(54)}, + outputs=[gen_name(64)], + y=None) + pattern_block_block_block.add_layer( + "prim.if", + inputs={"input": gen_name(64)}, + outputs=[gen_name(65)]) + if_layer11 = pattern_block_block_block.layers[list( + pattern_block_block_block.layers.keys())[-1]] + pattern_block_block_block_block = PaddleGraph( + if_layer11, graph_type="dygraph") + pattern_block_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(54)}, + outputs=[gen_name(66)]) + pattern_block_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(66)}, + outputs=[gen_name(65)]) + if_layer11.add_block(pattern_block_block_block_block) + pattern_block_block_block_block = PaddleGraph( + if_layer11, graph_type="dygraph") + pattern_block_block_block_block.add_layer( + "prim.exception", + inputs={}, + outputs=[gen_name(67)], + input="Exception") + pattern_block_block_block_block.add_layer( + "prim.equal", inputs={}, outputs=[gen_name(65)], input=None) + if_layer11.add_block(pattern_block_block_block_block) + if_layer11.inputs.update({"input-0": gen_name(54), }) + 
pattern_block_block_block.add_layer( + "prim.is", + inputs={"x": gen_name(37)}, + outputs=[gen_name(68)], + y=None) + pattern_block_block_block.add_layer( + "prim.if", + inputs={"input": gen_name(68)}, + outputs=[gen_name(69)]) + if_layer12 = pattern_block_block_block.layers[list( + pattern_block_block_block.layers.keys())[-1]] + pattern_block_block_block_block = PaddleGraph( + if_layer12, graph_type="dygraph") + pattern_block_block_block_block.add_layer( + "prim.len", + inputs={"input": gen_name(65)}, + outputs=[gen_name(70)]) + pattern_block_block_block_block.add_layer( + "prim.gt", + inputs={"x": gen_name(70)}, + outputs=[gen_name(71)], + y=0) + pattern_block_block_block_block.add_layer( + "prim.equal", inputs={}, outputs=[gen_name(72)], input=0) + pattern_block_block_block_block.add_layer( + "prim.loop", + inputs={}, + outputs=[gen_name(74), gen_name(75), gen_name(76.1)], + input=2147483647) + loop_layer = pattern_block_block_block_block.layers[list( + pattern_block_block_block_block.layers.keys())[-1]] + pattern_loop_block = PaddleGraph(loop_layer, graph_type="dygraph") + pattern_loop_block.add_layer( + "prim.getitem", + inputs={"list": gen_name(65), + "element": gen_name(72)}, + outputs=[gen_name(74.1)]) + pattern_loop_block.add_layer( + "prim.floor", + inputs={"input": gen_name(74.1)}, + outputs=[gen_name(75.1)]) + pattern_loop_block.add_layer( + "prim.ne", + inputs={"x": gen_name(75.1), + "y": gen_name(74.1)}, + outputs=[gen_name(76)]) + pattern_loop_block.add_layer( + "prim.if", + inputs={"input": gen_name(76)}, + outputs=[gen_name(77), gen_name(78)]) + if_layer13 = pattern_loop_block.layers[list( + pattern_loop_block.layers.keys())[-1]] + pattern_loop_block_block = PaddleGraph( + if_layer13, graph_type="dygraph") + pattern_loop_block_block.add_layer( + "prim.equal", inputs={}, outputs=[gen_name(77)], input=False) + pattern_loop_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(76)}, + outputs=[gen_name(78)]) + 
if_layer13.add_block(pattern_loop_block_block) + pattern_loop_block_block = PaddleGraph( + if_layer13, graph_type="dygraph") + pattern_loop_block_block.add_layer( + "prim.equal", inputs={}, outputs=[gen_name(77)], input=None) + pattern_loop_block_block.add_layer( + "prim.equal", inputs={}, outputs=[gen_name(78)], input=None) + if_layer13.add_block(pattern_loop_block_block) + if_layer13.inputs.update({"input-0": gen_name(76), }) + pattern_loop_block.add_layer( + "prim.if", + inputs={"input": gen_name(76)}, + outputs=[gen_name(79), gen_name(80)]) + if_layer14 = pattern_loop_block.layers[list( + pattern_loop_block.layers.keys())[-1]] + pattern_loop_block_block = PaddleGraph( + if_layer14, graph_type="dygraph") + pattern_loop_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(77)}, + outputs=[gen_name(79)]) + pattern_loop_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(78)}, + outputs=[gen_name(80)]) + if_layer14.add_block(pattern_loop_block_block) + pattern_loop_block_block = PaddleGraph( + if_layer14, graph_type="dygraph") + pattern_loop_block_block.add_layer( + "prim.equal", inputs={}, outputs=[gen_name(79)], input=True) + pattern_loop_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(76)}, + outputs=[gen_name(80)]) + if_layer14.add_block(pattern_loop_block_block) + if_layer14.inputs.update({ + "input-0": gen_name(77), + "input-1": gen_name(78), + "input-2": gen_name(76) + }) + pattern_loop_block.add_layer( + "prim.add", + inputs={"x": gen_name(72)}, + outputs=[gen_name(81)], + y=1) + pattern_loop_block.add_layer( + "prim.lt", + inputs={"x": gen_name(81), + "y": gen_name(70)}, + outputs=[gen_name(82)]) + pattern_loop_block.add_layer( + "prim.and", + inputs={"x": gen_name(82), + "y": gen_name(79)}, + outputs=[gen_name(83)]) + pattern_loop_block.add_layer( + "prim.equal", + inputs={"input": gen_name(80)}, + outputs=[gen_name(74)]) + pattern_loop_block.add_layer( + "prim.equal", + inputs={"input": gen_name(81)}, + 
outputs=[gen_name(75)]) + loop_layer.add_block(pattern_loop_block) + loop_layer.inputs.update({ + "input-0": gen_name(65), + "input-1": gen_name(72), + "input-2": gen_name(72), + "input-3": gen_name(70) + }) + pattern_block_block_block_block.add_layer( + "prim.if", + inputs={"input": gen_name(74)}, + outputs=[gen_name(84)]) + if_layer15 = pattern_block_block_block_block.layers[list( + pattern_block_block_block_block.layers.keys())[-1]] + pattern_block_block_block_block_block = PaddleGraph( + if_layer15, graph_type="dygraph") + pattern_block_block_block_block_block.add_layer( + "prim.warnings", + inputs={}, + outputs=[gen_name(85)], + stacklevel=2, + input="...") + if_layer15.add_block(pattern_block_block_block_block_block) + pattern_block_block_block_block_block = PaddleGraph( + if_layer15, graph_type="dygraph") + if_layer15.add_block(pattern_block_block_block_block_block) + if_layer12.add_block(pattern_block_block_block_block) + pattern_block_block_block_block = PaddleGraph( + if_layer12, graph_type="dygraph") + if_layer12.add_block(pattern_block_block_block_block) + if_layer12.inputs.update({ + "input-0": gen_name(65), + "input-1": gen_name(65), + }) + pattern_block_block_block.add_layer( + "prim.list", inputs={}, outputs=[gen_name(86)]) + pattern_block_block_block.add_layer( + "prim.loop", + inputs={}, + outputs=[gen_name(87), gen_name(88)], + input=2) + loop_layer = pattern_block_block_block.layers[list( + pattern_block_block_block.layers.keys())[-1]] + pattern_loop_block = PaddleGraph(loop_layer, graph_type="dygraph") + pattern_loop_block.add_layer( + "prim.add", + inputs={"x": gen_name(88)}, + outputs=[gen_name(89)], + y=2) + pattern_loop_block.add_layer( + "prim.shape_dim", + inputs={"input": gen_name(34), + "dim": gen_name(89)}, + outputs=[gen_name(90)]) + pattern_loop_block.add_layer( + "prim.float", + inputs={"input": gen_name(90)}, + outputs=[gen_name(91)]) + pattern_loop_block.add_layer( + "prim.getitem", + inputs={"list": gen_name(65), + "element": 
gen_name(88)}, + outputs=[gen_name(92)]) + pattern_loop_block.add_layer( + "prim.mul", + inputs={"x": gen_name(91), + "y": gen_name(92)}, + outputs=[gen_name(93)]) + pattern_loop_block.add_layer( + "prim.floor", + inputs={"input": gen_name(93)}, + outputs=[gen_name(94)]) + pattern_loop_block.add_layer( + "prim.append", + inputs={"list": gen_name(86), + "element": gen_name(94)}, + outputs=[]) + loop_layer.add_block(pattern_loop_block) + loop_layer.inputs.update({ + "input-0": gen_name(34), + "input-1": gen_name(65), + "input-2": gen_name(86) + }) + pattern_block_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(86)}, + outputs=[gen_name(63)]) + if_layer10.add_block(pattern_block_block_block) + if_layer10.inputs.update({ + "input-0": gen_name(61), + "input-1": gen_name(54), + "input-2": gen_name(54), + "input-3": gen_name(37), + "input-4": gen_name(34) + }) + pattern_block_block.add_layer( + "prim.getitem", + inputs={"list": gen_name(11)}, + outputs=[gen_name(95)], + element=0) + pattern_block_block.add_layer( + "prim.getitem", + inputs={"list": gen_name(11)}, + outputs=[gen_name(96)], + element=1) + pattern_block_block.add_layer( + "prim.isinstance", + inputs={"input": gen_name(63)}, + outputs=["interpolate-input-0_isinstance"], + cls="paddle.fluid.Variable") + pattern_block_block.add_layer( + "prim.if", {"input": "interpolate-input-0_isinstance"}, + outputs=["interpolate-input-0_if1"]) + if_layer_isinstance = pattern_block_block.layers[list( + pattern_block_block.layers.keys())[-1]] + pattern_block_block_block = PaddleGraph( + if_layer_isinstance, graph_type="dygraph") + pattern_block_block_block.add_layer( + "prim.var2list", + inputs={"input": gen_name(63)}, + outputs=[gen_name(63)]) + if_layer_isinstance.add_block(pattern_block_block_block) + pattern_block_block_block = PaddleGraph( + if_layer_isinstance, graph_type="dygraph") + if_layer_isinstance.add_block(pattern_block_block_block) + if_layer_isinstance.inputs["input-0"] = gen_name(63) + 
pattern_block_block.add_layer( + "prim.assert", + inputs={"key": gen_name(95), + "value": gen_name(96)}, + outputs=[gen_name(97) + "_assert"], + type="eq") + pattern_block_block.add_layer( + "paddle.nn.functional.interpolate", + inputs={ + "input": "interpolate-input-0", + "size": gen_name(63), + "scale_factor": gen_name(95) + }, + outputs=[gen_name(97)], + align_corners=False, + align_mode=0) + pattern_block_block.add_layer( + "prim.equal", + inputs={"input": gen_name(97)}, + outputs=[gen_name(20)]) + if_layer2.add_block(pattern_block_block) + pattern_block_block = PaddleGraph(if_layer2, graph_type="dygraph") + pattern_block_block.add_layer( + "fluid.layers.shape", + inputs={"input": "interpolate-input-0"}, + outputs=[gen_name(98)]) + pattern_block_block.add_layer( + "prim.len", + inputs={"input": gen_name(98)}, + outputs=[gen_name(98)]) + pattern_block_block.add_layer( + "prim.eq", + inputs={"x": gen_name(98)}, + outputs=[gen_name(99)], + y=5) + pattern_block_block.add_layer( + "prim.if", + inputs={"input": gen_name(99)}, + outputs=[gen_name(100)]) + if_layer16 = pattern_block_block.layers[list( + pattern_block_block.layers.keys())[-1]] + pattern_block_block_block = PaddleGraph( + if_layer16, graph_type="dygraph") + pattern_block_block_block.add_layer( + "prim.exception", + inputs={}, + outputs=[gen_name(101)], + input="Exception") + if_layer16.add_block(pattern_block_block_block) + pattern_block_block_block = PaddleGraph( + if_layer16, graph_type="dygraph") + pattern_block_block_block.add_layer( + "prim.exception", + inputs={}, + outputs=[gen_name(102)], + input="Exception") + if_layer16.add_block(pattern_block_block_block) + pattern_block_block.add_layer( + "prim.equal", inputs={}, outputs=[gen_name(20)], input=None) + if_layer2.add_block(pattern_block_block) + if_layer2.inputs.update({ + "input-0": gen_name(13), + "input-1": gen_name(13), + "input-2": "interpolate-input-0", + "input-3": gen_name(11), + "input-5": gen_name(11), + }) + pattern_block.add_layer( + 
"prim.equal", + inputs={"input": gen_name(20)}, + outputs=[gen_name(16)]) + if_layer1.add_block(pattern_block) + if_layer1.inputs.update({ + "input-2": "interpolate-input-0", + "input-4": gen_name(13), + "input-7": gen_name(11), + "input-9": gen_name(11), + "input-11": "interpolate-input-0", + "input-12": "interpolate-input-0", + }) + self.pattern.build(inputs={ + "input-0": "interpolate-input-0", + "input-1": "interpolate-input-1" + }) + + def insert_new_layer(self, graph, parameters, matches): + new_layers = self.gen_new_layer(parameters, matches) + new_layer_id = list(matches.keys())[0] + graph.layers[new_layer_id] = new_layers[0] + matches.pop(new_layer_id) + new_layer_id = list(matches.keys())[0] + graph.layers[new_layer_id] = new_layers[1] + block_layer = new_layers[1].blocks[0].layers.pop( + list(new_layers[1].blocks[0].layers.keys())[-1]) + new_layers[1].blocks[0].layers[new_layer_id + ".0.0"] = block_layer + matches.pop(new_layer_id) + new_layer_id = list(matches.keys())[0] + graph.layers[new_layer_id] = new_layers[2] + matches.pop(new_layer_id) + + def gen_new_layer(self, parameters, matches): + layers = list() + layers_id = list(matches.keys()) + layer = matches[layers_id[6]] + size = layer.inputs["input1"] + layer = matches[layers_id[92]] + layer.inputs["input"] = size + layers.append(layer) + layer = matches[layers_id[93]] + block_layer = layer.blocks[0].layers[list(layer.blocks[0].layers.keys()) + [0]] + block_layer.inputs["input"] = size + block_layer.outputs[0] = size + layer.inputs["input-0"] = size + layers.append(layer) + layer = matches[layers_id[-1]] + outputs = layer.outputs + layer = matches[layers_id[96]] + layer.inputs.pop("scale_factor") + layer.inputs["size"] = size + layer.outputs = outputs + layers.append(layer) + return layers diff --git a/x2paddle/optimizer/pytorch_optimizer/fusion/reshape_fuse_pass.py b/x2paddle/optimizer/pytorch_optimizer/fusion/reshape_fuse_pass.py new file mode 100644 index 0000000..375b50b --- /dev/null +++ 
@pass_register
class ReshapeFusePass(Pass):
    """Optimization pass that collapses the int()/list()/reshape layer
    triple produced by the PyTorch converter into a simpler form.

    The actual graph surgery is delegated to ReshapeFuser; this class only
    hooks it into the pass registry under a stable name.
    """

    name = "reshape_fuse_pass"

    def __init__(self):
        super(ReshapeFusePass, self).__init__()

    def apply(self, graph):
        """Run the reshape fuser over `graph`.

        Edge-based matching is used because the layer order inside the
        pattern is not strictly fixed.
        """
        ReshapeFuser().operate(graph, match_kind="edge")


# Used for registration: instantiating at import time mirrors the other
# fuse-pass modules.
reshape_fuse_pass = ReshapeFusePass()
class ReshapeFuser(FuseBase):
    """Fuses the reshape subgraph emitted by the PyTorch converter.

    Python form of the matched pattern:
        x165 = int(x164)
        x166 = [x158, x159, x165]
        x167 = fluid.layers.reshape(x=x157, shape=x166)

    After fusion the list layer reads the raw value directly, bypassing
    the int() conversion layer, which is then deleted.
    """

    def __init__(self):
        super(ReshapeFuser, self).__init__(graph_type="dygraph")

    def build_pattern(self):
        """Describe the reshape graph structure that should be replaced."""
        self.pattern.add_layer(
            "prim.int",
            inputs={"input": "reshape-input-0"},
            outputs=["x0"])
        self.pattern.add_layer(
            "prim.list",
            inputs={
                "input0": "reshape-input-1",
                "input1": "reshape-input-2",
                "input2": "x0"
            },
            outputs=["x1"])
        self.pattern.add_layer(
            "fluid.layers.reshape",
            inputs={"x": "reshape-input-3",
                    "shape": "x1"},
            outputs=["x2"])
        self.pattern.build(inputs={
            "input-0": "reshape-input-0",
            "input-1": "reshape-input-1",
            "input-2": "reshape-input-2",
            "input-3": "reshape-input-3",
        })

    def insert_new_layer(self, graph, parameters, matches):
        """Rewire the matched layers, then drop the second and third entries
        from `matches` so that only the now-dead prim.int layer remains to
        be deleted by the base class afterwards."""
        self.update_layer(matches)
        for _ in range(2):
            matches.pop(list(matches.keys())[1])

    def update_layer(self, matches):
        """Point the prim.list layer at the prim.int layer's own input,
        bypassing the int() conversion."""
        matched_layers = list(matches.values())
        int_layer = matched_layers[0]
        list_layer = matched_layers[1]
        raw_input_name = int_layer.inputs["input"]
        int_output_name = int_layer.outputs[0]
        for key in list_layer.inputs:
            if list_layer.inputs[key] == int_output_name:
                list_layer.inputs[key] = raw_input_name
class GraphOptimizer(object):
    """Runs the registered fusion passes over a graph in a fixed order."""

    # Pass names in application order; each one must have been registered
    # with PassManager via its module's @pass_register decorator.
    _PASS_NAMES = (
        "constant_fuse_pass",
        "batchnorm2d_fuse_pass",
        "interpolate_bilinear_fuse_pass",
        "fc_fuse_pass",
        "adaptive_pool2d_fuse_pass",
        "reshape_fuse_pass",
        "dropout_fuse_pass",
    )

    def __init__(self):
        self.passes = list(self._PASS_NAMES)

    def optimize(self, graph):
        """Apply every pass in order and return the (mutated) graph."""
        for name in self.passes:
            # Look up the pass class by name and run a fresh instance.
            PassManager.lookup(name)().apply(graph)
            print("{} done!".format(name))
        return graph
class Pass(object):
    """Base class for graph-optimization passes.

    Subclasses set `name` (the registry key) and implement `apply`.
    """

    name = "pass"

    def __init__(self):
        pass

    def apply(self, graph):
        """Transform `graph` in place. Must be overridden by subclasses.

        Raises:
            NotImplementedError: always, on the base class.
        """
        raise NotImplementedError("The apply function must be implemented!")

    @classmethod
    def get_name(cls):
        """Return the registry name declared by the class."""
        return cls.name


class PassManager(object):
    """Pass registry mapping a pass name to its Pass subclass."""

    # pass_map stores each name together with its corresponding pass class.
    pass_map = dict()

    def __init__(self):
        pass

    @staticmethod
    def add_new_pass(name, pass_):
        """Register `pass_` under `name`; first registration wins."""
        if name not in PassManager.pass_map:
            PassManager.pass_map[name] = pass_

    @staticmethod
    def clear():
        """Empty the registry.

        Bug fix: the original assigned `PassManager.passes = list()`, which
        created an unrelated attribute and left `pass_map` untouched, so
        clear() never actually cleared anything.
        """
        PassManager.pass_map = dict()

    @staticmethod
    def lookup(name):
        """Return the pass class registered under `name`.

        Raises:
            KeyError: if no pass was registered under `name`.
        """
        return PassManager.pass_map[name]


def pass_register(cls):
    """Class decorator that registers `cls` under its declared name and
    returns the class unchanged."""
    name = cls.get_name()
    PassManager.add_new_pass(name, cls)
    return cls
class PatternMatcher(object):
    """Finds occurrences of a pattern graph inside a larger graph.

    Matching is either topological ("topo": the layer order must follow the
    pattern's layer order) or edge-driven ("edge": order-free, driven by the
    connections only).
    """

    def __init__(self, pattern):
        # `pattern` is the graph describing the subgraph to look for.
        self.pattern = pattern
        # Each entry of `matches` is a dict of layers keyed by layer id,
        # in topological order.
        self.matches = list()

    def operate(self, graph, match_kind="topo"):
        """Run the matcher over `graph` and return the de-overlapped matches."""
        if match_kind == "topo":
            self.detect_patterns_by_topo(graph)
        elif match_kind == "edge":
            self.detect_patterns_by_edge(graph)
        self.remove_overlapped_match()
        return self.matches

    def detect_patterns_by_topo(self, graph):
        """Find subgraphs that match the pattern and store each match's
        layer ids in topological order into a dict."""

        def get_subgraph(pattern, graph, start_index, is_subblock=False):
            # Walk graph.layers from `start_index`, consuming the pattern's
            # layers one by one; return the matched id->layer dict, or False.
            pattern_index = 0
            pattern_id2layers = pattern.get_global_layers()
            pattern_ids = list(pattern_id2layers.keys())
            subgraph_id2layers = dict()
            graph_layers = dict(list(graph.layers.items())[start_index:])
            for layer_id, layer in graph_layers.items():
                pattern_layer = pattern.layers[list(pattern.layers.keys())[
                    pattern_index]]
                if layer.kernel == pattern_layer.kernel:
                    subgraph_id2layers[layer_id] = layer
                    pattern_layer_id = pattern_layer.id
                    # Check that the input connections are consistent.
                    if layer_id in graph.edges_in:
                        if pattern_layer_id not in pattern.edges_in:
                            if pattern_index == 0 or is_subblock:
                                return False
                            else:
                                subgraph_id2layers.pop(layer_id)
                                continue
                        else:
                            if len(graph.edges_in[layer_id]) != len(
                                    pattern.edges_in[pattern_layer_id]):
                                if pattern_index == 0 or is_subblock:
                                    return False
                                else:
                                    subgraph_id2layers.pop(layer_id)
                                    continue
                        layer_in = graph.edges_in[layer_id]
                        pattern_layer_in = pattern.edges_in[pattern_layer_id]
                        for i in range(len(layer_in)):
                            layer_id_in = layer_in[i]
                            pattern_layer_id_in = pattern_layer_in[i]
                            if pattern_layer_id_in != -1:
                                subgraph_ids = list(subgraph_id2layers.keys())
                                if layer_id_in not in subgraph_ids:
                                    return False
                                if pattern_ids.index(pattern_layer_id_in) == \
                                        subgraph_ids.index(layer_id_in):
                                    # The pattern input's index in pattern_ids
                                    # agrees with the graph input's index in
                                    # subgraph_ids.
                                    continue
                                if pattern_index == 0 or is_subblock:
                                    return False
                                else:
                                    subgraph_id2layers.pop(layer_id)
                                    continue
                    # Check whether a node of the subgraph is consumed by the
                    # outer graph (which would make the match invalid).
                    if layer_id in graph.edges_out:
                        if pattern_layer_id not in pattern.edges_out:
                            if not set(pattern_layer.outputs).issubset(
                                    pattern.outputs):
                                # It is fine when this pattern layer's outputs
                                # are outputs of the pattern itself.
                                if pattern_index == 0 or is_subblock:
                                    return False
                                else:
                                    subgraph_id2layers.pop(layer_id)
                                    continue
                        else:
                            if len(graph.edges_out[layer_id]) != len(
                                    pattern.edges_out[pattern_layer_id]):
                                # With identical edges_in, an identical
                                # edges_out count means no node is used
                                # outside the subgraph.
                                if not set(pattern_layer.outputs).issubset(
                                        pattern.outputs):
                                    if pattern_index == 0 or is_subblock:
                                        return False
                                    else:
                                        subgraph_id2layers.pop(layer_id)
                                        continue
                    # Control-flow layers: match their blocks recursively.
                    if layer.kernel == "prim.if" or layer.kernel == "prim.loop":
                        if len(pattern_layer.blocks) != len(layer.blocks):
                            if pattern_index == 0 or is_subblock:
                                return False
                            else:
                                subgraph_id2layers.pop(layer_id)
                                continue
                        is_subblock_match = True
                        for i, b in enumerate(pattern_layer.blocks):
                            match_info = get_subgraph(
                                pattern_layer.blocks[i],
                                layer.blocks[i],
                                0,
                                is_subblock=True)
                            if match_info is not False:
                                subgraph_id2layers.update(match_info)
                            else:
                                is_subblock_match = False
                                break
                        if not is_subblock_match:
                            if pattern_index == 0 or is_subblock:
                                return False
                            else:
                                # Roll back everything matched from this
                                # control-flow layer onwards.
                                index = list(subgraph_id2layers.keys()).index(
                                    layer_id)
                                for key in list(subgraph_id2layers.keys())[
                                        index:]:
                                    subgraph_id2layers.pop(key)
                                continue
                    pattern_index += 1
                    if pattern_index == len(pattern.layers):
                        return subgraph_id2layers
                else:
                    if pattern_index == 0 or is_subblock:
                        return False
                    else:
                        continue
            if pattern_index == len(pattern.layers):
                return subgraph_id2layers
            return False

        for i, (layer_id, layer) in enumerate(graph.layers.items()):
            match_info = get_subgraph(self.pattern, graph, i)
            if match_info:
                self.matches.append(match_info)
            # Recurse into control-flow sub-blocks.
            for j, block in enumerate(layer.blocks):
                if len(block.layers) > 0:
                    self.detect_patterns_by_topo(layer.blocks[j])

    def detect_patterns_by_edge(self, graph, ignore_list_inputs=True):
        """Used when the pattern does not enforce a strict layer order;
        matching follows the edges instead of topological position."""

        def get_subgraph(pattern, graph, start_index):
            pattern_id2layers = pattern.get_global_layers()
            pattern_ids = list(pattern_id2layers.keys())
            pattern_layer_id = pattern_ids[0]
            subgraph_id2layers = dict()
            graph_layers = dict(list(graph.layers.items())[start_index:])
            layer_id = list(graph_layers.keys())[0]

            def update(layer_id, pattern_layer_id):
                # Try to match `layer_id` against `pattern_layer_id`,
                # recursively following the outgoing edges; returns False on
                # a kernel/edge mismatch, None otherwise.
                layer = graph_layers[layer_id]
                pattern_layer = pattern_id2layers[pattern_layer_id]
                if layer.kernel != pattern_layer.kernel:
                    return False
                subgraph_id2layers[layer_id] = layer
                for i, pattern_layer_id_in in enumerate(pattern.edges_in[
                        pattern_layer_id]):
                    if pattern_layer_id_in == -1 or ignore_list_inputs:
                        continue
                    layer_id_in = graph.edges_in[layer_id][i]
                    subgraph_ids = list(subgraph_id2layers.keys())
                    if layer_id_in not in subgraph_ids:
                        return False
                if pattern.edges_out.get(pattern_layer_id, 0) != 0:
                    if len(pattern.edges_out[pattern_layer_id]) != \
                            len(graph.edges_out[layer_id]):
                        return False
                    for i, pattern_layer_id_out in enumerate(pattern.edges_out[
                            pattern_layer_id]):
                        if pattern_layer_id_out in pattern_ids:
                            new_layer_id_out = graph.edges_out[layer_id][i]
                            for j, new_new_layer_id_in in enumerate(
                                    graph.edges_in[new_layer_id_out]):
                                if new_new_layer_id_in not in subgraph_id2layers:
                                    if ignore_list_inputs:
                                        continue
                                    new_new_pattern_layer_id_in = pattern.edges_in[
                                        pattern_layer_id_out][j]
                                    if new_new_pattern_layer_id_in == -1:
                                        continue
                                    update(new_new_layer_id_in,
                                           new_new_pattern_layer_id_in)
                            update(new_layer_id_out, pattern_layer_id_out)

            while len(subgraph_id2layers) != len(pattern_id2layers):
                # NOTE(review): every branch below returns, so this loop runs
                # at most one iteration.
                out = update(layer_id, pattern_layer_id)
                if out == False:
                    return False
                else:
                    if len(subgraph_id2layers) == len(pattern_id2layers):
                        return subgraph_id2layers
                    else:
                        return False

        for i, (layer_id, layer) in enumerate(graph.layers.items()):
            match_info = get_subgraph(self.pattern, graph, i)
            if match_info:
                self.matches.append(match_info)
            for j, block in enumerate(layer.blocks):
                if len(block.layers) > 0:
                    self.detect_patterns_by_edge(layer.blocks[j])

    def remove_overlapped_match(self):
        """If two matches overlap, keep only the earlier one.

        Bug fix: the original popped from self.matches while iterating it
        with enumerate(); each removal skipped the following element, so an
        overlapped match could survive the filter. The list is now rebuilt
        instead of being mutated in place.
        """
        match_ids = []
        kept_matches = []
        for match in self.matches:
            if any(layer_id in match_ids for layer_id in match.keys()):
                # Overlaps an already-kept match; drop it.
                continue
            match_ids.extend(list(match.keys()))
            kept_matches.append(match)
        self.matches = kept_matches


def get_subgraph(prefix_layer_id, suffix_layer_id, graph):
    """Resolve the (sub)graph that a dotted layer id refers to.

    Args:
        prefix_layer_id (str): Initially an empty string; afterwards holds
            the prefix split off from suffix_layer_id.
        suffix_layer_id (str): Initially a full layer id such as "57.0.1";
            its leading parts are moved into prefix_layer_id on each
            recursion step.
        graph (x2paddle.core.program.PaddleGraph): The graph being rewritten.
    """
    id_part = suffix_layer_id.split(".")
    if len(id_part) == 1:
        return graph
    if prefix_layer_id == "":
        layer_id = id_part[0]
        prefix_layer_id += ".".join(id_part[:2])
    else:
        layer_id = prefix_layer_id + "." + id_part[0]
        prefix_layer_id += ("." + ".".join(id_part[:2]))
    subgraph = graph.layers[layer_id].blocks[int(id_part[1])]
    suffix_layer_id = ".".join(id_part[2:])
    return get_subgraph(prefix_layer_id, suffix_layer_id, subgraph)


class FuseBase(object):
    """Base class for fusers: builds a pattern, matches it in the graph, and
    lets the subclass splice in the replacement layers."""

    def __init__(self, graph_type):
        self.pattern = PaddleGraph(graph_type=graph_type)

    def operate(self, graph, match_kind="topo"):
        """Match self.pattern inside `graph` and rewrite every occurrence."""
        parameters = graph.parameters
        self.build_pattern()
        self.perform_pattern_matcher(graph, match_kind)
        for match in self.matches:
            first_layer_id = list(match.keys())[0]
            subgraph = get_subgraph("", first_layer_id, graph)
            self.insert_new_layer(subgraph, parameters, match)
        self.delete_inter_layer(graph)
        graph.build()

    def perform_pattern_matcher(self, graph, match_kind="topo"):
        """Run pattern matching and store the matched subgraphs."""
        pattern_matcher = PatternMatcher(self.pattern)
        self.matches = pattern_matcher.operate(graph, match_kind)

    def delete_inter_layer(self, graph):
        """Delete the unneeded intermediate layers and their parameters."""
        for match in self.matches:
            first_layer_id = list(match.keys())[0]
            subgraph = get_subgraph("", first_layer_id, graph)
            for layer_id, layer in match.items():
                if layer.kernel == "fluid.dygraph.base.to_variable" and \
                        layer.attrs["value"].startswith("params["):
                    param_name = layer.attrs["value"][8:-2]
                    if param_name in graph.parameters:
                        graph.parameters.pop(param_name)
                if layer_id in subgraph.layers:
                    # layer_id may belong to a sub-block; deleting the parent
                    # layer removes the whole sub-block with it.
                    subgraph.layers.pop(layer_id)