Unverified commit 6ab5420c, authored by SunAhong1993, committed by GitHub

Merge pull request #11 from PaddlePaddle/paddle-2.0

Paddle 2.0
......@@ -16,7 +16,6 @@
from __future__ import print_function
from __future__ import division
import paddle.fluid as fluid
import os.path as osp
import paddle
from paddle.fluid.proto import framework_pb2
from collections import OrderedDict
......@@ -26,6 +25,7 @@ import os
import six
import pickle
import numpy as np
from os import path as osp
class PaddleLayer(object):
......@@ -77,7 +77,6 @@ class PaddleGraph(object):
self.custom_code = None
self.inputs_info = None
def set_name(self, name):
self.name = name.replace("-", "_").replace("/", "_")
......@@ -233,7 +232,7 @@ class PaddleGraph(object):
return update(self.layers)
def gen_model(self, save_dir, jit_type=None):
if not os.path.exists(save_dir):
if not osp.exists(save_dir):
os.makedirs(save_dir)
if self.graph_type == "static":
self.gen_static_model(save_dir)
......@@ -241,8 +240,8 @@ class PaddleGraph(object):
self.gen_dygraph_model(save_dir, jit_type)
def gen_static_model(self, save_dir):
code_dir = os.path.join(save_dir, 'model_with_code')
infer_dir = os.path.join(save_dir, 'inference_model')
code_dir = osp.join(save_dir, 'model_with_code')
infer_dir = osp.join(save_dir, 'inference_model')
self.gen_static_code(code_dir)
sys.path.append(code_dir)
import x2paddle_model
......@@ -255,13 +254,13 @@ class PaddleGraph(object):
inputs, outputs = x2paddle_model.x2paddle_net()
exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup_program)
param_dir = os.path.join(code_dir, 'weights')
param_dir = osp.join(code_dir, 'weights')
for k, v in self.parameters.items():
if scope.find_var(k):
self.dump_parameter(k, v, param_dir)
def if_exist(var):
b = os.path.exists(
os.path.join(os.path.join(param_dir, var.name)))
b = osp.exists(osp.join(param_dir, var.name))
return b
fluid.io.load_vars(
exe, param_dir, main_program, predicate=if_exist)
......@@ -283,13 +282,20 @@ class PaddleGraph(object):
self.gen_dygraph_code(save_dir)
self.dump_dygraph_parameter(save_dir)
# Dynamic-to-static conversion
code_path = osp.join(osp.abspath(save_dir), "x2paddle_code.py")
print("Exporting inference model from Python code ('{}')...\n".format(code_path))
if len(self.inputs_info) > 0:
input_shapes = list()
input_types = list()
for input_name in self.inputs:
input_shapes.append(self.inputs_info[input_name][0])
input_types.append(self.inputs_info[input_name][1])
self.dygraph2static(save_dir, input_shapes, input_types)
try:
self.dygraph2static(save_dir, input_shapes, input_types)
except Exception as e:
print("Failed to generate the inference model! A problem occurred while exporting the inference model from the Python code '{}';\n".format(code_path))
print("===================Error Information===============")
raise e
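For orientation, the dynamic-to-static export that dygraph2static performs amounts to roughly the following under the public Paddle 2.0 API; this is a minimal sketch, where `model` and the input spec are illustrative placeholders, not taken from this diff:

import paddle
from paddle.static import InputSpec

# Assumed names: `model` is an instance of the generated Layer; the shape is a placeholder.
spec = [InputSpec(shape=[None, 3, 224, 224], dtype="float32")]
static_model = paddle.jit.to_static(model, input_spec=spec)
paddle.jit.save(static_model, "save_dir/inference_model/model")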
def gen_static_code(self, code_dir):
def write_code(f, code_list, indent=0):
......@@ -300,9 +306,9 @@ class PaddleGraph(object):
else:
f.write(indent_blank + code_line + '\n')
if not os.path.exists(code_dir):
if not osp.exists(code_dir):
os.makedirs(code_dir)
f = open(os.path.join(code_dir, 'x2paddle_model.py'), 'w')
f = open(osp.join(code_dir, 'x2paddle_model.py'), 'w')
write_code(
f, [
......@@ -365,7 +371,7 @@ class PaddleGraph(object):
def dump_parameter(self, param_name, param, save_dir):
if not os.path.exists(save_dir):
if not osp.exists(save_dir):
os.makedirs(save_dir)
dtype_map = {
"int16": [framework_pb2.VarType.INT16, 'h'],
......@@ -385,7 +391,7 @@ class PaddleGraph(object):
assert str(
param.dtype) in dtype_map, "Unknown dtype {} of params: {}.".format(
str(param.dtype), param_name)
fp = open(os.path.join(save_dir, param_name), 'wb')
fp = open(osp.join(save_dir, param_name), 'wb')
np.array([0], dtype='int32').tofile(fp)
np.array([0], dtype='int64').tofile(fp)
np.array([0], dtype='int32').tofile(fp)
......@@ -447,6 +453,9 @@ class PaddleGraph(object):
if self.source_type == "caffe":
custom_import = "from x2paddle.op_mapper.dygraph.caffe2paddle " + \
"import caffe_custom_layer as x2paddle_nn"
elif self.source_type == "pytorch":
custom_import = "from x2paddle.op_mapper.dygraph.pytorch2paddle " + \
"import pytorch_custom_layer as x2paddle_nn"
else:
custom_import = ""
self.head = gen_codes(
......@@ -455,6 +464,7 @@ class PaddleGraph(object):
"from paddle.fluid.param_attr import ParamAttr",
"import paddle",
"import paddle.fluid as fluid",
"import math",
custom_import,
"",
"class {}(paddle.nn.Layer):".format(self.name),
......@@ -491,7 +501,7 @@ class PaddleGraph(object):
use_structured_name = False if self.source_type in ["tf", "onnx"] else True
self.run_func.extend(
gen_codes(["paddle.disable_static()",
"params, _ = fluid.load_dygraph('{}/model')".format(code_dir),
"params = paddle.load('{}/model.pdparams')".format(osp.abspath(code_dir)),
"model = {}()".format(self.name),
"model.set_dict(params, use_structured_name={})".format(use_structured_name),
"model.eval()",
......@@ -499,7 +509,7 @@ class PaddleGraph(object):
"return out"], indent=1))
def write_code(code_dir):
f = open(os.path.join(code_dir, 'x2paddle_code.py'), 'w')
f = open(osp.join(code_dir, 'x2paddle_code.py'), 'w')
for code_line in self.head:
f.write(code_line)
init_writen_codes = []
......@@ -590,7 +600,10 @@ class PaddleGraph(object):
if isinstance(v, list):
line += "{}=[{}], ".format(k, ", ".join(v))
else:
line += "{}={}, ".format(k, v)
if k == "args":
line += v
else:
line += "{}={}, ".format(k, v)
for k, v in layer.attrs.items():
line += "{}={}, ".format(k, v)
line = line.strip(", ")
......@@ -608,9 +621,8 @@ class PaddleGraph(object):
return self.init_func, self.forward_func
def dump_dygraph_parameter(self, code_dir):
params_output = open(os.path.join(code_dir, 'model.pdparams'), 'wb')
pickle.dump(self.parameters, params_output)
params_output.close()
save_path = osp.join(code_dir, 'model.pdparams')
paddle.save(self.parameters, save_path)
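The parameters now round-trip through the Paddle 2.0 serialization API instead of raw pickle; a minimal sketch, assuming `parameters` is the graph's parameter dict and `model` an instance of the generated Layer:

import paddle

# Save the parameter dict (replaces pickle.dump on a raw file handle)...
paddle.save(parameters, "save_dir/model.pdparams")
# ...and load it back (replaces fluid.load_dygraph, which returned a tuple).
params = paddle.load("save_dir/model.pdparams")
model.set_dict(params)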
def dygraph2static(self, save_dir, input_shapes=[], input_types=[]):
from paddle.fluid.dygraph.jit import declarative
......@@ -624,7 +636,7 @@ class PaddleGraph(object):
sys.path.insert(0, save_dir)
import x2paddle_code
paddle.disable_static()
restore, _ = fluid.load_dygraph(osp.join(save_dir, "model"))
restore = paddle.load(osp.join(save_dir, "model.pdparams"))
model = getattr(x2paddle_code, self.name)()
if self.source_type in ["tf", "onnx"]:
model.set_dict(restore, use_structured_name=False)
......
......@@ -402,7 +402,7 @@ class TFDecoder(object):
right_shape_been_input = False
while not right_shape_been_input:
try:
shape = raw_input(
shape = input(
"Shape of Input(e.g. None,224,224,3): ")
except:
shape = input("Shape of Input(e.g. None,224,224,3): ")
......
......@@ -752,6 +752,56 @@ def aten_chunk(mapper, graph, node):
return current_inputs, current_outputs
def aten_clamp(mapper, graph, node):
""" Construct a PaddleLayer that clips tensor elements to a range.
TorchScript example:
%56 : Tensor = aten::clamp(%input.1, %46, %48, %49)
Parameter meanings:
%56 (Tensor): output, the clipped result.
%input.1 (Tensor): input, the Tensor to be clipped.
%46 (float/Tensor): the minimum value.
%48 (float/Tensor): the maximum value.
"""
scope_name = mapper.normalize_scope_name(node)
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Get the list of outputs of the current node
current_outputs = [output_name]
# Process input 0, i.e. %input.1
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, scope_name)
layer_inputs["x"] = inputs_name[0]
# Get the lists of inputs and outputs of the current node
current_inputs = list(layer_inputs.values())
# Process input 1, i.e. %46
if inputs_name[1] in mapper.attrs:
layer_attrs["min"] = mapper.attrs[inputs_name[1]]
else:
mapper._check_input(graph, inputs_node[1], inputs_name[1],
current_outputs, scope_name)
layer_inputs["min"] = inputs_name[1]
current_inputs.append(inputs_name[1])
# Process input 2, i.e. %48, the maximum value
if inputs_name[2] in mapper.attrs:
layer_attrs["max"] = mapper.attrs[inputs_name[2]]
else:
mapper._check_input(graph, inputs_node[2], inputs_name[2],
current_outputs, scope_name)
layer_inputs["max"] = inputs_name[2]
current_inputs.append(inputs_name[2])
graph.add_layer(
"paddle.clip",
inputs=layer_inputs,
outputs=layer_outputs,
scope_name=scope_name,
**layer_attrs)
return current_inputs, current_outputs
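The mapping relies on paddle.clip being the Paddle 2.0 counterpart of torch.clamp; a quick illustrative check:

import paddle

x = paddle.to_tensor([-1.5, 0.3, 2.7])
# Equivalent to torch.clamp(x, min=-1.0, max=1.0)
y = paddle.clip(x, min=-1.0, max=1.0)  # [-1.0, 0.3, 1.0]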
def aten___contains__(mapper, graph, node):
""" Construct a PaddleLayer for the `in` operator.
......@@ -810,7 +860,7 @@ def aten_constant_pad_nd(mapper, graph, node):
# Process input 1, i.e. %4876
layer_attrs["padding"] = mapper.attrs[inputs_name[1]]
# Process input 2, i.e. %42
layer_attrs["pad_value"] = mapper.attrs[inputs_name[2]]
layer_attrs["value"] = mapper.attrs[inputs_name[2]]
graph.add_layer(
"prim.shape",
......@@ -856,7 +906,7 @@ def aten_constant_pad_nd(mapper, graph, node):
block.add_layer(
kernel,
inputs={"input": inputs_name[0] + "_var"},
outputs=layer_outputs,
outputs=copy.deepcopy(layer_outputs),
scope_name=scope_name,
**layer_attrs)
block.add_layer(
......@@ -1517,76 +1567,28 @@ def aten_expand(mapper, graph, node):
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Get the list of outputs of the current node
current_outputs = [output_name]
# Process input 0, i.e. %1875
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, scope_name)
layer_inputs["x"] = inputs_name[0]
# Process input 1, i.e. %1888
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs, scope_name)
graph.add_layer(
"prim.type",
inputs={"input": inputs_name[0]},
outputs=[inputs_name[0] + "_type"],
scope_name=scope_name)
graph.add_layer(
"prim.str",
inputs={"input": inputs_name[0] + "_type"},
outputs=[inputs_name[0] + "_type"],
scope_name=scope_name)
graph.add_layer(
"prim.eq",
inputs={"x": inputs_name[0] + "_type"},
outputs=[inputs_name[0] + "_cond"],
scope_name=scope_name,
y=string("VarType.BOOL"))
graph.add_layer(
"prim.if", {'input': inputs_name[0] + "_cond"},
outputs=[inputs_name[0] + "_if1", inputs_name[1] + "_var"],
scope_name=scope_name)
if_layer = graph.layers[list(graph.layers.keys())[-1]]
block = PaddleGraph(parent_layer=if_layer, graph_type="dygraph")
block.add_layer(
"paddle.cast",
inputs={"x": inputs_name[0]},
outputs=[inputs_name[0]],
scope_name=scope_name,
dtype=string("int64"))
block.add_layer(
"self.create_parameter",
inputs={"shape": inputs_name[1]},
outputs=[inputs_name[1] + "_var"],
scope_name=scope_name,
dtype=string("int64"),
default_initializer="paddle.nn.initializer.Constant(value=0.0)")
if_layer.add_block(block)
block = PaddleGraph(parent_layer=if_layer, graph_type="dygraph")
block.add_layer(
"prim.type",
inputs={"input": inputs_name[0]},
outputs=[inputs_name[0] + "_type"],
scope_name=scope_name)
block.add_layer(
"self.create_parameter",
inputs={"shape": inputs_name[1]},
outputs=[inputs_name[1] + "_var"],
scope_name=scope_name,
dtype=inputs_name[0] + "_type",
default_initializer="paddle.nn.initializer.Constant(value=0.0)")
if_layer.add_block(block)
if_layer.inputs["input-0"] = inputs_name[0]
if_layer.inputs["input-1"] = inputs_name[1]
layer_inputs["y"] = inputs_name[1] + "_var"
current_outputs.append(inputs_name[1] + "_var")
# Get the list of inputs of the current node
current_inputs = list(layer_inputs.values())
current_inputs.append(inputs_name[1])
# Process input 1, i.e. %51
if inputs_name[1] in mapper.attrs:
layer_attrs["shape"] = mapper.attrs[inputs_name[1]]
else:
mapper._check_input(graph, inputs_node[1], inputs_name[1],
current_outputs, scope_name)
layer_inputs["shape"] = inputs_name[1]
current_inputs.append(inputs_name[1])
graph.add_layer(
"paddle.expand_as", inputs=layer_inputs, outputs=layer_outputs, scope_name=scope_name)
"paddle.expand",
inputs=layer_inputs,
outputs=layer_outputs,
scope_name=scope_name,
**layer_attrs)
return current_inputs, current_outputs
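The rewrite drops the old expand_as/create_parameter workaround in favor of a direct paddle.expand call with an explicit target shape; illustratively:

import paddle

x = paddle.to_tensor([[1.0], [2.0]])  # shape [2, 1]
# Equivalent to torch.Tensor.expand(2, 3)
y = paddle.expand(x, shape=[2, 3])    # shape [2, 3]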
......@@ -1841,11 +1843,39 @@ def aten_floor(mapper, graph, node):
current_outputs = [output_name]
# Process input 0, i.e. %scale.18
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, scope_name)
layer_inputs["input"] = inputs_name[0]
layer_inputs["x"] = inputs_name[0]
# Get the list of inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.floor", inputs=layer_inputs, outputs=layer_outputs, scope_name=scope_name)
graph.add_layer(
"prim.type",
{'input': inputs_name[0]},
outputs=[inputs_name[0] + "_type"],
scope_name=scope_name)
graph.add_layer(
"prim.str",
{'input': inputs_name[0] + "_type"},
outputs=[inputs_name[0] + "_type"],
scope_name=scope_name)
graph.add_layer(
"prim.startswith",
{'input': inputs_name[0] + "_type"},
outputs=[inputs_name[0] + "_cond"],
scope_name=scope_name,
start_str=string("VarType"))
graph.add_layer(
"prim.if",
{'input': inputs_name[0] + "_cond"},
outputs=[inputs_name[0] + "_if"],
scope_name=scope_name)
if_layer = graph.layers[list(graph.layers.keys())[-1]]
block = PaddleGraph(parent_layer=if_layer, graph_type="dygraph")
block.add_layer("paddle.floor", inputs=copy.deepcopy(layer_inputs), outputs=copy.deepcopy(layer_outputs), scope_name=scope_name)
if_layer.add_block(block)
block = PaddleGraph(parent_layer=if_layer, graph_type="dygraph")
block.add_layer("prim.floor", inputs=copy.deepcopy(layer_inputs), outputs=copy.deepcopy(layer_outputs), scope_name=scope_name)
if_layer.add_block(block)
if_layer.inputs["input-0"] = inputs_name[0]
if_layer.outputs.append(output_name)
return current_inputs, current_outputs
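Expanded into plain Python, the emitted prim.type/prim.str/prim.startswith/prim.if chain behaves roughly like the sketch below (a hedged reconstruction, not the literal generated output): tensors are routed to paddle.floor and plain Python numbers to math.floor:

import math
import paddle

def floor_like(x):
    # Fluid tensors report dtypes such as "VarType.FP32"; plain Python
    # numbers have no dtype attribute and fall through to math.floor.
    if str(getattr(x, "dtype", "")).startswith("VarType"):
        return paddle.floor(x)
    return math.floor(x)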
......@@ -1957,6 +1987,46 @@ def aten_full_like(mapper, graph, node):
return current_inputs, current_outputs
def aten_gather(mapper, graph, node):
""" Construct a PaddleLayer for the gather operation.
TorchScript example:
%result.3 : Tensor = aten::gather(%input.5, %18, %19, %20, %21)
Parameter meanings:
%result.3 (Tensor): output, the result of the gather.
%input.5 (Tensor): the Tensor to gather from.
%18 (int): the dimension to gather along.
%19 (Tensor): the indices to gather.
"""
scope_name = mapper.normalize_scope_name(node)
op_name = name_generator("gather", mapper.nn_name2id)
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [op_name, output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Get the list of outputs of the current node
current_outputs = [output_name]
# Process input 0, i.e. %input.5
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, scope_name)
layer_inputs["x"] = inputs_name[0]
# Process input 1, i.e. %18
layer_attrs["dim"] = mapper.attrs[inputs_name[1]]
# Process input 2, i.e. %19
mapper._check_input(graph, inputs_node[2], inputs_name[2], current_outputs, scope_name)
layer_inputs["index"] = inputs_name[2]
# Get the list of inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer(
"custom_layer:Gather",
inputs=layer_inputs,
outputs=layer_outputs,
scope_name=scope_name,
**layer_attrs)
return current_inputs, current_outputs
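Because the kernel is registered as custom_layer:Gather, the code generator wires it up as a module attribute rather than an inline call; the generated module looks roughly like this sketch (the class, tensor names, and dim value are illustrative):

import paddle
from x2paddle.op_mapper.dygraph.pytorch2paddle import pytorch_custom_layer as x2paddle_nn

class Net(paddle.nn.Layer):
    def __init__(self):
        super(Net, self).__init__()
        # emitted into __init__ for the custom_layer:Gather kernel
        self.gather0 = x2paddle_nn.Gather(dim=1)

    def forward(self, x, index):
        # emitted into forward
        return self.gather0(x=x, index=index)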
def aten_gelu(mapper, graph, node):
""" 构造GeLU激活的PaddleLayer。
......@@ -2855,6 +2925,33 @@ def aten_mean(mapper, graph, node):
return current_inputs, current_outputs
def aten_meshgrid(mapper, graph, node):
""" Construct a PaddleLayer that broadcasts each input tensor into a grid.
TorchScript example:
%out.39 : Tensor[] = aten::meshgrid(%input.1)
Parameter meanings:
%out.39 (Tensor[]): output, the broadcast results.
%input.1 (Tensor): input.
"""
scope_name = mapper.normalize_scope_name(node)
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Get the list of outputs of the current node
current_outputs = [output_name]
# Process input 0, i.e. %input.1
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, scope_name)
layer_inputs["args"] = inputs_name[0]
# Get the list of inputs of the current node
current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs
graph.add_layer("paddle.meshgrid", inputs=layer_inputs, outputs=layer_outputs, scope_name=scope_name)
return current_inputs, current_outputs
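paddle.meshgrid mirrors torch.meshgrid and takes the tensors directly rather than as a keyword, which is why the inputs are passed through the special-cased "args" key above; for example:

import paddle

x = paddle.arange(3)
y = paddle.arange(4)
# Equivalent to torch.meshgrid(x, y); both outputs have shape [3, 4]
grid_x, grid_y = paddle.meshgrid(x, y)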
def aten_mul(mapper, graph, node):
""" 构造数值相乘的PaddleLayer。
......
......@@ -180,7 +180,7 @@ def prim_float(layer, indent=1, init_func=[], forward_func=[], layer_id=None, di
def prim_floor(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = math.floor({})".format(layer.outputs[0],
get_value(layer, "input", different_attrs))
get_value(layer, "x", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
......@@ -404,6 +404,13 @@ def prim_slice(layer, indent=1, init_func=[], forward_func=[], layer_id=None, di
get_value(layer, "end", different_attrs),
get_value(layer, "step", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_startswith(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = {}.startswith({})".format(layer.outputs[0],
get_value(layer, "input", different_attrs),
get_value(layer, "start_str", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_str(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
......@@ -451,3 +458,4 @@ def prim_warnings(layer, indent=1, init_func=[], forward_func=[], layer_id=None,
get_value(layer, "input", different_attrs), layer.attrs["stacklevel"])
lines.append(line)
forward_func.extend(gen_codes(lines, indent=indent))
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .gather import Gather
\ No newline at end of file
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.fluid as fluid
from itertools import product
import numpy as np
class Gather(object):
def __init__(self, dim):
self.dim = dim
self.dtype_mapping = {"VarType.INT32": "int32",
"VarType.INT64": "int64"}
def __call__(self, x, index):
if self.dim < 0:
self.dim += len(x.shape)
# Swap the gather axis to the front of both x and index.
x_range = list(range(len(x.shape)))
x_range[0] = self.dim
x_range[self.dim] = 0
x_swaped = paddle.transpose(x, perm=x_range)
index_range = list(range(len(index.shape)))
index_range[0] = self.dim
index_range[self.dim] = 0
index_swaped = paddle.transpose(index, perm=index_range)
dtype = self.dtype_mapping[str(index.dtype)]
x_shape = paddle.shape(x_swaped)
index_shape = paddle.shape(index_swaped)
# Number of elements in one slice along the (now leading) gather axis.
prod = paddle.prod(x_shape, dtype=dtype) / x_shape[0]
x_swaped_flattend = paddle.flatten(x_swaped)
index_swaped_flattend = paddle.flatten(index_swaped)
# Convert per-axis indices to flat offsets: index * slice_size + offset_in_slice.
index_swaped_flattend *= prod
bias = paddle.arange(start=0, end=prod, dtype=dtype)
bias = paddle.reshape(bias, x_shape[1:])
bias = paddle.crop(bias, index_shape[1:])
bias = paddle.flatten(bias)
bias = paddle.tile(bias, [index_shape[0]])
index_swaped_flattend += bias
# Select the flat elements, then restore the original layout.
gathered = paddle.index_select(x_swaped_flattend, index_swaped_flattend)
gathered = paddle.reshape(gathered, index_swaped.shape)
out = paddle.transpose(gathered, perm=x_range)
return out
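A hedged usage sketch of the custom layer, mirroring torch.gather along dim=1 (values are illustrative):

import numpy as np
import paddle

x = paddle.to_tensor(np.arange(6).reshape(2, 3).astype("float32"))
index = paddle.to_tensor(np.array([[0, 2], [1, 0]]).astype("int64"))
gather = Gather(dim=1)
out = gather(x, index)  # [[0., 2.], [4., 3.]], as torch.gather would return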
......@@ -201,7 +201,6 @@ class HierarchicalTree(Tree):
code_str = gen_layer_code(self.pd_graph, sub_layers, module_name,
different_attrs=diff_attrs_column)
# print(code_str)
self.codes.append(code_str)
for sub_layers in sub_layers_list:
inputs, outputs = get_inputs_outputs(self.pd_graph, sub_layers)
......@@ -359,7 +358,7 @@ class HierarchicalTree(Tree):
run_func_list.append(" # {}: shape is {}, dtype is {}.".format(k, v[0], v[1]))
run_func_list.extend(
[" paddle.disable_static()",
" params, _ = fluid.load_dygraph('{}/model')".format(save_dir),
" params = paddle.load('{}/model.pdparams')".format(osp.abspath(save_dir)),
" model = {}()".format(self.pd_graph.name),
" model.set_dict(params)",
" model.eval()",
......@@ -371,7 +370,12 @@ class HierarchicalTree(Tree):
self.update_parameters()
import_list = ["import paddle",
"import paddle.fluid as fluid",
"",]
"from paddle.fluid.initializer import Constant",
"from paddle.fluid.param_attr import ParamAttr",
"import math",
"from x2paddle.op_mapper.dygraph.pytorch2paddle " + \
"import pytorch_custom_layer as x2paddle_nn",
"\n",]
import_str = "\n".join(import_list)
if not osp.exists(save_dir):
os.makedirs(save_dir)
......
......@@ -29,9 +29,9 @@ NN_KERNEL_NAME = {"paddle.nn.BatchNorm": "bn",
"paddle.nn.Tanh": "tanh",
"paddle.nn.AvgPool2D": "pool",
"paddle.nn.MaxPool2D": "pool",
"paddle.nn.Pad1d": "pad",
"paddle.nn.Pad2d": "pad",
"paddle.nn.Pad3d": "pad",
"paddle.nn.Pad1D": "pad",
"paddle.nn.Pad2D": "pad",
"paddle.nn.Pad3D": "pad",
"paddle.nn.Dropout": "dropout",
"paddle.nn.GELU": "gelu",
"paddle.nn.Hardtanh": "tanh",
......@@ -175,9 +175,11 @@ def gen_layer_code(graph, sub_layers, sub_layers_name, different_attrs=list()):
if layer.kernel.startswith("paddle.nn") and index == 0:
continue
if not output_name.startswith("x") or output_name in outputs \
or layer.kernel == "prim.assert" or \
layer.kernel == "prim.if" or layer.kernel == "prim.loop":
or layer.kernel == "prim.assert":
continue
elif layer.kernel == "prim.if" or layer.kernel == "prim.loop":
if index != 0:
outputs.append(output_name)
elif output_name not in outputs:
outputs.append(output_name)
continue
......@@ -187,15 +189,22 @@ def gen_layer_code(graph, sub_layers, sub_layers_name, different_attrs=list()):
if layer.kernel.startswith("paddle.nn") and index == 0 and "functional" not in layer.kernel:
continue
if not output_name.startswith("x") or output_name in outputs \
or layer.kernel == "prim.assert" or \
layer.kernel == "prim.if" or layer.kernel == "prim.loop":
or layer.kernel == "prim.assert":
continue
elif layer.kernel == "prim.if" or layer.kernel == "prim.loop":
if index != 0:
outputs.append(output_name)
else:
outputs.append(output_name)
no_output_count = 0
for i, (layer_id, layer) in enumerate(sub_layers.items()):
if ("paddle.nn" in layer.kernel and "functional" not in layer.kernel):
line = "self.{} = {}(".format(layer.outputs[0], layer.kernel)
if ("paddle.nn" in layer.kernel and "functional" not in layer.kernel) or \
layer.kernel.startswith("custom_layer"):
line = "self.{}".format(layer.outputs[0])
if layer.kernel.startswith("custom_layer"):
line += "= x2paddle_nn.{}(".format(layer.kernel.split(":")[-1])
else:
line += " = {}(".format(layer.kernel)
for k, v in layer.attrs.items():
key_name = "{}_{}".format(layer.outputs[0], k)
if key_name in different_attrs:
......@@ -289,7 +298,10 @@ def gen_layer_code(graph, sub_layers, sub_layers_name, different_attrs=list()):
else:
if v not in cur_outputs and v not in inputs:
inputs.append(v)
line += "{}={}, ".format(k, v)
if k == "args":
line += v
else:
line += "{}={}, ".format(k, v)
for k, v in layer.attrs.items():
key_name = "{}_{}".format(layer.outputs[0], k)
if key_name in different_attrs:
......
......@@ -50,21 +50,25 @@ def get_inputs_outputs(pd_graph, layers):
for layer_id, layer in layers.items():
# Get the names of the output nodes
if layer_id not in pd_graph.edges_out:
for output_name in layer.outputs:
for index, output_name in enumerate(layer.outputs):
if not output_name.startswith("x") or output_name in outputs \
or layer.kernel == "prim.assert" or \
layer.kernel == "prim.if" or layer.kernel == "prim.loop":
or layer.kernel == "prim.assert":
continue
elif layer.kernel == "prim.if" or layer.kernel == "prim.loop":
if index != 0:
outputs.append(output_name)
elif output_name not in outputs:
outputs.append(output_name)
else:
for out_layer_id in pd_graph.edges_out[layer_id]:
if out_layer_id not in layer_ids:
for output_name in layer.outputs:
for index, output_name in enumerate(layer.outputs):
if not output_name.startswith("x") or output_name in outputs \
or layer.kernel == "prim.assert" or \
layer.kernel == "prim.if" or layer.kernel == "prim.loop":
or layer.kernel == "prim.assert":
continue
elif layer.kernel == "prim.if" or layer.kernel == "prim.loop":
if index != 0:
outputs.append(output_name)
else:
outputs.append(output_name)
# Get the names of the input nodes
......
......@@ -21,7 +21,8 @@ class GraphOptimizer(object):
def __init__(self, source_frame, paddle_type="dygraph", jit_type="trace"):
if source_frame == "pytorch":
if jit_type == "trace":
self.passes = ["trace_fc_fuse_pass"]
self.passes = ["dygraph_constant_fuse_pass",
"trace_fc_fuse_pass"]
else:
self.passes = [
"dygraph_constant_fuse_pass",
......