Unverified commit fee5405a, authored by WJJ1995, committed by GitHub

Support YOLOX and Adapt to PyTorch 1.7.0 (#696)

* add SiLU op

* fixed bugs

* support yolox model

* fix code style

* Handle ops with a trailing underscore

* add aten::format and remove to_tensor

* deal with comments

* fixed for CI

* update rm to_tensor
Parent 06984e8b
# List of OPs supported by X2Paddle
> X2Paddle currently supports 90+ TensorFlow OPs, 30+ Caffe Layers, 80+ ONNX OPs, 110+ PyTorch Aten ops, and 10+ PyTorch Prim ops, covering most operations commonly used in CV classification models. The full list of currently supported OPs is given below.
> X2Paddle currently supports 90+ TensorFlow OPs, 30+ Caffe Layers, 80+ ONNX OPs, 120+ PyTorch Aten ops, and 10+ PyTorch Prim ops, covering most operations commonly used in CV classification models. The full list of currently supported OPs is given below.
**Note:** Some OPs are not yet supported. If you encounter an unsupported OP during conversion, you can add it yourself or report it to us. Feel free to let us know via [an issue](https://github.com/PaddlePaddle/X2Paddle/issues/new) (include the model name and its code or how to obtain the model), and we will follow up promptly :)
@@ -109,7 +109,7 @@ Aten:
| 105 | aten::where | 106 | aten::zeros |107|aten::zeros\_like|108|aten::bmm|
| 109 | aten::sub\_ | 110 | aten::erf |111|aten::lstm|112|aten::gather|
| 113 | aten::upsample\_nearest2d | 114 | aten::split\_with\_sizes | 115 | aten::sum | 116 | aten::instance_norm |
| 117 | aten::bitwise_not | 118 | aten::bitwise_xor | 119 | aten::bitwise_and | | |
| 117 | aten::bitwise_not | 118 | aten::bitwise_xor | 119 | aten::bitwise_and | 120 | aten::silu |
Prim:
| No. | OP | No. | OP | No. | OP | No. | OP |
......
@@ -88,6 +88,11 @@ def arg_parser():
type=_text_type,
default=None,
help="pretrain model file of pytorch model")
parser.add_argument(
"--enable_code_optim",
"-co",
default=True,
help="Turn on code optimization")
parser.add_argument(
"--to_lite", "-tl", default=False, help="convert to Paddle-Lite format")
parser.add_argument(
@@ -222,6 +227,7 @@ def pytorch2paddle(module,
save_dir,
jit_type="trace",
input_examples=None,
enable_code_optim=True,
convert_to_lite=False,
lite_valid_places="arm",
lite_model_type="naive_buffer"):
@@ -262,7 +268,8 @@ def pytorch2paddle(module,
graph_opt = GraphOptimizer(source_frame="pytorch", jit_type=jit_type)
graph_opt.optimize(mapper.paddle_graph)
logging.info("Model optimized.")
mapper.paddle_graph.gen_model(save_dir, jit_type=jit_type)
mapper.paddle_graph.gen_model(
save_dir, jit_type=jit_type, enable_code_optim=enable_code_optim)
if convert_to_lite:
convert2lite(save_dir, lite_valid_places, lite_model_type)
......
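For context, a minimal usage sketch of the updated entry point; the model, save path, and input shape here are illustrative, not part of the commit:

```python
import torch
import torchvision
from x2paddle.convert import pytorch2paddle

# Any traceable module works here; resnet18 is only an illustration.
torch_module = torchvision.models.resnet18(pretrained=True)
torch_module.eval()

# enable_code_optim=False skips the pytorch_code_optimizer passes and
# emits flat code; the default (True) keeps the optimized module structure.
pytorch2paddle(
    torch_module,
    save_dir="pd_model",
    jit_type="trace",
    input_examples=[torch.randn(1, 3, 224, 224)],
    enable_code_optim=False)
```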
@@ -237,11 +237,11 @@ class PaddleGraph(object):
return update(self.layers)
def gen_model(self, save_dir, jit_type=None):
def gen_model(self, save_dir, jit_type=None, enable_code_optim=True):
if not osp.exists(save_dir):
os.makedirs(save_dir)
if jit_type == "trace":
if not self.has_unpack:
if not self.has_unpack and enable_code_optim:
from x2paddle.optimizer.pytorch_code_optimizer import HierarchicalTree
hierarchical_tree = HierarchicalTree(self)
for layer_id, layer in self.layers.items():
......@@ -252,7 +252,7 @@ class PaddleGraph(object):
self.gen_code(save_dir)
self.dump_parameter(save_dir)
else:
if self.source_type == "pytorch":
if self.source_type == "pytorch" and enable_code_optim:
from x2paddle.optimizer.pytorch_code_optimizer import ModuleGraph
module_graph = ModuleGraph(self)
module_graph.save_source_files(save_dir)
......
@@ -263,41 +263,6 @@ def aten_addmm(mapper, graph, node):
def aten_add(mapper, graph, node):
""" 构造数值相加的PaddleLayer,该节点实现out = x + y。
TorchScript示例:
%296 : int = aten::add(%i.12, %288)
参数含义:
%296 (-): 相加结果。
%i.12 (-): 输入数值 x。
%288 (-): 输入数值 y。
"""
scope_name = mapper.normalize_scope_name(node)
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Get the list of outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %i.12
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
scope_name)
layer_inputs["x"] = inputs_name[0]
# Handle input 1, i.e. %288
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs,
scope_name)
layer_inputs["y"] = inputs_name[1]
# Get the list of inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer(
"prim.add",
inputs=layer_inputs,
outputs=layer_outputs,
scope_name=scope_name)
return current_inputs, current_outputs
def aten_add_(mapper, graph, node):
""" 构造数值相加的PaddleLayer,该节点实现out = x + alpha * y。
TorchScript示例:
%137 : Tensor = aten::add(%136, %130, %130)
......@@ -325,21 +290,29 @@ def aten_add_(mapper, graph, node):
layer_inputs["y"] = inputs_name[1]
# Get the list of inputs of the current node
current_inputs = list(layer_inputs.values())
# Handle input 2, i.e. %151
if inputs_name[2] in mapper.attrs:
layer_attrs["alpha"] = mapper.attrs[inputs_name[2]]
else:
mapper._check_input(graph, inputs_node[2], inputs_name[2],
current_outputs, scope_name)
layer_inputs["alpha"] = inputs_name[2]
current_inputs.append(inputs_name[2])
if len(inputs_name) > 2:
# Handle input 2, i.e. %151
if inputs_name[2] in mapper.attrs:
layer_attrs["alpha"] = mapper.attrs[inputs_name[2]]
else:
mapper._check_input(graph, inputs_node[2], inputs_name[2],
current_outputs, scope_name)
layer_inputs["alpha"] = inputs_name[2]
current_inputs.append(inputs_name[2])
graph.add_layer(
"prim.add_",
inputs=layer_inputs,
outputs=layer_outputs,
scope_name=scope_name,
**layer_attrs)
graph.add_layer(
"prim.add_",
inputs=layer_inputs,
outputs=layer_outputs,
scope_name=scope_name,
**layer_attrs)
else:
graph.add_layer(
"prim.add",
inputs=layer_inputs,
outputs=layer_outputs,
scope_name=scope_name,
**layer_attrs)
return current_inputs, current_outputs
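The length guard above exists because TorchScript emits aten::add with either two or three inputs, depending on how the addition is written. A minimal sketch for inspecting both forms (function names are hypothetical):

```python
import torch

@torch.jit.script
def add_plain(x: int, y: int):
    return x + y  # two-input aten::add on ints

@torch.jit.script
def add_alpha(x: torch.Tensor, y: torch.Tensor):
    return x.add(y, alpha=2)  # three-input aten::add(%x, %y, %alpha)

print(add_plain.graph)
print(add_alpha.graph)
```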
@@ -1634,41 +1607,6 @@ def aten_dim(mapper, graph, node):
return current_inputs, current_outputs
def aten_div_(mapper, graph, node):
""" 构造除法的PaddleLayer。
TorchScript示例:
%bx_bw0.3 : Tensor = aten::div_(%bx_bw.3, %2678)
参数含义:
%bx_bw0.3 (-): 除后的结果。
%bx_bw.3 (-): 被除数。
%2678 (int): 除数。
"""
scope_name = mapper.normalize_scope_name(node)
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Get the list of outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %124
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
scope_name)
layer_inputs["x"] = inputs_name[0]
# Handle input 1, i.e. %123
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs,
scope_name)
layer_inputs["y"] = inputs_name[1]
# Get the list of inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer(
"prim.div",
inputs=layer_inputs,
outputs=layer_outputs,
scope_name=scope_name)
return current_inputs, current_outputs
def aten_div(mapper, graph, node):
""" 构造除法的PaddleLayer。
TorchScript示例:
......@@ -1737,39 +1675,6 @@ def aten_dropout(mapper, graph, node):
return current_inputs, current_outputs
def aten_dropout_(mapper, graph, node):
""" 构造Dropout的PaddleLayer。
TorchScript示例:
%119 : Tensor = aten::dropout_(%result.3, %117, %118)
参数含义:
%119 (Tensor): Dropout后的Tensor。
%result.3 (Tensor): 输入Tensor。
%118 (bool): 是否是训练阶段。
"""
scope_name = mapper.normalize_scope_name(node)
op_name = name_generator("dropout", mapper.nn_name2id)
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [op_name, output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Get the list of outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %119
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
scope_name)
layer_inputs["input"] = inputs_name[0]
# Get the lists of inputs and outputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer(
"paddle.nn.Dropout",
inputs=layer_inputs,
outputs=layer_outputs,
scope_name=scope_name,
p=0.0)
return current_inputs, current_outputs
def aten_embedding(mapper, graph, node):
""" 构造embedding的PaddleLayer。
TorchScript示例:
......@@ -2326,6 +2231,38 @@ def aten_floor_divide(mapper, graph, node):
return current_inputs, current_outputs
def aten_format(mapper, graph, node):
""" 构造取浮点型的PaddleLayer。
TorchScript示例:
%628 : str = aten::format(%8, %627)
参数含义:
%628 (str): 输出,为一个字符串
%8 (str): 输入字符串
%627 (-): format后的参数
"""
scope_name = mapper.normalize_scope_name(node)
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Get the list of outputs of the current node
current_outputs = [output_name]
# Handle the inputs
for i in range(len(inputs_node)):
mapper._check_input(graph, inputs_node[i], inputs_name[i],
current_outputs, scope_name)
layer_inputs["input" + str(i)] = inputs_name[i]
# Get the list of inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer(
"prim.format",
inputs=layer_inputs,
outputs=layer_outputs,
scope_name=scope_name)
return current_inputs, current_outputs
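For reference, aten::format appears when a Python str.format call is scripted; a minimal sketch that should reproduce the node:

```python
import torch

@torch.jit.script
def describe(x: torch.Tensor) -> str:
    # str.format is scripted as: %out : str = aten::format(%fmt, %arg)
    return "shape: {}".format(x.shape)

print(describe.graph)
```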
def aten_full_like(mapper, graph, node):
""" 构造创建一个与输入具有相同的形状并且数据类型固定的Tensor的PaddleLayer。
TorchScript示例:
......@@ -2607,10 +2544,10 @@ def aten_gru(mapper, graph, node):
return current_inputs, current_outputs
def aten_hardtanh_(mapper, graph, node):
def aten_hardtanh(mapper, graph, node):
""" 构造hardtanh激活的PaddleLayer。
TorchScript示例:
%result.9 : Tensor = aten::hardtanh_(%input.20, %67, %66)
%result.9 : Tensor = aten::hardtanh(%input.20, %67, %66)
参数含义:
%result.9 (Tensor): 输出,hardtanh激活后的Tensor。
%input.20 (Tensor): 需要hardtanh激活的Tensor。
......@@ -2990,42 +2927,6 @@ def aten_le(mapper, graph, node):
return current_inputs, current_outputs
def aten_leaky_relu_(mapper, graph, node):
""" 构造leaky relu激活的PaddleLayer。
TorchScript示例:
%input.117 : Tensor = aten::leaky_relu_(%input.114, %1570)
参数含义:
%input.117 (Tensor): 输出,leaky relu后的结果。
%input.114 (Tensor): 需要leaky relu的Tensor。
%1570 (float): 输入中的元素小于0时的斜率。
"""
scope_name = mapper.normalize_scope_name(node)
op_name = name_generator("leakly_relu", mapper.nn_name2id)
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [op_name, output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Get the list of outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %result.5
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
scope_name)
layer_inputs["x"] = inputs_name[0]
# Get the lists of inputs and outputs of the current node
current_inputs = list(layer_inputs.values())
# Handle input 1, i.e. %1570
layer_attrs["negative_slope"] = mapper.attrs[inputs_name[1]]
graph.add_layer(
"paddle.nn.LeakyReLU",
inputs=layer_inputs,
outputs=layer_outputs,
scope_name=scope_name,
**layer_attrs)
return current_inputs, current_outputs
def aten_leaky_relu(mapper, graph, node):
""" 构造leaky relu激活的PaddleLayer。
TorchScript示例:
......@@ -3293,115 +3194,6 @@ def aten_lt(mapper, graph, node):
return current_inputs, current_outputs
def aten_masked_fill_(mapper, graph, node):
""" 构造填充mask的PaddleLayer。
TorchScript示例:
%input.4 : Tensor = aten::masked_fill_(%scores.2, %mask.2, %46)
参数含义:
%input.4 (Tensor): 输出,填充后的结果。
%scores.2 (Tensor): 需要填充的Tensor。
%mask.2 (Tensor): bool型的Tensor,哪些位置需要填充。
%46 (-): 填充的值。
"""
scope_name = mapper.normalize_scope_name(node)
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Get the list of inputs of the current node
current_inputs = []
# Get the list of outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %input.4
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
scope_name)
current_inputs.append(inputs_name[0])
graph.add_layer(
"prim.type",
inputs={"input": inputs_name[0]},
outputs=[inputs_name[0] + "_type"],
scope_name=scope_name)
# Handle input 1, i.e. %scores.2
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs,
scope_name)
current_inputs.append(inputs_name[1])
graph.add_layer(
"paddle.logical_not",
inputs={"x": inputs_name[1]},
outputs=[inputs_name[1] + "_not"],
scope_name=scope_name)
graph.add_layer(
"paddle.cast",
inputs={"x": inputs_name[1]},
outputs=[inputs_name[1] + "_mask"],
scope_name=scope_name,
dtype=inputs_name[0] + "_type")
graph.add_layer(
"paddle.cast",
inputs={"x": inputs_name[1] + "_not"},
outputs=[inputs_name[1] + "_not_mask"],
scope_name=scope_name,
dtype=inputs_name[0] + "_type")
graph.add_layer(
"paddle.multiply",
inputs={"x": inputs_name[0],
"y": inputs_name[1] + "_not_mask"},
outputs=[inputs_name[0] + "_not_mask"],
scope_name=scope_name)
# Handle input 2, i.e. %46
mapper._check_input(graph, inputs_node[2], inputs_name[2], current_outputs,
scope_name)
graph.add_layer(
"prim.eq",
inputs={"x": inputs_name[2]},
outputs=[inputs_name[2] + "_cond1"],
scope_name=scope_name,
y="-float('inf')")
graph.add_layer(
"prim.eq",
inputs={"x": inputs_name[2]},
outputs=[inputs_name[2] + "_cond2"],
scope_name=scope_name,
y="float('inf')")
graph.add_layer(
"prim.or",
inputs={
"x": inputs_name[2] + "_cond1",
"y": inputs_name[2] + "_cond2"
},
outputs=[inputs_name[2] + "_cond"],
scope_name=scope_name)
graph.add_layer(
"prim.if", {'input': inputs_name[2] + "_cond"},
outputs=[inputs_name[2] + "_if"],
scope_name=scope_name)
if_layer = graph.layers[list(graph.layers.keys())[-1]]
block = PaddleGraph(source_type="pytorch", parent_layer=if_layer)
block.add_layer(
"prim.equal",
inputs={"input": inputs_name[1] + "_mask"},
outputs=[inputs_name[2] + "_1"],
scope_name=scope_name)
if_layer.add_block(block)
block = PaddleGraph(source_type="pytorch", parent_layer=if_layer)
block.add_layer(
"prim.mul",
inputs={"x": inputs_name[1] + "_mask",
"y": inputs_name[2]},
outputs=[inputs_name[2] + "_1"],
scope_name=scope_name)
if_layer.add_block(block)
if_layer.inputs["input-0"] = inputs_name[1] + "_mask"
if_layer.inputs["input-1"] = inputs_name[2]
if_layer.outputs.append(inputs_name[2] + "_1")
graph.add_layer(
"paddle.add",
inputs={"x": inputs_name[2] + "_1",
"y": inputs_name[0] + "_not_mask"},
outputs=layer_outputs,
scope_name=scope_name)
return current_inputs, current_outputs
def aten_masked_fill(mapper, graph, node):
""" 构造填充mask的PaddleLayer。
TorchScript示例:
......@@ -3799,42 +3591,6 @@ def aten_mul(mapper, graph, node):
return current_inputs, current_outputs
def aten_mul_(mapper, graph, node):
""" 构造数值相乘的PaddleLayer。
TorchScript示例:
%size_prods.39 : int = aten::mul_(%size_prods.38, %114)
参数含义:
%size_prods.39 (Tensor): 输出,相乘后的结果。
%size_prods.38 (-): 数值1。
%114 (-): 数值2。
"""
scope_name = mapper.normalize_scope_name(node)
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Get the list of outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %size_prods.38
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
scope_name)
layer_inputs["x"] = inputs_name[0]
# Handle input 1, i.e. %114
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs,
scope_name)
layer_inputs["y"] = inputs_name[1]
# Get the list of inputs of the current node
current_inputs = list(layer_inputs.values())
current_outputs = layer_outputs
graph.add_layer(
"prim.mul",
inputs=layer_inputs,
outputs=layer_outputs,
scope_name=scope_name)
return current_inputs, current_outputs
def aten_ne(mapper, graph, node):
""" 构造判断数值是否不相等的PaddleLayer。
TorchScript示例:
......@@ -4304,38 +4060,6 @@ def aten_relu(mapper, graph, node):
return current_inputs, current_outputs
def aten_relu_(mapper, graph, node):
""" 构造ReLU激活的PaddleLayer。
TorchScript示例:
%result.3 : Tensor = aten::relu_(%input.5)
参数含义:
%result.3 (Tensor): 输出,ReLU后的结果。
%result.5 (Tensor): 需要ReLU的Tensor。
注意: inplace这个参数在paddle中未实现
"""
scope_name = mapper.normalize_scope_name(node)
op_name = name_generator("relu", mapper.nn_name2id)
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [op_name, output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Get the list of outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %result.5
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
scope_name)
layer_inputs["x"] = inputs_name[0]
# Get the list of inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer(
"paddle.nn.ReLU",
inputs=layer_inputs,
outputs=layer_outputs,
scope_name=scope_name)
return current_inputs, current_outputs
def aten_relu6(mapper, graph, node):
""" 构造ReLU6激活的PaddleLayer。
TorchScript示例:
......@@ -4716,6 +4440,38 @@ def aten_sigmoid(mapper, graph, node):
return current_inputs, current_outputs
def aten_silu(mapper, graph, node):
""" 构造Silu激活的PaddleLayer。
TorchScript示例:
%result.3 : Tensor = aten::silu(%input.5)
参数含义:
%result.3 (Tensor): 输出,Silu后的结果。
%input.5 (Tensor): 需要Silu的Tensor。
注意: inplace这个参数在paddle中未实现
"""
scope_name = mapper.normalize_scope_name(node)
op_name = name_generator("silu", mapper.nn_name2id)
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [op_name, output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Get the list of outputs of the current node
current_outputs = [output_name]
# Handle input 0, i.e. %input.5
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
scope_name)
layer_inputs["x"] = inputs_name[0]
# Get the list of inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer(
"paddle.nn.Silu",
inputs=layer_inputs,
outputs=layer_outputs,
scope_name=scope_name)
return current_inputs, current_outputs
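For reference, nn.SiLU is the module added in PyTorch 1.7 that this mapper targets; a minimal sketch (input shape illustrative) that produces the aten::silu node:

```python
import torch

# Tracing nn.SiLU yields aten::silu in the TorchScript graph, which the
# mapper above rewrites to paddle.nn.Silu.
silu = torch.nn.SiLU()
traced = torch.jit.trace(silu, torch.randn(1, 3, 8, 8))
print(traced.graph)  # contains: aten::silu(%input)
```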
def aten_sin(mapper, graph, node):
""" 构造数学计算sin的PaddleLayer。
TorchScript示例:
......@@ -5700,9 +5456,9 @@ def aten_upsample_nearest2d(mapper, graph, node):
if_layer.add_block(block)
if_layer.inputs["input-0"] = inputs_name[1]
if "size" in layer_attrs and layer_attrs["size"] is None:
mapper._check_input(graph, inputs_node[3], inputs_name[3],
mapper._check_input(graph, inputs_node[2], inputs_name[2],
current_outputs, scope_name)
layer_inputs["scale_factor"] = inputs_name[3]
layer_inputs["scale_factor"] = inputs_name[2]
layer_attrs["align_mode"] = 0
layer_attrs["mode"] = string("nearest")
graph.add_layer(
......
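The index change above (inputs_name[3] to inputs_name[2]) tracks a schema change in PyTorch 1.7, where nearest upsampling traces to aten::upsample_nearest2d(%input, %output_size, %scale_factors). A minimal sketch to confirm the input order on a given install:

```python
import torch
import torch.nn.functional as F

def up(x):
    return F.interpolate(x, scale_factor=2.0, mode="nearest")

traced = torch.jit.trace(up, torch.randn(1, 3, 4, 4))
# Under PyTorch 1.7 the scale factor should be the third input (index 2).
print(traced.graph)
```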
@@ -304,6 +304,27 @@ def prim_floordiv(layer,
forward_func.extend(gen_codes([line], indent=indent))
def prim_format(layer,
indent=1,
init_func=[],
forward_func=[],
layer_id=None,
different_attrs=None):
line = ""
if len(layer.inputs) == 3:
line = "{} = {}.format({}, {})".format(
layer.outputs[0],
get_value(layer, "input0", different_attrs),
get_value(layer, "input1", different_attrs),
get_value(layer, "input2", different_attrs))
elif len(layer.inputs) == 2:
line = "{} = {}.format({})".format(
layer.outputs[0],
get_value(layer, "input0", different_attrs),
get_value(layer, "input1", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
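As a standalone illustration of what the two-input branch emits (all variable names here are hypothetical):

```python
# Hypothetical stand-in for the converted forward code: a two-input
# prim.format layer becomes an ordinary str.format call.
x8 = "shape: {}"          # input0: the format string
x627 = [1, 3, 224, 224]   # input1: the formatted argument
x628 = x8.format(x627)
print(x628)  # shape: [1, 3, 224, 224]
```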
def prim_getitem(layer,
indent=1,
init_func=[],
@@ -609,8 +630,8 @@ def prim_or(layer,
if is_return_line:
return line.split(" = ")[1]
forward_func.extend(gen_codes([line], indent=indent))
def prim_remainder(layer,
indent=1,
init_func=[],
@@ -619,8 +640,8 @@ def prim_remainder(layer,
different_attrs=None,
is_return_line=False):
line = "{} = {} % {}".format(layer.outputs[0],
get_value(layer, "x", different_attrs),
get_value(layer, "y", different_attrs))
get_value(layer, "x", different_attrs),
get_value(layer, "y", different_attrs))
if is_return_line:
return line.split(" = ")[1]
forward_func.extend(gen_codes([line], indent=indent))
......
@@ -55,6 +55,10 @@ class PyTorchOpMapper():
unsupported_ops = []
for op in op_list:
func_name = op.replace('::', '_')
# Strip a trailing "_" (in-place op variants), e.g. aten_relu_ -> aten_relu,
# but leave dunder ops such as aten::__isnot__ untouched
if func_name[-1] == "_" and func_name[-2] != "_":
func_name = func_name[:-1]
if not (hasattr(prim, func_name) or hasattr(aten, func_name)):
unsupported_ops.append(op)
if len(unsupported_ops) == 0:
@@ -104,6 +108,10 @@ class PyTorchOpMapper():
for node in script_graph.nodes():
kind = node.kind()
func_name = kind.replace('::', '_')
# Strip a trailing "_" (in-place op variants), e.g. aten_relu_ -> aten_relu,
# but leave dunder ops such as aten::__isnot__ untouched
if func_name[-1] == "_" and func_name[-2] != "_":
func_name = func_name[:-1]
if hasattr(prim, func_name):
func = getattr(prim, func_name)
inputs, outputs = func(self, graph, node)
......
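A standalone sketch of the suffix rule added in both places above: a trailing "_" (the in-place op variants) is stripped so that, e.g., aten::relu_ reuses the aten_relu mapper, while dunder ops keep their name:

```python
def normalize(op_kind: str) -> str:
    # Mirrors the mapper's lookup: "::" becomes "_", then a single
    # trailing underscore is dropped, but a double underscore is kept.
    func_name = op_kind.replace("::", "_")
    if func_name[-1] == "_" and func_name[-2] != "_":
        func_name = func_name[:-1]
    return func_name

assert normalize("aten::relu_") == "aten_relu"
assert normalize("aten::__isnot__") == "aten___isnot__"
```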
@@ -396,8 +396,14 @@ def gen_layer_code(graph, sub_layers, sub_layers_name, different_attrs=dict()):
head, init_func_head, forward_func_head = gen_head(inputs, different_attrs)
output_data_name = ", ".join(outputs)
# remove to_tensor op
forward_func_new = list()
for line in forward_func:
if "paddle.to_tensor" in line:
continue
forward_func_new.append(line)
code_list = head + init_func_head + init_func + \
forward_func_head + forward_func + \
forward_func_head + forward_func_new + \
gen_codes(["return {}".format(output_data_name)], indent=2)
code_str = "".join(code_list)
return code_str
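A standalone sketch of the filter introduced above: any generated forward line that calls paddle.to_tensor is dropped before the code is assembled (the example lines are hypothetical):

```python
# Hypothetical generated forward body before filtering.
forward_func = [
    "    x = paddle.to_tensor(data)",
    "    y = self.conv0(x)",
]
# Keep only lines that do not call paddle.to_tensor.
forward_func_new = [l for l in forward_func if "paddle.to_tensor" not in l]
print(forward_func_new)  # ['    y = self.conv0(x)']
```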