Commit 573b7b14 authored by wjj19950828

add aten::format and remove to_tensor

Parent b3e5fdc7
@@ -87,6 +87,11 @@ def arg_parser():
         type=_text_type,
         default=None,
         help="pretrain model file of pytorch model")
+    parser.add_argument(
+        "--code_optimizer",
+        "-co",
+        default=True,
+        help="Turn on code optimization")
     parser.add_argument(
         "--to_lite", "-tl", default=False, help="convert to Paddle-Lite format")
     parser.add_argument(
@@ -220,6 +225,7 @@ def pytorch2paddle(module,
                    save_dir,
                    jit_type="trace",
                    input_examples=None,
+                   code_optimizer=True,
                    convert_to_lite=False,
                    lite_valid_places="arm",
                    lite_model_type="naive_buffer"):
@@ -253,7 +259,8 @@ def pytorch2paddle(module,
     graph_opt = GraphOptimizer(source_frame="pytorch", jit_type=jit_type)
     graph_opt.optimize(mapper.paddle_graph)
     print("Model optimized.")
-    mapper.paddle_graph.gen_model(save_dir, jit_type=jit_type)
+    mapper.paddle_graph.gen_model(
+        save_dir, jit_type=jit_type, code_optimizer=code_optimizer)
     if convert_to_lite:
         convert2lite(save_dir, lite_valid_places, lite_model_type)
......
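For reference, the new switch is exposed both on the CLI (`--code_optimizer` / `-co`) and on the Python API. A minimal usage sketch of the API path follows; the torchvision model and input shape are illustrative, not part of this commit.

```python
import torch
import torchvision
from x2paddle.convert import pytorch2paddle

model = torchvision.models.resnet18(pretrained=True)
model.eval()
dummy_input = torch.randn(1, 3, 224, 224)

# code_optimizer=True (the default) lets gen_model run the
# pytorch_code_optimizer passes; pass False to skip them.
pytorch2paddle(
    model,
    save_dir="pd_model",
    jit_type="trace",
    input_examples=[dummy_input],
    code_optimizer=True)
```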
@@ -237,11 +237,11 @@ class PaddleGraph(object):
         return update(self.layers)

-    def gen_model(self, save_dir, jit_type=None):
+    def gen_model(self, save_dir, jit_type=None, code_optimizer=True):
         if not osp.exists(save_dir):
             os.makedirs(save_dir)
         if jit_type == "trace":
-            if not self.has_unpack:
+            if not self.has_unpack and code_optimizer:
                 from x2paddle.optimizer.pytorch_code_optimizer import HierarchicalTree
                 hierarchical_tree = HierarchicalTree(self)
                 for layer_id, layer in self.layers.items():
@@ -252,7 +252,7 @@ class PaddleGraph(object):
             self.gen_code(save_dir)
             self.dump_parameter(save_dir)
         else:
-            if self.source_type == "pytorch":
+            if self.source_type == "pytorch" and code_optimizer:
                 from x2paddle.optimizer.pytorch_code_optimizer import ModuleGraph
                 module_graph = ModuleGraph(self)
                 module_graph.save_source_files(save_dir)
......
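Continuing the sketch above: `gen_model` now threads the flag through, so turning it off falls back to the plain `gen_code`/`dump_parameter` path for traced models and skips `ModuleGraph` for scripted ones.

```python
# A sketch, assuming mapper.paddle_graph from a prior conversion run.
mapper.paddle_graph.gen_model(
    "pd_model", jit_type="trace", code_optimizer=False)
```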
@@ -2231,6 +2231,38 @@ def aten_floor_divide(mapper, graph, node):
     return current_inputs, current_outputs


+def aten_format(mapper, graph, node):
+    """ Construct a PaddleLayer that performs string formatting.
+    TorchScript example:
+        %628 : str = aten::format(%8, %627)
+    Parameter meanings:
+        %628 (str): the output string
+        %8 (str): the input format string
+        %627 (-): the argument substituted by format
+    """
+    scope_name = mapper.normalize_scope_name(node)
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
+    # Process the inputs
+    for i in range(len(inputs_node)):
+        mapper._check_input(graph, inputs_node[i], inputs_name[i],
+                            current_outputs, scope_name)
+        layer_inputs["input" + str(i)] = inputs_name[i]
+    # Get the list of inputs of the current node
+    current_inputs = list(layer_inputs.values())
+    graph.add_layer(
+        "prim.format",
+        inputs=layer_inputs,
+        outputs=layer_outputs,
+        scope_name=scope_name)
+    return current_inputs, current_outputs
+
+
 def aten_full_like(mapper, graph, node):
     """ Construct a PaddleLayer that creates a Tensor with the same shape as the input and a fixed dtype.
     TorchScript example:
......
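To see where `aten::format` nodes come from, scripting a module whose forward builds a message with `str.format` is enough. A hedged sketch (the module and shape are illustrative):

```python
import torch

class Net(torch.nn.Module):
    def forward(self, x):
        # str.format in TorchScript lowers to an aten::format node
        msg = "first dim is {}".format(x.size(0))
        print(msg)
        return x

scripted = torch.jit.script(Net())
# The graph dump contains a line like:
#   %msg : str = aten::format(%4, %6)
print(scripted.graph)
```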
@@ -304,6 +304,27 @@ def prim_floordiv(layer,
     forward_func.extend(gen_codes([line], indent=indent))


+def prim_format(layer,
+                indent=1,
+                init_func=[],
+                forward_func=[],
+                layer_id=None,
+                different_attrs=None):
+    line = ""
+    if len(layer.inputs) == 3:
+        line = "{} = {}.format({}, {})".format(
+            layer.outputs[0],
+            get_value(layer, "input0", different_attrs),
+            get_value(layer, "input1", different_attrs),
+            get_value(layer, "input2", different_attrs))
+    elif len(layer.inputs) == 2:
+        line = "{} = {}.format({})".format(
+            layer.outputs[0],
+            get_value(layer, "input0", different_attrs),
+            get_value(layer, "input1", different_attrs))
+    forward_func.extend(gen_codes([line], indent=indent))
+
+
 def prim_getitem(layer,
                  indent=1,
                  init_func=[],
@@ -609,8 +630,8 @@ def prim_or(layer,
     if is_return_line:
         return line.split(" = ")[1]
     forward_func.extend(gen_codes([line], indent=indent))


 def prim_remainder(layer,
                    indent=1,
                    init_func=[],
@@ -619,8 +640,8 @@ def prim_remainder(layer,
                    different_attrs=None,
                    is_return_line=False):
     line = "{} = {} % {}".format(layer.outputs[0],
-                                get_value(layer, "x", different_attrs),
-                                get_value(layer, "y", different_attrs))
+                                 get_value(layer, "x", different_attrs),
+                                 get_value(layer, "y", different_attrs))
     if is_return_line:
         return line.split(" = ")[1]
     forward_func.extend(gen_codes([line], indent=indent))
......
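The emitter above simply rebuilds a Python `str.format` call in the generated model code. A standalone sketch of the string it produces (the names x628/x8/x627 are illustrative, echoing the TorchScript example earlier):

```python
output, fmt, arg = "x628", "x8", "x627"
line = "{} = {}.format({})".format(output, fmt, arg)
print(line)  # -> x628 = x8.format(x627)
```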
@@ -257,6 +257,14 @@ def gen_layer_code(graph, sub_layers, sub_layers_name, different_attrs=dict()):
         if is_set_item:
             outputs.append(layer.outputs[0])
     no_output_count = 0
+    # remove the to_tensor layer
+    invalid_list = list()
+    for layer_id, layer in sub_layers.items():
+        if layer.kernel == "paddle.to_tensor":
+            invalid_list.append(layer_id)
+            break
+    for layer_id in invalid_list:
+        sub_layers.pop(layer_id)
     for i, (layer_id, layer) in enumerate(sub_layers.items()):
         _update_attrs(layer, different_attrs)
         if ("paddle.nn" in layer.kernel and "functional" not in layer.kernel) or \
......
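The pruning above collects at most one `paddle.to_tensor` layer (note the `break`) before code generation walks the remaining layers. A self-contained sketch of the same idiom, with `SimpleNamespace` standing in for real layers (names are illustrative):

```python
from collections import OrderedDict
from types import SimpleNamespace

sub_layers = OrderedDict([
    ("0", SimpleNamespace(kernel="paddle.to_tensor")),
    ("1", SimpleNamespace(kernel="paddle.nn.Conv2D")),
])

# Mirror the commit: stop at the first to_tensor hit, then pop it.
invalid_list = []
for layer_id, layer in sub_layers.items():
    if layer.kernel == "paddle.to_tensor":
        invalid_list.append(layer_id)
        break
for layer_id in invalid_list:
    sub_layers.pop(layer_id)

print(list(sub_layers))  # -> ['1']
```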