Unverified commit b36cd902, authored by SunAhong1993, committed by GitHub

Merge pull request #21 from PaddlePaddle/develop

merge1
@@ -361,6 +361,8 @@ class TFDecoder(object):
                 continue
             graph_node = TFGraphNode(layer)
             dtype = graph_node.layer.attr['dtype'].type
+            if dtype == 10:
+                continue
             need_define_shape = 0
             if self.define_input_shape:
...
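Review note: TensorFlow encodes dtypes as integers from the DataType enum in types.proto, and 10 is DT_BOOL, so this guard presumably skips boolean placeholders (e.g. an is_training flag) rather than treating them as model inputs. A one-line check, assuming a TensorFlow install:

from tensorflow.core.framework import types_pb2

# 10 is the DT_BOOL entry of the DataType enum, the same integer code
# stored in graph_node.layer.attr['dtype'].type.
print(types_pb2.DT_BOOL == 10)  # True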
@@ -4537,6 +4537,72 @@ def aten_upsample_bilinear2d(mapper, graph, node):
         **layer_attrs)
     return current_inputs, current_outputs
+
+
+def aten_upsample_nearest2d(mapper, graph, node):
+    """ Construct a PaddleLayer that performs nearest-neighbor upsampling.
+    TorchScript example:
+        %4997 : Tensor = aten::upsample_nearest2d(%x.13, %4963, %5421, %4995)
+        Argument meanings:
+        %4997 (Tensor): output, the upsampled Tensor.
+        %x.13 (Tensor): the Tensor to be upsampled.
+        %4963 (list): target size after upsampling.
+        %5421 (float): scale factor for the height.
+        %4995 (float): scale factor for the width.
+    """
+    scope_name = mapper.normalize_scope_name(node)
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    layer_attrs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # collect the outputs of the current node
+    current_outputs = [output_name]
+    # handle input 0, i.e. %x.13
+    mapper._check_input(graph, inputs_node[0], inputs_name[0],
+                        current_outputs, scope_name)
+    layer_inputs["x"] = inputs_name[0]
+    # collect the inputs of the current node
+    current_inputs = list(layer_inputs.values())
+    # handle input 1, i.e. %4963
+    if inputs_name[1] in mapper.attrs:
+        layer_attrs["size"] = mapper.attrs[inputs_name[1]]
+    else:
+        mapper._check_input(graph, inputs_node[1], inputs_name[1],
+                            current_outputs, scope_name)
+        layer_inputs["size"] = inputs_name[1]
+        current_inputs.append(inputs_name[1])
+        graph.add_layer(
+            "prim.isinstance",
+            inputs={"input": inputs_name[1]},
+            outputs=[inputs_name[1] + "_isinstance"],
+            scope_name=scope_name,
+            cls="paddle.fluid.Variable")
+        # TODO(syf): paddle.Variable
+        graph.add_layer(
+            "prim.if", {"input": inputs_name[1] + "_isinstance"},
+            outputs=[inputs_name[0] + "_if1"],
+            scope_name=scope_name)
+        if_layer = graph.layers[list(graph.layers.keys())[-1]]
+        block = PaddleGraph(
+            source_type="pytorch", parent_layer=if_layer, graph_type="dygraph")
+        block.add_layer(
+            "prim.var2list",
+            inputs={"input": inputs_name[1]},
+            outputs=[inputs_name[1]],
+            scope_name=scope_name)
+        if_layer.add_block(block)
+        block = PaddleGraph(
+            source_type="pytorch", parent_layer=if_layer, graph_type="dygraph")
+        if_layer.add_block(block)
+        if_layer.inputs["input-0"] = inputs_name[1]
+    layer_inputs["scale_factor"] = inputs_name[3]
+    layer_attrs["align_mode"] = 0
+    layer_attrs["mode"] = string("nearest")
+    graph.add_layer(
+        "paddle.nn.functional.interpolate",
+        inputs=layer_inputs,
+        outputs=layer_outputs,
+        scope_name=scope_name,
+        **layer_attrs)
+    return current_inputs, current_outputs
+
+
 def aten_values(mapper, graph, node):
     """ Construct a PaddleLayer for comparing sizes.
...
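Review note: the prim.isinstance / prim.if / prim.var2list chain makes the emitted code tolerate a size that is only known at run time as a Tensor. A simplified sketch of the runtime behavior the generated code implements (this ignores the scale_factor input and uses illustrative names):

import paddle

x = paddle.rand([1, 3, 32, 32])      # tensor to upsample
size = [64, 64]                      # may also arrive as a Tensor
if isinstance(size, paddle.Tensor):  # what prim.isinstance / prim.if emit
    size = size.numpy().tolist()     # what prim.var2list emits
out = paddle.nn.functional.interpolate(x, size=size, mode="nearest")
print(out.shape)                     # [1, 3, 64, 64]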
@@ -676,8 +676,8 @@ class TFOpMapper(OpMapper):
         input = self.graph.get_input_node(node, 0)
         paddings = self.graph.get_input_node(node, 1)
         assert paddings.layer_type == "Const", "Padding should be Const"
-        paddings = np.flip(paddings.value, 0).flatten().tolist()
-        dim = int(len(paddings) / 2)
+        new_paddings = numpy.flip(paddings.value, 0).flatten().tolist()
+        dim = int(len(new_paddings) / 2)
         transpose_name = gen_name("pad", "transpose")
         self.paddle_graph.add_layer(
             kernel="paddle.transpose",
@@ -688,7 +688,7 @@ class TFOpMapper(OpMapper):
             kernel="paddle.nn.Pad{}D".format(dim),
             inputs={"x": transpose_name},
             outputs=layer_outputs,
-            pad=new_padding)
+            pad=new_paddings)
         self.paddle_graph.add_layer(
             kernel="paddle.transpose",
             inputs={"x": node.name},
...
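Review note: beyond the rename, this is a real fix; the old hunk assigned to paddings but passed the undefined name new_padding below, a NameError at conversion time. The flip itself is needed because TF lists one [before, after] pair per axis starting from the outermost axis, while Paddle's Pad layers take pairs starting from the innermost axis. A small worked example using only numpy:

import numpy

# TF-style paddings: one [before, after] pair per axis, outermost first.
paddings = numpy.array([[1, 2],    # axis 0
                        [3, 4]])   # axis 1
new_paddings = numpy.flip(paddings, 0).flatten().tolist()
print(new_paddings)               # [3, 4, 1, 2]
dim = int(len(new_paddings) / 2)  # 2 -> paddle.nn.Pad2D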
@@ -661,7 +661,7 @@ class TFOpMapper(OpMapper):
         input = self.graph.get_input_node(node, 0)
         paddings = self.graph.get_input_node(node, 1)
         assert paddings.layer_type == "Const", "Padding should be Const"
-        paddings = np.flip(paddings.value, 0).flatten().tolist()
+        new_paddings = numpy.flip(paddings.value, 0).flatten().tolist()
         transpose_name = gen_name("pad", "transpose")
         self.paddle_graph.add_layer(
             kernel="paddle.transpose",
@@ -672,7 +672,7 @@ class TFOpMapper(OpMapper):
             kernel="paddle.nn.functional.pad".format(dim),
             inputs={"x": transpose_name},
             outputs=[node.name],
-            pad=new_padding)
+            pad=new_paddings)
         self.paddle_graph.add_layer(
             kernel="paddle.transpose",
             inputs={"x": node.name},
...
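Review note: the same undefined-name fix, in the mapper variant that emits the functional API. For reference, the padding order paddle.nn.functional.pad expects on a 4-D NCHW tensor, with illustrative shapes:

import paddle

x = paddle.rand([1, 3, 5, 5])
# pad = [left, right, top, bottom]: the width pair comes first.
y = paddle.nn.functional.pad(x, pad=[1, 1, 2, 2])
print(y.shape)  # [1, 3, 9, 7]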
@@ -300,6 +300,7 @@ class HierarchicalTree(Tree):
         """
         depths = sorted(list(self._hierarchical_order.keys()), reverse=True)
         all_name_old2new = dict()
+        current_module_name_list = list()
         for depth in depths[1:]:
             # mapping between module names and their subgraphs
             module_name2sub_layers = dict()
@@ -352,6 +353,9 @@ class HierarchicalTree(Tree):
                     module_name = None
                 else:
                     module_name = name
+                while module_name in current_module_name_list:
+                    module_name += "__0"
+                current_module_name_list.append(module_name)
                 self.merge_node(module_name2sub_layers[name],
                                 sequentials2attrs_table[name],
                                 node_name2sub_layers,
...
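Review note: the added bookkeeping prevents two different subgraphs from being merged under the same generated module name. A standalone sketch of exactly the loop added above:

def unique_name(name, seen):
    # Append "__0" until the name no longer collides, then record it.
    while name in seen:
        name += "__0"
    seen.append(name)
    return name

seen = []
print(unique_name("conv_block", seen))  # conv_block
print(unique_name("conv_block", seen))  # conv_block__0
print(unique_name("conv_block", seen))  # conv_block__0__0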