diff --git a/x2paddle/op_mapper/dygraph/pytorch2paddle/aten.py b/x2paddle/op_mapper/dygraph/pytorch2paddle/aten.py
index 00121bb4128182506daca12e612e36e44375b421..28c2e6ae45e4b777b92cafc05b35d3f5c7086e58 100644
--- a/x2paddle/op_mapper/dygraph/pytorch2paddle/aten.py
+++ b/x2paddle/op_mapper/dygraph/pytorch2paddle/aten.py
@@ -4537,6 +4537,72 @@ def aten_upsample_bilinear2d(mapper, graph, node):
         **layer_attrs)
     return current_inputs, current_outputs
 
 
+def aten_upsample_nearest2d(mapper, graph, node):
+    """ Construct a PaddleLayer that performs nearest-neighbour upsampling.
+
+    TorchScript example:
+        %4997 : Tensor = aten::upsample_nearest2d(%x.13, %4963, %5421, %4995)
+        Argument meanings:
+        %4997 (Tensor): the output, i.e. the upsampled Tensor.
+        %x.13 (Tensor): the Tensor to be upsampled.
+        %4963 (list): the output size after upsampling.
+        %5421 (float): the height scale factor.
+        %4995 (float): the width scale factor.
+    """
+    scope_name = mapper.normalize_scope_name(node)
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    layer_attrs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Collect the outputs of the current node
+    current_outputs = [output_name]
+    # Handle input 0, i.e. %x.13
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, scope_name)
+    layer_inputs["x"] = inputs_name[0]
+    # Collect the inputs of the current node
+    current_inputs = list(layer_inputs.values())
+    # Handle input 1, i.e. %4963
+    if inputs_name[1] in mapper.attrs:
+        layer_attrs["size"] = mapper.attrs[inputs_name[1]]
+    else:
+        mapper._check_input(graph, inputs_node[1], inputs_name[1],
+                            current_outputs, scope_name)
+        layer_inputs["size"] = inputs_name[1]
+        current_inputs.append(inputs_name[1])
+        graph.add_layer(
+            "prim.isinstance",
+            inputs={"input": inputs_name[1]},
+            outputs=[inputs_name[1] + "_isinstance"],
+            scope_name=scope_name,
+            cls="paddle.fluid.Variable")
+        # TODO(syf): paddle.Variable
+        graph.add_layer(
+            "prim.if", {"input": inputs_name[1] + "_isinstance"},
+            outputs=[inputs_name[0] + "_if1"],
+            scope_name=scope_name)
+        if_layer = graph.layers[list(graph.layers.keys())[-1]]
+        block = PaddleGraph(source_type="pytorch", parent_layer=if_layer, graph_type="dygraph")
+        block.add_layer(
+            "prim.var2list",
+            inputs={"input": inputs_name[1]},
+            outputs=[inputs_name[1]],
+            scope_name=scope_name)
+        if_layer.add_block(block)
+        block = PaddleGraph(source_type="pytorch", parent_layer=if_layer, graph_type="dygraph")
+        if_layer.add_block(block)
+        if_layer.inputs["input-0"] = inputs_name[1]
+    layer_inputs["scale_factor"] = inputs_name[3]
+    layer_attrs["align_mode"] = 0
+    layer_attrs["mode"] = string("nearest")
+    graph.add_layer(
+        "paddle.nn.functional.interpolate",
+        inputs=layer_inputs,
+        outputs=layer_outputs,
+        scope_name=scope_name,
+        **layer_attrs)
+    return current_inputs, current_outputs
+
 def aten_values(mapper, graph, node):
     """ Construct a PaddleLayer for comparing sizes.
diff --git a/x2paddle/optimizer/pytorch_code_optimizer/hierachical_tree.py b/x2paddle/optimizer/pytorch_code_optimizer/hierachical_tree.py
index ee36d2d3748d3915c09b0e3683f38675ad2119c4..29abe35d8ca66ef53cdecdc8ab9d6d9d2d19654c 100644
--- a/x2paddle/optimizer/pytorch_code_optimizer/hierachical_tree.py
+++ b/x2paddle/optimizer/pytorch_code_optimizer/hierachical_tree.py
@@ -300,6 +300,7 @@ class HierarchicalTree(Tree):
         """
         depths = sorted(list(self._hierarchical_order.keys()), reverse=True)
         all_name_old2new = dict()
+        current_module_name_list = list()
         for depth in depths[1:]:
             # Mapping between Module names and their subgraphs
             module_name2sub_layers = dict()
@@ -352,6 +353,10 @@ class HierarchicalTree(Tree):
                     module_name = None
                 else:
                     module_name = name
+                while module_name in current_module_name_list:
+                    module_name += "__0"
+                    current_module_name_list.append(module_name)
+                current_module_name_list.append(module_name)
                 self.merge_node(module_name2sub_layers[name],
                                 sequentials2attrs_table[name],
                                 node_name2sub_layers,
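
Note (not part of the patch): a minimal sketch of what the two changes correspond to at runtime. The new `aten_upsample_nearest2d` mapper emits a `paddle.nn.functional.interpolate` layer with `mode="nearest"`, and the `hierachical_tree.py` change suffixes already-used module names with `__0`. The snippet below assumes Paddle 2.x is installed; `unique_module_name` is a hypothetical helper that simplifies the renaming loop and is not code from x2paddle.

```python
# Illustrative sketch only -- not part of the patch. Assumes Paddle 2.x.
import paddle
import paddle.nn.functional as F

# Rough dygraph equivalent of aten::upsample_nearest2d(input, output_size,
# scales_h, scales_w): nearest-neighbour upsampling by explicit output size
# or by scale factor.
x = paddle.rand([1, 3, 16, 16])
by_size = F.interpolate(x, size=[32, 32], mode="nearest")
by_scale = F.interpolate(x, scale_factor=2.0, mode="nearest")
print(by_size.shape, by_scale.shape)  # both [1, 3, 32, 32]


# Hypothetical helper showing the renaming idea used in convert_subgraph_to_layer:
# keep appending "__0" until the module name no longer collides with a name
# that was already generated, then record it.
def unique_module_name(name, used_names):
    while name in used_names:
        name += "__0"
    used_names.append(name)
    return name


used = ["ConvBlock"]
print(unique_module_name("ConvBlock", used))  # ConvBlock__0
print(unique_module_name("ConvBlock", used))  # ConvBlock__0__0
```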