diff --git a/x2paddle/decoder/tf_decoder.py b/x2paddle/decoder/tf_decoder.py
index c8380bdc6198f175aa244aa886007b0ab80b5cab..d5d862ad602e5f93f1f7923ca8dd5f28573d7049 100644
--- a/x2paddle/decoder/tf_decoder.py
+++ b/x2paddle/decoder/tf_decoder.py
@@ -361,6 +361,8 @@ class TFDecoder(object):
                 continue
             graph_node = TFGraphNode(layer)
             dtype = graph_node.layer.attr['dtype'].type
+            if dtype == 10:  # 10 is DT_BOOL in TensorFlow's DataType enum
+                continue
 
             need_define_shape = 0
             if self.define_input_shape:
diff --git a/x2paddle/op_mapper/dygraph/pytorch2paddle/aten.py b/x2paddle/op_mapper/dygraph/pytorch2paddle/aten.py
index 00121bb4128182506daca12e612e36e44375b421..28c2e6ae45e4b777b92cafc05b35d3f5c7086e58 100644
--- a/x2paddle/op_mapper/dygraph/pytorch2paddle/aten.py
+++ b/x2paddle/op_mapper/dygraph/pytorch2paddle/aten.py
@@ -4537,6 +4537,72 @@ def aten_upsample_bilinear2d(mapper, graph, node):
         **layer_attrs)
     return current_inputs, current_outputs
 
+def aten_upsample_nearest2d(mapper, graph, node):
+    """ Construct a PaddleLayer that performs nearest upsampling.
+
+    TorchScript example:
+        %4997 : Tensor = aten::upsample_nearest2d(%x.13, %4963, %5421, %4995)
+        Parameter meanings:
+        %4997 (Tensor): the output, i.e. the upsampled Tensor.
+        %x.13 (Tensor): the Tensor to be upsampled.
+        %4963 (list): the output size after upsampling.
+        %5421 (float): the multiplier for the height.
+        %4995 (float): the multiplier for the width.
+    """
+    scope_name = mapper.normalize_scope_name(node)
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    layer_attrs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Get the list of outputs of the current node
+    current_outputs = [output_name]
+    # Handle input 0, i.e. %x.13
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, scope_name)
+    layer_inputs["x"] = inputs_name[0]
+    # Get the list of inputs of the current node
+    current_inputs = list(layer_inputs.values())
+    # Handle input 1, i.e. %4963
+    if inputs_name[1] in mapper.attrs:
+        layer_attrs["size"] = mapper.attrs[inputs_name[1]]
+    else:
+        mapper._check_input(graph, inputs_node[1], inputs_name[1],
+                            current_outputs, scope_name)
+        layer_inputs["size"] = inputs_name[1]
+        current_inputs.append(inputs_name[1])
+        graph.add_layer(
+            "prim.isinstance",
+            inputs={"input": inputs_name[1]},
+            outputs=[inputs_name[1] + "_isinstance"],
+            scope_name=scope_name,
+            cls="paddle.fluid.Variable")
+        # TODO(syf): paddle.Variable
+        graph.add_layer(
+            "prim.if", {"input": inputs_name[1] + "_isinstance"},
+            outputs=[inputs_name[0] + "_if1"],
+            scope_name=scope_name)
+        if_layer = graph.layers[list(graph.layers.keys())[-1]]
+        block = PaddleGraph(source_type="pytorch", parent_layer=if_layer, graph_type="dygraph")
+        block.add_layer(
+            "prim.var2list",
+            inputs={"input": inputs_name[1]},
+            outputs=[inputs_name[1]],
+            scope_name=scope_name)
+        if_layer.add_block(block)
+        block = PaddleGraph(source_type="pytorch", parent_layer=if_layer, graph_type="dygraph")
+        if_layer.add_block(block)
+        if_layer.inputs["input-0"] = inputs_name[1]
+    layer_inputs["scale_factor"] = inputs_name[3]
+    layer_attrs["align_mode"] = 0
+    layer_attrs["mode"] = string("nearest")
+    graph.add_layer(
+        "paddle.nn.functional.interpolate",
+        inputs=layer_inputs,
+        outputs=layer_outputs,
+        scope_name=scope_name,
+        **layer_attrs)
+    return current_inputs, current_outputs
+
 
 def aten_values(mapper, graph, node):
     """ Construct a PaddleLayer that compares sizes.
diff --git a/x2paddle/op_mapper/dygraph/tf2paddle/tf_op_mapper.py b/x2paddle/op_mapper/dygraph/tf2paddle/tf_op_mapper.py
index ad03f06dd262acd4eaddee3b68854fb4363ea051..fe940663f07e0ba2ba0f22e53c2e3e711ef8757d 100644
--- a/x2paddle/op_mapper/dygraph/tf2paddle/tf_op_mapper.py
+++ b/x2paddle/op_mapper/dygraph/tf2paddle/tf_op_mapper.py
@@ -676,8 +676,8 @@ class TFOpMapper(OpMapper):
         input = self.graph.get_input_node(node, 0)
         paddings = self.graph.get_input_node(node, 1)
         assert paddings.layer_type == "Const", "Padding should be Const"
-        paddings = np.flip(paddings.value, 0).flatten().tolist()
-        dim = int(len(paddings) / 2)
+        new_paddings = numpy.flip(paddings.value, 0).flatten().tolist()
+        dim = int(len(new_paddings) / 2)
         transpose_name = gen_name("pad", "transpose")
         self.paddle_graph.add_layer(
             kernel="paddle.transpose",
@@ -688,7 +688,7 @@ class TFOpMapper(OpMapper):
             kernel="paddle.nn.Pad{}D".format(dim),
             inputs={"x": transpose_name},
             outputs=layer_outputs,
-            pad=new_padding)
+            pad=new_paddings)
         self.paddle_graph.add_layer(
             kernel="paddle.transpose",
             inputs={"x": node.name},
diff --git a/x2paddle/op_mapper/static/tf2paddle/tf_op_mapper.py b/x2paddle/op_mapper/static/tf2paddle/tf_op_mapper.py
index 700ac74a0d9bd512f0016ee64cfd1ff792ad4a5f..20317792370bee14ba56691c531b0dc0d656c5ea 100644
--- a/x2paddle/op_mapper/static/tf2paddle/tf_op_mapper.py
+++ b/x2paddle/op_mapper/static/tf2paddle/tf_op_mapper.py
@@ -661,7 +661,7 @@ class TFOpMapper(OpMapper):
         input = self.graph.get_input_node(node, 0)
         paddings = self.graph.get_input_node(node, 1)
         assert paddings.layer_type == "Const", "Padding should be Const"
-        paddings = np.flip(paddings.value, 0).flatten().tolist()
+        new_paddings = numpy.flip(paddings.value, 0).flatten().tolist()
         transpose_name = gen_name("pad", "transpose")
         self.paddle_graph.add_layer(
             kernel="paddle.transpose",
@@ -672,7 +672,7 @@ class TFOpMapper(OpMapper):
             kernel="paddle.nn.functional.pad".format(dim),
             inputs={"x": transpose_name},
             outputs=[node.name],
-            pad=new_padding)
+            pad=new_paddings)
         self.paddle_graph.add_layer(
             kernel="paddle.transpose",
             inputs={"x": node.name},
diff --git a/x2paddle/optimizer/pytorch_code_optimizer/hierachical_tree.py b/x2paddle/optimizer/pytorch_code_optimizer/hierachical_tree.py
index ee36d2d3748d3915c09b0e3683f38675ad2119c4..6e7e4afed51cf04334df880c2b9d0adcefb2e7c2 100644
--- a/x2paddle/optimizer/pytorch_code_optimizer/hierachical_tree.py
+++ b/x2paddle/optimizer/pytorch_code_optimizer/hierachical_tree.py
@@ -300,6 +300,7 @@ class HierarchicalTree(Tree):
         """
         depths = sorted(list(self._hierarchical_order.keys()), reverse=True)
         all_name_old2new = dict()
+        current_module_name_list = list()
        for depth in depths[1:]:
             # Mapping from Module names to their subgraphs
             module_name2sub_layers = dict()
@@ -352,6 +353,9 @@ class HierarchicalTree(Tree):
                 module_name = None
             else:
                 module_name = name
+            while module_name in current_module_name_list:
+                module_name += "__0"
+            current_module_name_list.append(module_name)
             self.merge_node(module_name2sub_layers[name],
                             sequentials2attrs_table[name],
                             node_name2sub_layers,
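
For reviewers who want to exercise the new aten_upsample_nearest2d mapping end to end, here is a minimal sketch (not part of the patch): the NearestUp module, the input shape, and the save_dir are illustrative, while pytorch2paddle and its arguments follow x2paddle's documented trace-based conversion entry point.

    import torch
    import torch.nn.functional as F
    from x2paddle.convert import pytorch2paddle

    class NearestUp(torch.nn.Module):
        def forward(self, x):
            # Traces to aten::upsample_nearest2d, which the new mapper lowers
            # to paddle.nn.functional.interpolate(..., mode="nearest").
            return F.interpolate(x, scale_factor=2.0, mode="nearest")

    # jit_type="trace" produces the TorchScript graph that aten.py maps op by op.
    pytorch2paddle(NearestUp().eval(),
                   save_dir="pd_model",
                   jit_type="trace",
                   input_examples=[torch.rand(1, 3, 32, 32)])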