diff --git a/x2paddle/op_mapper/pytorch2paddle/prim.py b/x2paddle/op_mapper/pytorch2paddle/prim.py index 7661714d4c9f735a562200a82afc05d46405eea7..60dba6832600f98da7b1ef9dbaabb0205a8f91fe 100644 --- a/x2paddle/op_mapper/pytorch2paddle/prim.py +++ b/x2paddle/op_mapper/pytorch2paddle/prim.py @@ -44,7 +44,7 @@ def prim_GetAttr(mapper, graph, node): %7 (Tensor): 输入Tensor。 %27 (Tensor): 输入Tensor。 """ - output_name = mapper._get_outputs_name(node)[0] + current_node = node field_name_list = [node.s('name')] while True: input_node = list(node.inputs())[0].node() @@ -53,18 +53,16 @@ node = input_node except Exception: break - if ".".join(field_name_list) in mapper.pytorch_params: - mapper.pytorch_params[output_name] = mapper.pytorch_params[".".join( - field_name_list)] - else: - part_script = mapper.script - for field_name in field_name_list: - if hasattr(part_script, field_name): - param = getattr(part_script, field_name) - if isinstance(param, torch.Tensor): - param = param.detach().numpy() - mapper.pytorch_params[output_name] = param - part_script = param + attr_name = ".".join(field_name_list) + output_name = mapper._get_outputs_name(current_node, attr_name)[0] + part_script = mapper.script + for field_name in field_name_list: + if hasattr(part_script, field_name): + param = getattr(part_script, field_name) + if isinstance(param, 
torch.Tensor): + param = param.detach().numpy() + mapper.pytorch_params[output_name] = param + part_script = param return [], [output_name] @@ -295,8 +293,15 @@ def prim_SetAttr(mapper, graph, node): field_name_list.append(node.s('name')) inputs_name, inputs_node = mapper._get_inputs_name(node) - param = {"Tensor": inputs_name[1]} + param = { + "Tensor": "self." + ".".join(field_name_list).replace(".", "_"), + "parent_layer_id": graph.parent_layer.id + } mapper.pytorch_params[".".join(field_name_list)] = param + graph.add_layer( + "prim.set_attr", + inputs={"input": inputs_name[1]}, + outputs=["self." + ".".join(field_name_list).replace(".", "_")]) return [], [output_name] diff --git a/x2paddle/op_mapper/pytorch2paddle/prim2code.py b/x2paddle/op_mapper/pytorch2paddle/prim2code.py index 5293d181f8ebbbe8c3ba4bf9e0539f57a962cd19..d30c3ee488bd0a7b1b92890e1824c2b95d2bf0ac 100644 --- a/x2paddle/op_mapper/pytorch2paddle/prim2code.py +++ b/x2paddle/op_mapper/pytorch2paddle/prim2code.py @@ -208,6 +208,11 @@ def prim_select(layer, indent=1, init_func=[], forward_func=[]): forward_func.extend(gen_codes([line], indent=indent)) +def prim_set_attr(layer, indent=1, init_func=[], forward_func=[]): + line = "{} = {}".format(layer.outputs[0], get_value(layer, "input")) + forward_func.extend(gen_codes([line], indent=indent)) + + def prim_shape(layer, indent=1, init_func=[], forward_func=[]): line = "{} = {}.shape".format(layer.outputs[0], get_value(layer, "input")) forward_func.extend(gen_codes([line], indent=indent)) diff --git a/x2paddle/op_mapper/pytorch2paddle/pytorch_op_mapper.py b/x2paddle/op_mapper/pytorch2paddle/pytorch_op_mapper.py index ab51e73450a9f3530fba68d499e40ab72d196a54..95396eb3efad544daea6ae1b2d5fecdbce4656c3 100644 --- a/x2paddle/op_mapper/pytorch2paddle/pytorch_op_mapper.py +++ b/x2paddle/op_mapper/pytorch2paddle/pytorch_op_mapper.py @@ -113,15 +113,19 @@ class PyTorchOpMapper(OpMapper): graph.set_parameters(self.paddle_params) return graph, graph_inputs - def 
_get_outputs_name(self, node): + def _get_outputs_name(self, node, attr_name=None): outputs_name = [] for output_ivalue in node.outputs(): - output_name = 'x' + str(self.output_index) script_unique_id = output_ivalue.unique() - if script_unique_id in self.outputs_info: - output_name = self.outputs_info[script_unique_id] + if attr_name is None: + output_name = 'x' + str(self.output_index) + if script_unique_id in self.outputs_info: + output_name = self.outputs_info[script_unique_id] + else: + output_name = attr_name.replace(".", "_") self.outputs_info[script_unique_id] = output_name self.output_index += 1 + outputs_name.append(output_name) # if或loop节点没有输出的情况 if len(list(node.outputs())) == 0: @@ -148,7 +152,33 @@ class PyTorchOpMapper(OpMapper): outputs=[output_name], value="params[{}]".format(string(output_name))) else: - if isinstance(param, dict) and "Tensor" in param: + if isinstance(param, dict) and "Tensor" in param and \ + "parent_layer_id" in param: + if graph.parent_layer is not None: + # 当某个param被2个控制流(if-else)赋值时,else不可以引用if中的赋值结果 + id1 = param["parent_layer_id"] + id2 = graph.parent_layer.id + id1_part = id1.split(".") + id2_part = id2.split(".") + if len(id1_part) >= len(id2_part): + for i in range(len(id1_part)): + if id1_part[i] == id2_part[i]: + continue + else: + if id1_part[i] == "0" and id2_part[ + i] == "1": + if add_dim: + param = param[np.newaxis, :] + self.paddle_params[output_name] = param + graph.add_layer( + "fluid.dygraph.base.to_variable", + inputs={}, + outputs=[output_name], + value="params[{}]".format( + string(output_name))) + node_outputs.append(output_name) + return + # 若if-else外,则可直接引用if-else中的赋值结果 graph.add_layer( "prim.constant", inputs={}, diff --git a/x2paddle/optimizer/fusion/__init__.py b/x2paddle/optimizer/fusion/__init__.py index d9b69674f88a87abd15a665a3538550751d5e0cb..543afebb82d1e9fb6de264ab36df71e1d28936eb 100644 --- a/x2paddle/optimizer/fusion/__init__.py +++ b/x2paddle/optimizer/fusion/__init__.py @@ -20,3 +20,5 @@ 
from .functional_adaptive_pool2d_fuser import FunctionalAdaptivePool2dFuser from .functional_adaptive_pool2d_fuse_pass import FunctionalAdaptivePool2dFusePass from .constant_fuser import ConstantFuser from .constant_fuse_pass import ConstantFusePass +from .batchnorm2d_fuser import BatchNorm2dFuser +from .batchnorm2d_fuse_pass import BatchNorm2dFusePass diff --git a/x2paddle/optimizer/optimizer.py b/x2paddle/optimizer/optimizer.py index 7f239ab3439107eb9dfbb544a94338b39410a9be..4e6b42a314455374467217996f7c2690ab78efb3 100644 --- a/x2paddle/optimizer/optimizer.py +++ b/x2paddle/optimizer/optimizer.py @@ -20,7 +20,8 @@ class GraphOptimizer(object): def __init__(self): self.passes = [ "fc_fuse_pass", "nn_adaptive_pool2d_fuse_pass", - "functional_adaptive_pool2d_fuse_pass", "constant_fuse_pass" + "functional_adaptive_pool2d_fuse_pass", "batchnorm2d_fuse_pass", + "constant_fuse_pass" ] def optimize(self, graph):