From 961e600db0b021ddec93991ddb3650c3b44836dc Mon Sep 17 00:00:00 2001
From: WJJ1995
Date: Wed, 15 Sep 2021 17:24:26 +0800
Subject: [PATCH] Add bitwise ops (#677)

* Add bitwise ops

* update op_list.md
---
 docs/inference_model_convertor/op_list.md |   1 +
 x2paddle/op_mapper/pytorch2paddle/aten.py | 101 ++++++++++++++++++++++
 2 files changed, 102 insertions(+)

diff --git a/docs/inference_model_convertor/op_list.md b/docs/inference_model_convertor/op_list.md
index f482d4b..b2af9ea 100644
--- a/docs/inference_model_convertor/op_list.md
+++ b/docs/inference_model_convertor/op_list.md
@@ -108,6 +108,7 @@ Aten:
 | 105 | aten::where | 106 | aten::zeros |107|aten::zeros\_like|108|aten::bmm|
 | 109 | aten::sub\_ | 110 | aten:erf |111|aten::lstm|112|aten::gather|
 | 113 | aten::upsample\_nearest2d | 114 | aten::split\_with\_sizes | 115 | aten::sum | 116 | aten::instance_norm |
+| 117 | aten::bitwise_not | 118 | aten::bitwise_xor | 119 | aten::bitwise_and | | |
 
 Prim:
 | 序号 | OP | 序号 | OP | 序号 | OP | 序号 | OP |
diff --git a/x2paddle/op_mapper/pytorch2paddle/aten.py b/x2paddle/op_mapper/pytorch2paddle/aten.py
index 4da7ec5..a886e57 100755
--- a/x2paddle/op_mapper/pytorch2paddle/aten.py
+++ b/x2paddle/op_mapper/pytorch2paddle/aten.py
@@ -71,6 +71,7 @@ def aten_sum(mapper, graph, node):
         **layer_attrs)
     return current_inputs, current_outputs
 
+
 def aten_abs(mapper, graph, node):
     """ 构造获取绝对值的PaddleLayer。
     TorchScript示例:
@@ -749,6 +750,106 @@ def aten_batch_norm(mapper, graph, node):
     return current_inputs, current_outputs
 
 
+def aten_bitwise_not(mapper, graph, node):
+    """ Construct a PaddleLayer for aten::bitwise_not (emits "prim.not", a logical NOT -- NOTE(review): confirm bitwise semantics for non-bool inputs).
+    TorchScript example:
+        %x.222 : Tensor = aten::bitwise_not(%32)
+    Parameter meaning:
+        %x.222 (Tensor): output, result of the NOT operation.
+        %32 (Tensor): input 1.
+    """
+    scope_name = mapper.normalize_scope_name(node)
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Collect the list of outputs of the current node.
+    current_outputs = [output_name]
+    # Process input 0, i.e. %32.
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
+                        scope_name)
+    layer_inputs["input"] = inputs_name[0]
+    # Collect the list of inputs of the current node.
+    current_inputs = list(layer_inputs.values())
+
+    graph.add_layer(
+        "prim.not",
+        inputs=layer_inputs,
+        outputs=layer_outputs,
+        scope_name=scope_name)
+    return current_inputs, current_outputs
+
+
+def aten_bitwise_xor(mapper, graph, node):
+    """ Construct a PaddleLayer for aten::bitwise_xor. NOTE(review): the code below emits "prim.or" (logical OR), which does not match XOR semantics -- confirm against prim2code.
+    TorchScript example:
+        %x.222 : Tensor = aten::bitwise_xor(%32, %8)
+    Parameter meaning:
+        %x.222 (Tensor): output, result of the operation.
+        %32 (Tensor): input 1.
+        %8 (Tensor): input 2.
+    """
+    scope_name = mapper.normalize_scope_name(node)
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Collect the list of outputs of the current node.
+    current_outputs = [output_name]
+    # Process input 0, i.e. %32.
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
+                        scope_name)
+    layer_inputs["x"] = inputs_name[0]
+    # Process input 1, i.e. %8.
+    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs,
+                        scope_name)
+    layer_inputs["y"] = inputs_name[1]
+    # Collect the list of inputs of the current node.
+    current_inputs = list(layer_inputs.values())
+
+    graph.add_layer(
+        "prim.or",
+        inputs=layer_inputs,
+        outputs=layer_outputs,
+        scope_name=scope_name)
+    return current_inputs, current_outputs
+
+
+def aten_bitwise_and(mapper, graph, node):
+    """ Construct a PaddleLayer for aten::bitwise_and (emits "prim.and", a logical AND -- NOTE(review): confirm bitwise semantics for non-bool inputs).
+    TorchScript example:
+        %x.222 : Tensor = aten::bitwise_and(%32, %8)
+    Parameter meaning:
+        %x.222 (Tensor): output, result of the AND operation.
+        %32 (Tensor): input 1.
+        %8 (Tensor): input 2.
+    """
+    scope_name = mapper.normalize_scope_name(node)
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Collect the list of outputs of the current node.
+    current_outputs = [output_name]
+    # Process input 0, i.e. %32.
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
+                        scope_name)
+    layer_inputs["x"] = inputs_name[0]
+    # Process input 1, i.e. %8.
+    mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs,
+                        scope_name)
+    layer_inputs["y"] = inputs_name[1]
+    # Collect the list of inputs of the current node.
+    current_inputs = list(layer_inputs.values())
+
+    graph.add_layer(
+        "prim.and",
+        inputs=layer_inputs,
+        outputs=layer_outputs,
+        scope_name=scope_name)
+    return current_inputs, current_outputs
+
+
 def aten_bmm(mapper, graph, node):
     """ 构造矩阵相乘的PaddleLayer。
     TorchScript示例:
-- 
GitLab