diff --git a/docs/inference_model_convertor/op_list.md b/docs/inference_model_convertor/op_list.md
index e4996c7b8702c513364a2ff983664ee9a4c5564f..2cf8ca6186905d6704400dda9d1b4e4e565edb03 100755
--- a/docs/inference_model_convertor/op_list.md
+++ b/docs/inference_model_convertor/op_list.md
@@ -114,7 +114,7 @@ Aten:
 | 117 | aten::bitwise\_not | 118 | aten::bitwise\_xor | 119 | aten::bitwise\_and | 120 | aten::silu |
 | 121 | aten::repeat\_interleave | 122 | aten::maxpool1d | 123 | aten::frobenius\_norm | 124 | aten::format |
 | 125 | aten::complex | 126 | aten::real | 127 | aten::imag | 128 | aten::fft\_rfftn |
-| 129 | aten::fft\_irfftn | aten::linear | | | | | |
+| 129 | aten::fft\_irfftn | 130 | aten::hardsigmoid | 131 | aten::hardswish | 132 | aten::linear |
 
 Prim:
 
diff --git a/x2paddle/op_mapper/pytorch2paddle/aten.py b/x2paddle/op_mapper/pytorch2paddle/aten.py
index 56d3b7d9f5bdf7a6414e6b78dab4ef7e772cbbcb..dc97d00d56384779b793b438b67377ebffc468ca 100755
--- a/x2paddle/op_mapper/pytorch2paddle/aten.py
+++ b/x2paddle/op_mapper/pytorch2paddle/aten.py
@@ -2743,6 +2743,68 @@ def aten_hardtanh(mapper, graph, node):
     return current_inputs, current_outputs
 
 
+def aten_hardsigmoid(mapper, graph, node):
+    """
+    TorchScript Code:
+        %55 : Tensor = aten::hardsigmoid(%54)
+    Parameter meaning:
+        %55 (Tensor): output
+        %54 (Tensor): input tensor
+    """
+    scope_name = mapper.normalize_scope_name(node)
+    op_name = name_generator("hardsigmoid", mapper.nn_name2id)
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [op_name, output_name]
+    layer_inputs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # outputs list
+    current_outputs = [output_name]
+    # inputs list
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
+                        scope_name)
+    layer_inputs["x"] = inputs_name[0]
+
+    current_inputs = list(layer_inputs.values())
+
+    graph.add_layer(
+        "paddle.nn.Hardsigmoid",
+        inputs=layer_inputs,
+        outputs=layer_outputs,
+        scope_name=scope_name)
+    return current_inputs, current_outputs
+
+
+def aten_hardswish(mapper, graph, node):
+    """
+    TorchScript Code:
+        %55 : Tensor = aten::hardswish(%54)
+    Parameter meaning:
+        %55 (Tensor): output
+        %54 (Tensor): input tensor
+    """
+    scope_name = mapper.normalize_scope_name(node)
+    op_name = name_generator("hardswish", mapper.nn_name2id)
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [op_name, output_name]
+    layer_inputs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # outputs list
+    current_outputs = [output_name]
+    # inputs list
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
+                        scope_name)
+    layer_inputs["x"] = inputs_name[0]
+
+    current_inputs = list(layer_inputs.values())
+
+    graph.add_layer(
+        "paddle.nn.Hardswish",
+        inputs=layer_inputs,
+        outputs=layer_outputs,
+        scope_name=scope_name)
+    return current_inputs, current_outputs
+
+
 def aten_index(mapper, graph, node):
     """ Construct a PaddleLayer that selects elements.
     TorchScript example:
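
The two new mappers follow the same single-input pattern as the existing activation handlers: register the tensor input and emit a `paddle.nn.Hardsigmoid` or `paddle.nn.Hardswish` layer. Below is a minimal sketch (not part of the diff) of a model that exercises the new ops end to end; the module, shapes, and `pd_model` output directory are illustrative, and the conversion call assumes x2paddle's documented trace-based `pytorch2paddle` entry point.

```python
# Illustrative only: exercises aten::hardsigmoid / aten::hardswish so the
# new mappers above are hit during conversion.
import torch
import torch.nn as nn


class HardActNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(16, 16)
        self.hsig = nn.Hardsigmoid()
        self.hswish = nn.Hardswish()

    def forward(self, x):
        x = self.fc(x)           # traced as aten::linear
        x = self.hsig(x)         # traced as aten::hardsigmoid
        return self.hswish(x)    # traced as aten::hardswish


net = HardActNet().eval()
example = torch.rand(1, 16)

# Inspect the TorchScript graph to confirm the expected node kinds appear.
traced = torch.jit.trace(net, example)
print(traced.graph)

# Convert with x2paddle in trace mode (requires x2paddle installed;
# call follows its documented API, save_dir name is arbitrary).
from x2paddle.convert import pytorch2paddle
pytorch2paddle(net, save_dir="pd_model", jit_type="trace",
               input_examples=[example])
```

Printing the traced graph should show `aten::hardsigmoid` and `aten::hardswish` nodes, which are exactly the node kinds dispatched to `aten_hardsigmoid` and `aten_hardswish` above.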