diff --git a/docs/inference_model_convertor/op_list.md b/docs/inference_model_convertor/op_list.md
index 2cf8ca6186905d6704400dda9d1b4e4e565edb03..3fc921ee50703485bd5d64fd1a661c9d5ad433fd 100755
--- a/docs/inference_model_convertor/op_list.md
+++ b/docs/inference_model_convertor/op_list.md
@@ -115,6 +115,7 @@ Aten:
 | 121 | aten::repeat\_interleave | 122 | aten::maxpool1d | 123 | aten::frobenius\_norm | 124 | aten::format |
 | 125 | aten::complex | 126 | aten::real | 127 | aten::imag | 128 | aten::fft\_rfftn |
 | 129 | aten::fft\_irfftn | 130 | aten::hardsigmoid | 131 | aten::hardswish | 132 | aten::linear |
+| 133 | aten::rsqrt | | | | | | |
 
 
 Prim:
diff --git a/x2paddle/op_mapper/pytorch2paddle/aten.py b/x2paddle/op_mapper/pytorch2paddle/aten.py
index dc97d00d56384779b793b438b67377ebffc468ca..11fd90e2ac117fc2be61574c3677ac3833ec4614 100755
--- a/x2paddle/op_mapper/pytorch2paddle/aten.py
+++ b/x2paddle/op_mapper/pytorch2paddle/aten.py
@@ -4789,6 +4789,36 @@ def aten_rsub(mapper, graph, node):
     return current_inputs, current_outputs
 
 
+def aten_rsqrt(mapper, graph, node):
+    """
+    Construct the PaddleLayer that maps aten::rsqrt onto paddle.rsqrt.
+    TorchScript Code:
+        %n0.3 : Tensor = aten::rsqrt(%n.3)
+    Parameter meaning:
+        %n0.3 (Tensor): output tensor
+        %n.3 (Tensor): input tensor
+    """
+    scope_name = mapper.normalize_scope_name(node)
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # outputs list: the single tensor this op produces
+    current_outputs = [output_name]
+    # inputs list: register the input node, then bind it as the "x" argument
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
+                        scope_name)
+    layer_inputs["x"] = inputs_name[0]
+
+    current_inputs = list(layer_inputs.values())
+
+    graph.add_layer(
+        "paddle.rsqrt",
+        inputs=layer_inputs,
+        outputs=layer_outputs,
+        scope_name=scope_name)
+    return current_inputs, current_outputs
+
+
 def aten_ScalarImplicit(mapper, graph, node):
     """ 构造获取scalar的PaddleLayer。
     TorchScript示例: