diff --git a/docs/inference_model_convertor/op_list.md b/docs/inference_model_convertor/op_list.md
index e68ecb643ff696fa7e025412d93edf20dcc8fd96..96fae502d8d72ae25fc782eef5f020d09024740b 100755
--- a/docs/inference_model_convertor/op_list.md
+++ b/docs/inference_model_convertor/op_list.md
@@ -115,7 +115,8 @@ Aten:
 | 121 | aten::repeat\_interleave | 122 | aten::maxpool1d | 123 | aten::frobenius\_norm | 124 | aten::format |
 | 125 | aten::complex | 126 | aten::real | 127 | aten::imag | 128 | aten::fft\_rfftn |
 | 129 | aten::fft\_irfftn | 130 | aten::hardsigmoid | 131 | aten::hardswish | 132 | aten::linear |
-| 133 | aten::rsqrt | 134 | aten::replication\_pad1d | 135 | aten::full | | |
+| 133 | aten::rsqrt | 134 | aten::replication\_pad1d | 135 | aten::full | 136 | aten::argmax |
+| 137 | aten::copy | | | | | | |
 
 Prim:
 | No. | OP | No. | OP | No. | OP | No. | OP |
diff --git a/x2paddle/op_mapper/pytorch2paddle/aten.py b/x2paddle/op_mapper/pytorch2paddle/aten.py
index e9259de4687dad0dabdf3c910c69b9a74e549889..739109e01dffdc207f469f40cc9f50872f93b06e 100755
--- a/x2paddle/op_mapper/pytorch2paddle/aten.py
+++ b/x2paddle/op_mapper/pytorch2paddle/aten.py
@@ -484,6 +484,55 @@ def aten_arange(mapper, graph, node):
     return current_inputs, current_outputs
 
 
+def aten_argmax(mapper, graph, node):
+    """ Construct a PaddleLayer for argmax.
+    TorchScript example:
+        %x.28 : Tensor = aten::argmax(%result.1, %4967, %3)
+    Parameter meaning:
+        %x.28 (Tensor): output tensor
+        %result.1 (Tensor): input tensor
+        %4967 (int): axis along which to compute the argmax
+        %3 (bool): whether to keep the reduced dimension
+    """
+    scope_name = mapper.normalize_scope_name(node)
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    layer_attrs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # list of outputs of the current node
+    current_outputs = [output_name]
+    # process the input tensor
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
+                        scope_name)
+    layer_inputs["x"] = inputs_name[0]
+    current_inputs = list(layer_inputs.values())
+    # process the axis: a constant becomes an attribute, otherwise a runtime input
+    if inputs_name[1] in mapper.attrs:
+        layer_attrs["axis"] = mapper.attrs[inputs_name[1]]
+    else:
+        mapper._check_input(graph, inputs_node[1], inputs_name[1],
+                            current_outputs, scope_name)
+        layer_inputs["axis"] = inputs_name[1]
+        current_inputs.append(inputs_name[1])
+    # process keepdim the same way
+    if inputs_name[2] in mapper.attrs:
+        layer_attrs["keepdim"] = mapper.attrs[inputs_name[2]]
+    else:
+        mapper._check_input(graph, inputs_node[2], inputs_name[2],
+                            current_outputs, scope_name)
+        layer_inputs["keepdim"] = inputs_name[2]
+        current_inputs.append(inputs_name[2])
+
+    graph.add_layer(
+        "paddle.argmax",
+        inputs=layer_inputs,
+        outputs=layer_outputs,
+        scope_name=scope_name,
+        **layer_attrs)
+    return current_inputs, current_outputs
+
+
 def aten_avg_pool2d(mapper, graph, node):
     """ Construct an average pooling PaddleLayer.
     TorchScript example:
@@ -1075,6 +1124,35 @@ def aten_complex(mapper, graph, node):
     return current_inputs, current_outputs
 
 
+def aten_copy(mapper, graph, node):
+    """ Construct a PaddleLayer for copy; lowered to prim.equal (identity).
+    TorchScript example:
+        %107 : Tensor = aten::copy(%new_mem.1)
+    Parameter meaning:
+        %107 (Tensor): output tensor
+        %new_mem.1 (Tensor): input tensor
+    """
+    scope_name = mapper.normalize_scope_name(node)
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [output_name]
+    layer_inputs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # list of outputs of the current node
+    current_outputs = [output_name]
+    # process the input tensor
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
+                        scope_name)
+    layer_inputs["input"] = inputs_name[0]
+    current_inputs = list(layer_inputs.values())
+    graph.add_layer(
+        "prim.equal",
+        inputs=layer_inputs,
+        outputs=layer_outputs,
+        scope_name=scope_name)
+
+    return current_inputs, current_outputs
+
+
 def aten___contains__(mapper, graph, node):
     """ Construct a PaddleLayer for `in`.
     TorchScript example:
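As a quick sanity check on the attribute renaming the new `aten_argmax` mapper relies on (PyTorch's `dim` becomes Paddle's `axis`, `keepdim` keeps its name), the snippet below compares the two framework calls directly. It is illustrative only and not part of the patch; it assumes `torch`, `paddle`, and `numpy` are installed, and the input `data` is an arbitrary hypothetical tensor.

```python
import numpy as np
import paddle
import torch

# Hypothetical input; shape chosen arbitrarily for the check.
data = np.random.rand(2, 3, 4).astype("float32")

# PyTorch side: what the traced graph records as
#   aten::argmax(%input, %dim, %keepdim)
t_out = torch.argmax(torch.from_numpy(data), dim=1, keepdim=True)

# Paddle side: the call that graph.add_layer("paddle.argmax", ...) emits,
# with dim renamed to axis.
p_out = paddle.argmax(paddle.to_tensor(data), axis=1, keepdim=True)

assert np.array_equal(t_out.numpy(), p_out.numpy())

# aten::copy is lowered to prim.equal, i.e. a plain assignment of the
# input to the output, so there is no numeric behavior to verify.
```

For an end-to-end check, a traced module containing `torch.argmax` can be run through x2paddle's `pytorch2paddle` converter entry point, which is how these mapper functions are ultimately exercised.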