diff --git a/docs/inference_model_convertor/op_list.md b/docs/inference_model_convertor/op_list.md
index e68ecb643ff696fa7e025412d93edf20dcc8fd96..fb418db1dd6cc52a8ea0ada98037bbcb54b9b712 100755
--- a/docs/inference_model_convertor/op_list.md
+++ b/docs/inference_model_convertor/op_list.md
@@ -115,7 +115,7 @@ Aten:
 | 121 | aten::repeat\_interleave | 122 | aten::maxpool1d | 123 | aten::frobenius\_norm | 124 | aten::format |
 | 125 | aten::complex | 126 | aten::real | 127 | aten::imag | 128 | aten::fft\_rfftn |
 | 129 | aten::fft\_irfftn | 130 | aten::hardsigmoid | 131 | aten::hardswish | 132 | aten::linear |
-| 133 | aten::rsqrt | 134 | aten::replication\_pad1d | 135 | aten::full | | |
+| 133 | aten::rsqrt | 134 | aten::replication\_pad1d | 135 | aten::full | 136 | aten::group\_norm |
 
 Prim:
 | 序号 | OP | 序号 | OP | 序号 | OP | 序号 | OP |
diff --git a/x2paddle/op_mapper/pytorch2paddle/aten.py b/x2paddle/op_mapper/pytorch2paddle/aten.py
index 0dbad611f116b5d0d8698755fe2d22c1020da3d0..2822f3097e5388d4aa6e774a0c92e6f49eebb9d1 100755
--- a/x2paddle/op_mapper/pytorch2paddle/aten.py
+++ b/x2paddle/op_mapper/pytorch2paddle/aten.py
@@ -2654,6 +2654,59 @@ def aten_gt(mapper, graph, node):
     return current_inputs, current_outputs
 
 
+def aten_group_norm(mapper, graph, node):
+    """Map aten::group_norm onto a paddle.nn.GroupNorm PaddleLayer.
+    TorchScript Code:
+    %input.81 : Tensor = aten::group_norm(%input.2, %25, %60, %59, %26, %30)
+    Parameter meaning:
+    %input.81 (Tensor): Output Tensor
+    %input.2 (Tensor): Input Tensor
+    %25 (Tensor): num_groups
+    %60 (Tensor): weight
+    %59 (Tensor): bias
+    %26 (Tensor): eps
+    %30 (bool): enabled cudnn (unused; cudnn selection is irrelevant to Paddle)
+    """
+    scope_name = mapper.normalize_scope_name(node)
+    op_name = name_generator("groupnorm", mapper.nn_name2id)
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [op_name, output_name]
+    layer_inputs = {}
+    layer_attrs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # output list
+    current_outputs = [output_name]
+    # process Input Tensor
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
+                        scope_name)
+    layer_inputs["input"] = inputs_name[0]
+    # input list
+    current_inputs = list(layer_inputs.values())
+    # process num_groups
+    layer_attrs['num_groups'] = mapper.attrs[inputs_name[1]]
+    # process weight; num_channels is recovered from the weight's shape
+    weights = mapper.pytorch_params[inputs_name[2]]
+    mapper.paddle_params[op_name + ".weight"] = weights
+    layer_attrs['num_channels'] = weights.shape[0]
+    # process bias: guard on the bias key (inputs_name[3]), not the weight key
+    if inputs_name[3] in mapper.pytorch_params:
+        bias = mapper.pytorch_params[inputs_name[3]]
+        if bias is not None:
+            mapper.paddle_params[op_name + ".bias"] = bias
+    else:
+        mapper.paddle_params[op_name + ".bias"] = False
+    # process eps
+    layer_attrs["epsilon"] = mapper.attrs[inputs_name[4]]
+
+    graph.add_layer(
+        "paddle.nn.GroupNorm",
+        inputs=layer_inputs,
+        outputs=layer_outputs,
+        scope_name=scope_name,
+        **layer_attrs)
+    return current_inputs, current_outputs
+
+
 def aten_gru(mapper, graph, node):
     """ 构造门控循环单元网络(GRU)的PaddleLayer。
     TorchScript示例: