diff --git a/x2paddle/core/program.py b/x2paddle/core/program.py
index df47ee0622ac4eb9189717735125c3892673c7f0..ddc1519f4f42e52a5277da231a17affdb283ea37 100644
--- a/x2paddle/core/program.py
+++ b/x2paddle/core/program.py
@@ -508,9 +508,9 @@ class PaddleGraph(object):
             ],
             indent=0)
         comment_list = list()
-        comment_list.append("# 共{}个输入".format(len(self.inputs_info)))
+        comment_list.append("# There are {} inputs.".format(len(self.inputs_info)))
         for k, v in self.inputs_info.items():
-            comment_list.append("# {}: 形状为{},类型为{}。".format(k, v[0], v[1]))
+            comment_list.append("# {}: shape-{}, type-{}.".format(k, v[0], v[1]))
         self.run_func.extend(
             gen_codes(
                 comment_list,
diff --git a/x2paddle/op_mapper/dygraph/caffe2paddle/caffe_op_mapper.py b/x2paddle/op_mapper/dygraph/caffe2paddle/caffe_op_mapper.py
index 42c709815042457e64898e762dded8d6dcd33140..dcecf0e980667923779706501f8081f7ca91aec8 100644
--- a/x2paddle/op_mapper/dygraph/caffe2paddle/caffe_op_mapper.py
+++ b/x2paddle/op_mapper/dygraph/caffe2paddle/caffe_op_mapper.py
@@ -571,7 +571,7 @@ class CaffeOpMapper(OpMapper):
         if params.HasField('negative_slope') and params.negative_slope != 0:
             negative_slope = float(params.negative_slope)
 
-        layer_attrs = {'alpha': negative_slope}
+        layer_attrs = {'negative_slope': negative_slope}
         self.paddle_graph.add_layer(
             "paddle.nn.LeakyReLU",
             inputs={"input": input.name},
diff --git a/x2paddle/op_mapper/dygraph/onnx2paddle/opset9/opset.py b/x2paddle/op_mapper/dygraph/onnx2paddle/opset9/opset.py
index d4abf556c7dc28be28ecb1317bd2842c903b43ab..fedb75fa8748129fe7f6e4eb418126c4b3859ab6 100755
--- a/x2paddle/op_mapper/dygraph/onnx2paddle/opset9/opset.py
+++ b/x2paddle/op_mapper/dygraph/onnx2paddle/opset9/opset.py
@@ -445,6 +445,8 @@ class OpSet9():
         layer_outputs = [nn_op_name, output_name]
         if is_pads_attr:
             paddings = []
+            if len(pads) == 10 and sum(pads) == 0:
+                pads = pads[0:6]
             if len(pads) in [2, 4, 6]:
                 if data_shape:
                     assume_pad |= data_shape and 2 * (len(data_shape) - 2) == len(pads)  # NCHW
@@ -728,11 +730,14 @@ class OpSet9():
             inputs={'x': name_trans,
                     'index': indices.name},
             outputs=[node.name])
+        new_perm = [0] * len(perm)
+        for i in range(len(perm)):
+            new_perm[perm[i]] = i
         self.paddle_graph.add_layer(
             'paddle.transpose',
             inputs={"x": node.name},
             outputs=[node.name],
-            perm=perm)
+            perm=new_perm)
         if len(indices_shape) < 1:
             self.paddle_graph.add_layer(
                 'paddle.squeeze',
@@ -809,11 +814,15 @@ class OpSet9():
             inputs={'x': name_trans,
                     'index': indices_reshape},
             outputs=[node.name])
         input_transpose = node.name + '_transpose'
+        new_perm = [0] * len(perm)
+        for i in range(len(perm)):
+            new_perm[perm[i]] = i
         self.paddle_graph.add_layer(
             'paddle.transpose',
             inputs={"x": node.name},
             outputs=[input_transpose],
-            perm=perm)
+            perm=new_perm)
+        perm = new_perm
         val_x_shape = val_x.out_shapes[0]
         reshaped_shape = []
         for i in perm:
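A note on the `perm` -> `new_perm` change in the two Gather hunks above (the static opset9 mapper below receives the identical fix): undoing an earlier `paddle.transpose(x, perm=perm)` requires the *inverse* permutation, not `perm` itself; the two coincide only when the permutation is its own inverse, such as a plain swap of two axes. A minimal NumPy sketch of the identity the patch relies on:

```python
import numpy as np

def inverse_perm(perm):
    # new_perm[perm[i]] = i, exactly as in the hunks above.
    new_perm = [0] * len(perm)
    for i, p in enumerate(perm):
        new_perm[p] = i
    return new_perm

x = np.random.rand(2, 3, 4)
perm = [2, 0, 1]
y = x.transpose(perm)

# Transposing by perm a second time does NOT restore x in general...
assert not np.array_equal(y.transpose(perm), x)
# ...but transposing by the inverse permutation does.
assert np.array_equal(y.transpose(inverse_perm(perm)), x)
```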
diff --git a/x2paddle/op_mapper/dygraph/pytorch2paddle/aten.py b/x2paddle/op_mapper/dygraph/pytorch2paddle/aten.py
index d6c6493edf63397e939de3bdfe6625165be57531..636fb41eb081644c26674b330a57b4d0df8d5f6e 100644
--- a/x2paddle/op_mapper/dygraph/pytorch2paddle/aten.py
+++ b/x2paddle/op_mapper/dygraph/pytorch2paddle/aten.py
@@ -3353,6 +3353,42 @@ def aten_pow(mapper, graph, node):
     return current_inputs, current_outputs
 
 
+def aten_prelu(mapper, graph, node):
+    """ Construct a PaddleLayer for the prelu activation.
+
+    TorchScript example:
+        %result.3 : aten::prelu(%input.150, %999)
+    Parameter meanings:
+        %result.3 (Tensor): the output, i.e. the result of prelu.
+        %input.150 (Tensor): the Tensor to apply prelu to.
+        %999 (Tensor): the weight.
+    """
+    scope_name = mapper.normalize_scope_name(node)
+    op_name = name_generator("prelu", mapper.nn_name2id)
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_outputs = [op_name, output_name]
+    layer_inputs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Obtain the list of outputs of the current node.
+    current_outputs = [output_name]
+    # Process input 0, i.e. %input.150.
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, scope_name)
+    layer_inputs["x"] = inputs_name[0]
+    # Process input 1, i.e. %999.
+    weight = mapper.pytorch_params[inputs_name[1]]
+    mapper.paddle_params[op_name + "._weight"] = weight
+    # Obtain the list of inputs of the current node.
+    current_inputs = list(layer_inputs.values())
+
+    graph.add_layer(
+        "paddle.nn.PReLU",
+        inputs=layer_inputs,
+        outputs=layer_outputs,
+        scope_name=scope_name,
+        num_parameters=weight.shape[0])
+    return current_inputs, current_outputs
+
+
 def aten_relu(mapper, graph, node):
     """ 构造ReLU激活的PaddleLayer。
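For context on `aten_prelu` above: the extracted weight is stored under `op_name + "._weight"`, which matches the name `paddle.nn.PReLU` uses for its internal parameter (hence the `"prelu"` kernel key registered in `layer_code_generator.py` below), so the value is restored when the converted model loads its state dict. A minimal sketch of the runtime pattern the generated code depends on; the 64-channel shape and the 0.25 slope are hypothetical:

```python
import numpy as np
import paddle

# Hypothetical per-channel slopes, standing in for the weight pulled
# out of the TorchScript graph by aten_prelu.
weight = np.full([64], 0.25, dtype="float32")

# num_parameters=weight.shape[0] mirrors the add_layer call above.
prelu = paddle.nn.PReLU(num_parameters=weight.shape[0])
prelu.set_state_dict({"_weight": paddle.to_tensor(weight)})

x = paddle.randn([1, 64, 8, 8])
y = prelu(x)  # negative entries are scaled per channel by the loaded slopes
```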
diff --git a/x2paddle/op_mapper/static/onnx2paddle/opset9/opset.py b/x2paddle/op_mapper/static/onnx2paddle/opset9/opset.py
index a8664333c72673ccfc254b68b8953e50aba40a29..ef7e97fe2a089c9c9765cc487369c75642ffd042 100644
--- a/x2paddle/op_mapper/static/onnx2paddle/opset9/opset.py
+++ b/x2paddle/op_mapper/static/onnx2paddle/opset9/opset.py
@@ -407,6 +407,8 @@ class OpSet9():
         if is_pads_attr:
             paddings = []
             paddle_op = 'paddle.nn.functional.pad'
+            if len(pads) == 10 and sum(pads) == 0:
+                pads = pads[0:6]
             if len(pads) in [2, 4, 6]:
                 if data_shape:
                     assume_pad |= data_shape and 2 * (len(data_shape) - 2) == len(pads)  # NCHW
@@ -424,7 +426,7 @@ class OpSet9():
                         (2, -1)).transpose().astype("int32")
                     paddings = np.flip(paddings, axis=0).flatten().tolist()
                     layer_attrs['pad'] = paddings
-                    layer_attrs['data_format'] = data_format
+                    layer_attrs['data_format'] = string(data_format)
                 else:
                     if data_shape:
                         assume_pad |= data_shape and 2 * len(data_shape) == len(pads)  # NCHW
@@ -694,11 +696,14 @@ class OpSet9():
             inputs={'x': name_trans,
                     'index': indices.name},
             outputs=[node.name])
+        new_perm = [0] * len(perm)
+        for i in range(len(perm)):
+            new_perm[perm[i]] = i
         self.paddle_graph.add_layer(
             'paddle.transpose',
             inputs={"x": node.name},
             outputs=[node.name],
-            perm=perm)
+            perm=new_perm)
         if len(indices_shape) < 1:
             self.paddle_graph.add_layer(
                 'paddle.squeeze',
@@ -770,11 +775,15 @@ class OpSet9():
             inputs={'x': name_trans,
                     'index': indices_reshape},
             outputs=[node.name])
         input_transpose = node.name + '_transpose'
+        new_perm = [0] * len(perm)
+        for i in range(len(perm)):
+            new_perm[perm[i]] = i
         self.paddle_graph.add_layer(
             'paddle.transpose',
             inputs={"x": node.name},
             outputs=[input_transpose],
-            perm=perm)
+            perm=new_perm)
+        perm = new_perm
         val_x_shape = val_x.out_shapes[0]
         reshaped_shape = []
         for i in perm:
diff --git a/x2paddle/optimizer/optimizer.py b/x2paddle/optimizer/optimizer.py
index 850677b5fcd2b3556762bbec2e4bc611e273d4f0..448449b68389ce577ddcaedac8115abc71f1cdf7 100644
--- a/x2paddle/optimizer/optimizer.py
+++ b/x2paddle/optimizer/optimizer.py
@@ -60,7 +60,7 @@ class GraphOptimizer(object):
     def optimize(self, graph):
         for pass_name in self.passes:
             pass_ = PassManager.lookup(pass_name)()
-            if pass_name.endswith("_eliminate_pass"):
+            if pass_name.endswith("_eliminate_pass") or pass_name.endswith("_conv2d_add_fuse_pass"):
                 pass_.apply(graph)
             else:
                 while True:
diff --git a/x2paddle/optimizer/pytorch_code_optimizer/hierachical_tree.py b/x2paddle/optimizer/pytorch_code_optimizer/hierachical_tree.py
index 6e7e4afed51cf04334df880c2b9d0adcefb2e7c2..5ae708b688032423ba1eed62d067cd95f4a2e83e 100644
--- a/x2paddle/optimizer/pytorch_code_optimizer/hierachical_tree.py
+++ b/x2paddle/optimizer/pytorch_code_optimizer/hierachical_tree.py
@@ -383,9 +383,9 @@ class HierarchicalTree(Tree):
         input_data_name = ', '.join(self.pd_graph.inputs)
         run_func_list = list()
         run_func_list.append("def main({}):".format(input_data_name))
-        run_func_list.append("    # 共{}个输入".format(len(self.pd_graph.inputs_info)))
+        run_func_list.append("    # There are {} inputs.".format(len(self.pd_graph.inputs_info)))
         for k, v in self.pd_graph.inputs_info.items():
-            run_func_list.append("    # {}: 形状为{},类型为{}。".format(k, v[0], v[1]))
+            run_func_list.append("    # {}: shape-{}, type-{}.".format(k, v[0], v[1]))
         run_func_list.extend(
             ["    paddle.disable_static()",
              "    params = paddle.load('{}/model.pdparams')".format(osp.abspath(save_dir)),
diff --git a/x2paddle/optimizer/pytorch_code_optimizer/layer_code_generator.py b/x2paddle/optimizer/pytorch_code_optimizer/layer_code_generator.py
index a4c368cdac3d3ab5e194f577daf8c1a4d0c490b9..ab0603523a41d8a0999887035ca717675a6ca125 100644
--- a/x2paddle/optimizer/pytorch_code_optimizer/layer_code_generator.py
+++ b/x2paddle/optimizer/pytorch_code_optimizer/layer_code_generator.py
@@ -27,6 +27,7 @@ NN_KERNEL_NAME = {"paddle.nn.BatchNorm": "bn",
                   "paddle.nn.Linear": "linear",
                   "paddle.nn.Conv2DTranspose": "conv",
                   "paddle.nn.LSTM": "lstm",
+                  "paddle.nn.PReLU": "prelu",
                   "paddle.nn.ReLU": "relu",
                   "paddle.nn.ReLU6": "relu",
                   "paddle.nn.Softmax": "softmax",
@@ -41,7 +42,7 @@ NN_KERNEL_NAME = {"paddle.nn.BatchNorm": "bn",
                   "paddle.nn.GELU": "gelu",
                   "paddle.nn.Hardtanh": "tanh",
                   "paddle.nn.LeakyReLU": "leakly_relu"}
-NN_KERNEL_WITH_PARAMS = list(NN_KERNEL_NAME.keys())[:7]
+NN_KERNEL_WITH_PARAMS = list(NN_KERNEL_NAME.keys())[:8]
 
 def rename_layers(layers, param_tree=None, is_rename_module=False):
     """ 对子模块的输入输出等进行重命名。
diff --git a/x2paddle/optimizer/pytorch_code_optimizer/module_graph.py b/x2paddle/optimizer/pytorch_code_optimizer/module_graph.py
index 54e2cbd34dd0111d3cd4b91aeffb7a255201a829..b6fb8f7547ff8ccf81959f62c81741f6e8a7ca5e 100644
--- a/x2paddle/optimizer/pytorch_code_optimizer/module_graph.py
+++ b/x2paddle/optimizer/pytorch_code_optimizer/module_graph.py
@@ -349,9 +349,9 @@ class ModuleGraph(object):
         input_data_name = ', '.join(self.pd_graph.inputs)
         run_func_list = list()
         run_func_list.append("def main({}):".format(input_data_name))
-        run_func_list.append("    # 共{}个输入".format(len(self.pd_graph.inputs_info)))
+        run_func_list.append("    # There are {} inputs.".format(len(self.pd_graph.inputs_info)))
         for k, v in self.pd_graph.inputs_info.items():
-            run_func_list.append("    # {}: 形状为{},类型为{}。".format(k, v[0], v[1]))
+            run_func_list.append("    # {}: shape-{}, type-{}.".format(k, v[0], v[1]))
         run_func_list.extend(
             ["    paddle.disable_static()",
              "    params = paddle.load('{}/model.pdparams')".format(osp.abspath(save_dir)),
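One more note, on the `string(data_format)` change in the static opset9 pad mapper: layer attributes are pasted verbatim into the generated Python source, so a bare `NCHW` would be emitted as an undefined identifier rather than as a string literal. A small sketch of the difference; `emit_attr` is a hypothetical stand-in for the real code emitter, while `string` mirrors the x2paddle helper of the same name, which wraps a value in quotes:

```python
def string(param):
    # Wrap a value in quotes so it survives as a string literal
    # in the generated source.
    return "'{}'".format(param)

def emit_attr(key, value):
    # Hypothetical stand-in: attributes are interpolated verbatim
    # into the generated line of code.
    return "{}={}".format(key, value)

data_format = "NCHW"
print(emit_attr("data_format", data_format))          # data_format=NCHW  -> NameError at runtime
print(emit_attr("data_format", string(data_format)))  # data_format='NCHW' -> valid string literal
```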