From e1c0461dfeee8614b76c8aff80e3506645e1b9ad Mon Sep 17 00:00:00 2001
From: Tony Cao <57024921+caolonghao@users.noreply.github.com>
Date: Thu, 20 Oct 2022 11:20:33 +0800
Subject: [PATCH] [CodeStyle][W605] Add escape symbols to some strings (#46752)

* Fix W605 in tools folder by adding escape symbols

* Fix W605 in incubate and some other folders

* Fix W605 in /fluid/test folders

* Update tools/analysisPyXml.py

Co-authored-by: Nyakku Shigure

* Add some changes to manual and auto escape symbols

* revert changes in transformer.py

* Fix new code with W605 error: add escape symbols

* revert changes in transformer.py

* revert changes in transformer.py

Co-authored-by: Nyakku Shigure
---
 .../generator/codegen_utils.py                |  6 ++---
 python/paddle/dataset/common.py               |  2 +-
 .../group_sharded_optimizer_stage2.py         |  2 +-
 python/paddle/distribution/gumbel.py          |  6 ++---
 python/paddle/distribution/laplace.py         |  2 +-
 .../test_conv_act_mkldnn_fuse_pass.py         |  2 +-
 .../test_conv_bias_mkldnn_fuse_pass.py        |  2 +-
 ...est_conv_elementwise_add2_act_fuse_pass.py |  2 +-
 ...test_conv_elementwise_add_act_fuse_pass.py |  2 +-
 .../test_conv_eltwiseadd_bn_fuse_pass.py      |  2 +-
 .../test_conv_transpose_bn_fuse_pass.py       |  2 +-
 ..._conv_transpose_eltwiseadd_bn_fuse_pass.py |  2 +-
 .../test_emb_eltwise_layernorm_fuse_pass.py   |  2 +-
 ...test_fc_elementwise_layernorm_fuse_pass.py |  2 +-
 .../ir/inference/test_fc_fuse_pass.py         |  2 +-
 .../test_flatten2_matmul_fuse_pass.py         |  2 +-
 .../ir/inference/test_layer_norm_fuse_pass.py |  2 +-
 .../inference/test_map_matmul_to_mul_pass.py  |  2 +-
 .../test_map_matmul_v2_to_matmul_pass.py      |  2 +-
 .../test_map_matmul_v2_to_mul_pass.py         |  2 +-
 .../inference/test_matmul_scale_fuse_pass.py  |  2 +-
 .../test_matmul_v2_scale_fuse_pass.py         |  2 +-
 .../test_mkldnn_depthwise_conv_pass.py        |  2 +-
 .../test_reshape2_matmul_fuse_pass.py         |  2 +-
 .../test_squeeze2_matmul_fuse_pass.py         |  2 +-
 ...test_transpose_flatten_concat_fuse_pass.py |  2 +-
 .../test_trt_flatten2_matmul_fuse_pass.py     |  2 +-
 .../test_trt_squeeze2_matmul_fuse_pass.py     |  2 +-
 .../test_unsqueeze2_eltwise_fuse_pass.py      |  2 +-
 .../paddle/incubate/nn/layer/fused_linear.py  |  2 +-
 .../incubate/nn/layer/fused_transformer.py    |  2 +-
 .../sparse/nn/functional/activation.py        |  4 +--
 .../incubate/sparse/nn/layer/activation.py    |  4 +--
 python/paddle/incubate/sparse/unary.py        |  4 +--
 python/paddle/nn/functional/pooling.py        |  4 +--
 python/paddle/profiler/profiler_statistic.py  |  2 +-
 python/paddle/tensor/random.py                |  6 ++---
 tools/analysisPyXml.py                        |  2 +-
 tools/get_pr_ut.py                            |  8 +++---
 tools/get_ut_file_map.py                      |  2 +-
 ...rate_pd_op_dialect_from_paddle_op_maker.py |  2 +-
 tools/infrt/generate_phi_kernel_dialect.py    |  2 +-
 tools/infrt/get_compat_kernel_signature.py    |  2 +-
 tools/jetson_infer_op.py                      |  6 ++---
 tools/prune_for_jetson.py                     |  9 ++++---
 tools/remove_grad_op_and_kernel.py            | 26 +++++++++----------
 46 files changed, 77 insertions(+), 76 deletions(-)
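For context, W605 is the pycodestyle check for invalid escape sequences: a backslash in a plain (or f-) string that does not form a recognized Python escape, such as `\d` or `\(`. CPython currently keeps the backslash and only emits a DeprecationWarning, which is why the old literals still behaved correctly at runtime; the fix applied throughout this patch is to mark such literals as raw. A minimal sketch, not part of the patch itself (the sample pattern mirrors the `codegen_utils.py` hunk below):

```python
import re

# W605: in a plain or f-string, '\(' is not a valid escape, so
#     args_pattern = f'^\({fargs}\)$'
# warns "invalid escape sequence '\('" at compile time. CPython still
# passes the backslash through, so the regex worked -- but the literal
# is deprecated. Prefixing the string as raw (fr'...') keeps the bytes
# identical and silences the warning.
fargs = r'(.*?)'
args_pattern = fr'^\({fargs}\)$'  # the fixed form

match = re.search(args_pattern, '(Tensor x, Tensor y)')
assert match is not None and match.group(1) == 'Tensor x, Tensor y'
```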
diff --git a/paddle/fluid/eager/auto_code_generator/generator/codegen_utils.py b/paddle/fluid/eager/auto_code_generator/generator/codegen_utils.py
index 5206cdd675..91a16a0fdf 100644
--- a/paddle/fluid/eager/auto_code_generator/generator/codegen_utils.py
+++ b/paddle/fluid/eager/auto_code_generator/generator/codegen_utils.py
@@ -295,7 +295,7 @@ def ParseYamlForwardFromBackward(string):
     wspace = r'\s*'
     fargs = r'(.*?)'
     frets = r'(.*)'
-    pattern = f'{fname}{wspace}\({wspace}{fargs}{wspace}\){wspace}->{wspace}{frets}'
+    pattern = fr'{fname}{wspace}\({wspace}{fargs}{wspace}\){wspace}->{wspace}{frets}'
 
     m = re.search(pattern, string)
     function_name = m.group(1)
@@ -314,7 +314,7 @@ def ParseYamlForward(args_str, returns_str):
     fargs = r'(.*?)'
     wspace = r'\s*'
 
-    args_pattern = f'^\({fargs}\)$'
+    args_pattern = fr'^\({fargs}\)$'
     args_str = re.search(args_pattern, args_str.strip()).group(1)
 
     inputs_list, attrs_list = ParseYamlArgs(args_str)
@@ -329,7 +329,7 @@ def ParseYamlBackward(args_str, returns_str):
     fargs = r'(.*?)'
     wspace = r'\s*'
 
-    args_pattern = f'\({fargs}\)'
+    args_pattern = fr'\({fargs}\)'
     args_str = re.search(args_pattern, args_str).group(1)
 
     inputs_list, attrs_list = ParseYamlArgs(args_str)
diff --git a/python/paddle/dataset/common.py b/python/paddle/dataset/common.py
index b8004be3a4..97e986c700 100644
--- a/python/paddle/dataset/common.py
+++ b/python/paddle/dataset/common.py
@@ -32,7 +32,7 @@ HOME = os.path.expanduser('~')
 # If the default HOME dir does not support writing, we
 # will create a temporary folder to store the cache files.
 if not os.access(HOME, os.W_OK):
-    """
+    r"""
     gettempdir() return the name of the directory used for temporary files.
     On Windows, the directories C:\TEMP, C:\TMP, \TEMP, and \TMP, in that order.
     On all other platforms, the directories /tmp, /var/tmp, and /usr/tmp, in that order.
diff --git a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_optimizer_stage2.py b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_optimizer_stage2.py
index ed07e8b798..7981375649 100644
--- a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_optimizer_stage2.py
+++ b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_optimizer_stage2.py
@@ -211,7 +211,7 @@ class GroupShardedOptimizerStage2(Optimizer):
             if self._broadcast_order_params is None:
                 # Params' names should be like column_linear_32.w_0 patter to get the best performance.
                 warnings.warn(
-                    "The param name passed to the optimizer doesn't follow .+_[0-9]+\..+ patter, "
+                    r"The param name passed to the optimizer doesn't follow .+_[0-9]+\..+ patter, "
                     "overlap broadcast may harm the performance.")
                 self._broadcast_order_params = self._local_params
diff --git a/python/paddle/distribution/gumbel.py b/python/paddle/distribution/gumbel.py
index 7c9aebc652..225e327cc9 100644
--- a/python/paddle/distribution/gumbel.py
+++ b/python/paddle/distribution/gumbel.py
@@ -98,7 +98,7 @@ class Gumbel(TransformedDistribution):
 
     @property
     def mean(self):
-        """Mean of distribution
+        r"""Mean of distribution
 
         The mean is
 
@@ -120,7 +120,7 @@ class Gumbel(TransformedDistribution):
 
     @property
     def variance(self):
-        """Variance of distribution.
+        r"""Variance of distribution.
 
         The variance is
 
@@ -144,7 +144,7 @@ class Gumbel(TransformedDistribution):
 
     @property
     def stddev(self):
-        """Standard deviation of distribution
+        r"""Standard deviation of distribution
 
         The standard deviation is
 
diff --git a/python/paddle/distribution/laplace.py b/python/paddle/distribution/laplace.py
index 1796f50893..2b117f93ba 100644
--- a/python/paddle/distribution/laplace.py
+++ b/python/paddle/distribution/laplace.py
@@ -105,7 +105,7 @@ class Laplace(distribution.Distribution):
 
     @property
     def variance(self):
-        """Variance of distribution.
+        r"""Variance of distribution.
 
         The variance is
 
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_act_mkldnn_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_act_mkldnn_fuse_pass.py
index 4cef0a1518..d619438de4 100755
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_act_mkldnn_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_act_mkldnn_fuse_pass.py
@@ -20,7 +20,7 @@ import hypothesis.strategies as st
 
 
 class TestConvActMkldnnFusePass(PassAutoScanTest):
-    """
+    r"""
     x_var   f_var(persistable)
       \       /
        conv2d
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bias_mkldnn_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bias_mkldnn_fuse_pass.py
index dd033b1178..eb02c6e1c7 100755
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bias_mkldnn_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bias_mkldnn_fuse_pass.py
@@ -20,7 +20,7 @@ import hypothesis.strategies as st
 
 
 class TestConvBiasMkldnnFusePass(PassAutoScanTest):
-    """
+    r"""
     x_var   f_var(persistable)
       \       /
        conv2d
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add2_act_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add2_act_fuse_pass.py
index 2279db872c..d08da522e1 100755
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add2_act_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add2_act_fuse_pass.py
@@ -23,7 +23,7 @@ os.environ['NVIDIA_TF32_OVERRIDE'] = '0'
 
 
 class TestConvElementwiseAdd2ActPass(PassAutoScanTest):
-    """
+    r"""
     x_var   f_var(persistable)
       \       /
        conv2d
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add_act_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add_act_fuse_pass.py
index a540d94c63..7c1ae063ce 100755
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add_act_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add_act_fuse_pass.py
@@ -22,7 +22,7 @@ os.environ['NVIDIA_TF32_OVERRIDE'] = '0'
 
 
 class TestConvElementwiseAddActPass(PassAutoScanTest):
-    """
+    r"""
     x_var   f_var(persistable)
       \       /
        conv2d
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_eltwiseadd_bn_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_eltwiseadd_bn_fuse_pass.py
index 1fb0612c40..a88150aecb 100755
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_eltwiseadd_bn_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_eltwiseadd_bn_fuse_pass.py
@@ -24,7 +24,7 @@ os.environ['NVIDIA_TF32_OVERRIDE'] = '0'
 
 
 class TestConvEltwiseaddBnFusePass(PassAutoScanTest):
-    """
+    r"""
     x_var   f_var(persistable)
       \       /
        conv2d
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_transpose_bn_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_transpose_bn_fuse_pass.py
index 4349d5d47d..90d81b5047 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_transpose_bn_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_transpose_bn_fuse_pass.py
@@ -21,7 +21,7 @@ import hypothesis.strategies as st
 
 
 class TestConvTransposeBnFusePass(PassAutoScanTest):
-    '''
+    r'''
     conv_input   conv_weight_var(persistable)
       \       /
      conv_op
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_transpose_eltwiseadd_bn_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_transpose_eltwiseadd_bn_fuse_pass.py
index bdbe057ff2..91d9b863e2 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_transpose_eltwiseadd_bn_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_transpose_eltwiseadd_bn_fuse_pass.py
@@ -21,7 +21,7 @@ import hypothesis.strategies as st
 
 
 class TestConvTransposeEltwiseaddBnFusePass(PassAutoScanTest):
-    '''
+    r'''
     conv_input   conv_weight_var(persistable)
       \       /
      conv_op
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_emb_eltwise_layernorm_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_emb_eltwise_layernorm_fuse_pass.py
index 87e894cda0..ce51cda6e4 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_emb_eltwise_layernorm_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_emb_eltwise_layernorm_fuse_pass.py
@@ -23,7 +23,7 @@ import hypothesis.strategies as st
 
 
 class TestEmbeddingEltwiseLayerNormFusePass(PassAutoScanTest):
-    '''
+    r'''
    in_var1  emb_var   in_var2   emb_var   in_var3   emb_var   in_var   emb_var
      |        |         |         |         |         |         |        |
    lookup_table      lookup_table      lookup_table   ...    lookup_table
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_fc_elementwise_layernorm_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_fc_elementwise_layernorm_fuse_pass.py
index 23b84df574..b15fb88d73 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_fc_elementwise_layernorm_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_fc_elementwise_layernorm_fuse_pass.py
@@ -34,7 +34,7 @@ class FcElementLayernormFusePassDataGen:
 
 
 class TestFCElementwiseLayerNormFusePass(PassAutoScanTest):
-    """
+    r"""
    x_var   w(persistable)   bias_var(persistable)
      \         |            /
             fc
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_fc_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_fc_fuse_pass.py
index 662acf9358..58494fba43 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_fc_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_fc_fuse_pass.py
@@ -22,7 +22,7 @@ import hypothesis.strategies as st
 
 
 class TestFcFusePass(PassAutoScanTest):
-    """
+    r"""
     x_var    y_var(persistable)
       \       /
        mul     bias_var(persistable)
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_flatten2_matmul_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_flatten2_matmul_fuse_pass.py
index da27ae739f..21e0843a5a 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_flatten2_matmul_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_flatten2_matmul_fuse_pass.py
@@ -20,7 +20,7 @@ import hypothesis.strategies as st
 
 
 class TestFlatten2MatmulFusePass(PassAutoScanTest):
-    """
+    r"""
     x_var
       |
    flatten2
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_layer_norm_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_layer_norm_fuse_pass.py
index 81e12958f0..9b928436b5 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_layer_norm_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_layer_norm_fuse_pass.py
@@ -21,7 +21,7 @@ import hypothesis.strategies as st
 
 
 class TestFcFusePass(PassAutoScanTest):
-    """
+    r"""
         x_var
        /     \
       /   reduce_mean "u(x)"
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_to_mul_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_to_mul_pass.py
index 0f1b2b2056..e6db120489 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_to_mul_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_to_mul_pass.py
@@ -20,7 +20,7 @@ import hypothesis.strategies as st
 
 
 class TestMapMatmulToMulPass(PassAutoScanTest):
-    """
+    r"""
     x_var   y_var(persistable)
       \       /
        matmul
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_v2_to_matmul_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_v2_to_matmul_pass.py
index a171d499ca..38f86dc717 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_v2_to_matmul_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_v2_to_matmul_pass.py
@@ -20,7 +20,7 @@ import hypothesis.strategies as st
 
 
 class TestMapMatmulToMulPass(PassAutoScanTest):
-    """
+    r"""
     x_var   y_var(persistable)
       \       /
       matmul_v2
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_v2_to_mul_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_v2_to_mul_pass.py
index 71bd77c7b1..d4c6db1c7f 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_v2_to_mul_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_v2_to_mul_pass.py
@@ -20,7 +20,7 @@ import hypothesis.strategies as st
 
 
 class TestMapMatmulToMulPass(PassAutoScanTest):
-    """
+    r"""
     x_var   y_var(persistable)
       \       /
       matmul_v2
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_matmul_scale_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_matmul_scale_fuse_pass.py
index e3597e13c5..6db5468292 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_matmul_scale_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_matmul_scale_fuse_pass.py
@@ -20,7 +20,7 @@ import hypothesis.strategies as st
 
 
 class TestMatmulScaleFusePass(PassAutoScanTest):
-    """
+    r"""
     x_var   y_var(persistable)
       \       /
        matmul
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_matmul_v2_scale_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_matmul_v2_scale_fuse_pass.py
index ef5406c7bc..a9b48e7cf3 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_matmul_v2_scale_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_matmul_v2_scale_fuse_pass.py
@@ -20,7 +20,7 @@ import hypothesis.strategies as st
 
 
 class TestMatmulV2ScaleFusePass(PassAutoScanTest):
-    """
+    r"""
     x_var   y_var(persistable)      x_var   y_var*scale(persistable)
       \       /                       \       /
       matmul_v2                       matmul_v2
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_depthwise_conv_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_depthwise_conv_pass.py
index ebfb0270f6..736c5fca7f 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_depthwise_conv_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_depthwise_conv_pass.py
@@ -21,7 +21,7 @@ import hypothesis.strategies as st
 
 
 class DepthwiseConvMKLDNNPass(PassAutoScanTest):
-    '''
+    r'''
     conv_input   conv_weight_var(persistable)
       \       /
      conv_op
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_reshape2_matmul_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_reshape2_matmul_fuse_pass.py
index b1311b1298..7de2491e69 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_reshape2_matmul_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_reshape2_matmul_fuse_pass.py
@@ -20,7 +20,7 @@ import hypothesis.strategies as st
 
 
 class TestReshape2MatmulFusePass(PassAutoScanTest):
-    """
+    r"""
     x_var
       |
    reshape2
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_squeeze2_matmul_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_squeeze2_matmul_fuse_pass.py
index ea4fee32a3..8deee7c151 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_squeeze2_matmul_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_squeeze2_matmul_fuse_pass.py
@@ -20,7 +20,7 @@ import hypothesis.strategies as st
 
 
 class TestSqueeze2MatmulFusePass(PassAutoScanTest):
-    """
+    r"""
     x_var
       |
    squeeze2
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_transpose_flatten_concat_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_transpose_flatten_concat_fuse_pass.py
index dfd80ee7f7..41145be9bc 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_transpose_flatten_concat_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_transpose_flatten_concat_fuse_pass.py
@@ -20,7 +20,7 @@ import hypothesis.strategies as st
 
 
 class TestTransposeFlattenConcatFusePass(PassAutoScanTest):
-    """
+    r"""
     x_1_var    x_2_var
       |           |
    transpose2  transpose2
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_flatten2_matmul_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_flatten2_matmul_fuse_pass.py
index 14f50e280e..698eae07cc 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_flatten2_matmul_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_flatten2_matmul_fuse_pass.py
@@ -21,7 +21,7 @@ import hypothesis.strategies as st
 
 
 class TestFlatten2MatmulFusePass(PassAutoScanTest):
-    """
+    r"""
     x_var
       |
    flatten2
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_squeeze2_matmul_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_squeeze2_matmul_fuse_pass.py
index 3d51197928..54c1f72268 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_squeeze2_matmul_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_squeeze2_matmul_fuse_pass.py
@@ -21,7 +21,7 @@ import hypothesis.strategies as st
 
 
 class TestSqueeze2MatmulFusePass(PassAutoScanTest):
-    """
+    r"""
     x_var
       |
    squeeze2
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_unsqueeze2_eltwise_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_unsqueeze2_eltwise_fuse_pass.py
index 698f8d772a..7ea2b494f3 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_unsqueeze2_eltwise_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_unsqueeze2_eltwise_fuse_pass.py
@@ -21,7 +21,7 @@ import hypothesis.strategies as st
 
 
 class TestUnsqueezeEltwiseFusePass(PassAutoScanTest):
-    """
+    r"""
     y_var
       |
    unsqueeze2
diff --git a/python/paddle/incubate/nn/layer/fused_linear.py b/python/paddle/incubate/nn/layer/fused_linear.py
index 65535d9318..4a5f2c1210 100644
--- a/python/paddle/incubate/nn/layer/fused_linear.py
+++ b/python/paddle/incubate/nn/layer/fused_linear.py
@@ -17,7 +17,7 @@ from paddle.incubate.nn import functional as F
 
 
 class FusedLinear(Layer):
-    """
+    r"""
     Linear layer takes only one multi-dimensional tensor as input with the
     shape :math:`[batch\_size, *, in\_features]` , where :math:`*` means any
     number of additional dimensions. It multiplies input tensor with the weight
diff --git a/python/paddle/incubate/nn/layer/fused_transformer.py b/python/paddle/incubate/nn/layer/fused_transformer.py
index f9cb7da823..ca58f81e19 100644
--- a/python/paddle/incubate/nn/layer/fused_transformer.py
+++ b/python/paddle/incubate/nn/layer/fused_transformer.py
@@ -1246,7 +1246,7 @@ class FusedMultiTransformer(Layer):
                 caches=None,
                 pre_caches=None,
                 time_step=None):
-        """
+        r"""
         Applies multi transformer layers on the input.
 
         Parameters:
diff --git a/python/paddle/incubate/sparse/nn/functional/activation.py b/python/paddle/incubate/sparse/nn/functional/activation.py
index 0e83abd239..9e4db7e1c4 100644
--- a/python/paddle/incubate/sparse/nn/functional/activation.py
+++ b/python/paddle/incubate/sparse/nn/functional/activation.py
@@ -61,7 +61,7 @@ def relu(x, name=None):
 
 @dygraph_only
 def softmax(x, axis=-1, name=None):
-    """
+    r"""
     sparse softmax activation, requiring x to be a SparseCooTensor or SparseCsrTensor.
 
     Note:
@@ -146,7 +146,7 @@ def relu6(x, name=None):
 
 @dygraph_only
 def leaky_relu(x, negative_slope=0.01, name=None):
-    """
+    r"""
     sparse leaky_relu activation, requiring x to be a SparseCooTensor or SparseCsrTensor.
 
     .. math::
diff --git a/python/paddle/incubate/sparse/nn/layer/activation.py b/python/paddle/incubate/sparse/nn/layer/activation.py
index bcbf27fab9..da5c5225ce 100644
--- a/python/paddle/incubate/sparse/nn/layer/activation.py
+++ b/python/paddle/incubate/sparse/nn/layer/activation.py
@@ -59,7 +59,7 @@ class ReLU(Layer):
 
 
 class Softmax(Layer):
-    """
+    r"""
     Sparse Softmax Activation, requiring x to be a SparseCooTensor or SparseCsrTensor.
 
     Note:
@@ -164,7 +164,7 @@ class ReLU6(Layer):
 
 
 class LeakyReLU(Layer):
-    """
+    r"""
     Sparse Leaky ReLU Activation, requiring x to be a SparseCooTensor or SparseCsrTensor.
 
     .. math::
diff --git a/python/paddle/incubate/sparse/unary.py b/python/paddle/incubate/sparse/unary.py
index 3ebd84d8ed..b7ba1536f1 100644
--- a/python/paddle/incubate/sparse/unary.py
+++ b/python/paddle/incubate/sparse/unary.py
@@ -548,7 +548,7 @@ def coalesce(x):
 
 @dygraph_only
 def rad2deg(x, name=None):
-    """
+    r"""
     Convert each of the elements of input x from radian to degree,
     requiring x to be a SparseCooTensor or SparseCsrTensor.
 
@@ -581,7 +581,7 @@ def rad2deg(x, name=None):
 
 @dygraph_only
 def deg2rad(x, name=None):
-    """
+    r"""
     Convert each of the elements of input x from degree to radian,
     requiring x to be a SparseCooTensor or SparseCsrTensor.
 
diff --git a/python/paddle/nn/functional/pooling.py b/python/paddle/nn/functional/pooling.py
index 43c53c56e4..7e465b5974 100755
--- a/python/paddle/nn/functional/pooling.py
+++ b/python/paddle/nn/functional/pooling.py
@@ -1381,7 +1381,7 @@ def adaptive_avg_pool1d(x, output_size, name=None):
 
 
 def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
-    """
+    r"""
     Applies 2D adaptive avg pooling on input tensor. The h and w dimensions
     of the output tensor are determined by the parameter output_size.
 
@@ -1502,7 +1502,7 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
 
 
 def adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None):
-    """
+    r"""
     This operation applies 3D adaptive avg pooling on input tensor. The h and w dimensions
     of the output tensor are determined by the parameter output_size.
diff --git a/python/paddle/profiler/profiler_statistic.py b/python/paddle/profiler/profiler_statistic.py
index c0dc7fea44..20465f1ae8 100755
--- a/python/paddle/profiler/profiler_statistic.py
+++ b/python/paddle/profiler/profiler_statistic.py
@@ -1399,7 +1399,7 @@ def _build_table(statistic_data,
         append(header_sep)
         append(row_format.format(*headers))
         append(header_sep)
-        kernel_name_pattern = re.compile('(.+?)(<.*>)(\(.*\))')
+        kernel_name_pattern = re.compile(r'(.+?)(<.*>)(\(.*\))')
         for row_values in all_row_values:
             match = kernel_name_pattern.match(row_values[0])
             if match:
diff --git a/python/paddle/tensor/random.py b/python/paddle/tensor/random.py
index 10e0f6f775..208139b1ab 100644
--- a/python/paddle/tensor/random.py
+++ b/python/paddle/tensor/random.py
@@ -28,13 +28,13 @@ __all__ = []
 
 
 def bernoulli(x, name=None):
-    """
+    r"""
     For each element :math:`x_i` in input ``x``, take a sample from the Bernoulli distribution, also called two-point distribution, with success probability :math:`x_i`.
     The Bernoulli distribution with success probability :math:`x_i` is a discrete probability distribution with probability mass function
 
     .. math::
-        p(y)=\\begin{cases}
-            x_i,&y=1\\\\
+        p(y)=\begin{cases}
+            x_i,&y=1\\
             1-x_i,&y=0
         \end{cases}.
 
diff --git a/tools/analysisPyXml.py b/tools/analysisPyXml.py
index bea07e3633..b9280124ae 100644
--- a/tools/analysisPyXml.py
+++ b/tools/analysisPyXml.py
@@ -45,7 +45,7 @@ def analysisPyXml(rootPath, ut):
                             '@', '\'\'\'', 'logger', '_logger', 'logging', 'r"""',
                             'pass', 'try', 'except',
                             'if __name__ == "__main__"')) == False:
-                    pattern = "(.*) = ('*')|(.*) = (\"*\")|(.*) = (\d)|(.*) = (-\d)|(.*) = (None)|(.*) = (True)|(.*) = (False)|(.*) = (URL_PREFIX*)|(.*) = (\[)|(.*) = (\{)|(.*) = (\()"  #a='b'/a="b"/a=0
+                    pattern = r"""(.*) = ('*')|(.*) = ("*")|(.*) = (\d)|(.*) = (-\d)|(.*) = (None)|(.*) = (True)|(.*) = (False)|(.*) = (URL_PREFIX*)|(.*) = (\[)|(.*) = (\{)|(.*) = (\()"""  #a='b'/a="b"/a=0
                     if re.match(pattern, output.strip()) == None:
                         pyCov_file.append(clazz_filename)
                         coverageMessage = 'RELATED'
diff --git a/tools/get_pr_ut.py b/tools/get_pr_ut.py
index f54f153eb4..efe21b0639 100644
--- a/tools/get_pr_ut.py
+++ b/tools/get_pr_ut.py
@@ -36,12 +36,12 @@ class PRChecker(object):
     def __init__(self):
         self.github = Github(os.getenv('GITHUB_API_TOKEN'), timeout=60)
         self.repo = self.github.get_repo('PaddlePaddle/Paddle')
-        self.py_prog_oneline = re.compile('\d+\|\s*#.*')
+        self.py_prog_oneline = re.compile(r'\d+\|\s*#.*')
         self.py_prog_multiline_a = re.compile('"""(.*?)"""', re.DOTALL)
         self.py_prog_multiline_b = re.compile("'''(.*?)'''", re.DOTALL)
-        self.cc_prog_online = re.compile('\d+\|\s*//.*')
-        self.cc_prog_multiline = re.compile('\d+\|\s*/\*.*?\*/', re.DOTALL)
-        self.lineno_prog = re.compile('@@ \-\d+,\d+ \+(\d+),(\d+) @@')
+        self.cc_prog_online = re.compile(r'\d+\|\s*//.*')
+        self.cc_prog_multiline = re.compile(r'\d+\|\s*/\*.*?\*/', re.DOTALL)
+        self.lineno_prog = re.compile(r'@@ \-\d+,\d+ \+(\d+),(\d+) @@')
         self.pr = None
         self.suffix = ''
         self.full_case = False
diff --git a/tools/get_ut_file_map.py b/tools/get_ut_file_map.py
index 220bafa7e5..5afc6b03f2 100644
--- a/tools/get_ut_file_map.py
+++ b/tools/get_ut_file_map.py
@@ -34,7 +34,7 @@ def get_all_paddle_file(rootPath):
 def get_all_uts(rootPath):
     all_uts_paddle = '%s/build/all_uts_paddle' % rootPath
     os.system(
-        'cd %s/build && ctest -N -V | grep -Ei "Test[ \t]+#" | grep -oEi "\w+$" > %s'
+        r'cd %s/build && ctest -N -V | grep -Ei "Test[ \t]+#" | grep -oEi "\w+$" > %s'
         % (rootPath, all_uts_paddle))
 
 
diff --git a/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py b/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py
index 087c88d937..7b97a06619 100644
--- a/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py
+++ b/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py
@@ -217,7 +217,7 @@ def convert_op_proto_into_mlir(op_descs):
 |* Automatically generated file, do not edit!                                *|\n\
 |* Generated by tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py  *|\n\
 |*                                                                           *|\n\
-\*===----------------------------------------------------------------------===*/\n"
+\\*===----------------------------------------------------------------------===*/\n"
 
     lines = [
         "#ifndef PD_OPS",
diff --git a/tools/infrt/generate_phi_kernel_dialect.py b/tools/infrt/generate_phi_kernel_dialect.py
index cde7f44674..686a5e12a6 100644
--- a/tools/infrt/generate_phi_kernel_dialect.py
+++ b/tools/infrt/generate_phi_kernel_dialect.py
@@ -261,7 +261,7 @@ def generate_dialect_head():
 |* Automatically generated file, do not edit!                                *|\n\
 |* Generated by tools/infrt/generate_pten_kernel_dialect.py                  *|\n\
 |*                                                                           *|\n\
-\*===----------------------------------------------------------------------===*/\n"
+\\*===----------------------------------------------------------------------===*/\n"
 
     includes_ = "#ifndef PTEN_KERNELS\n\
 #define PTEN_KERNELS\n\
diff --git a/tools/infrt/get_compat_kernel_signature.py b/tools/infrt/get_compat_kernel_signature.py
index 12e7285980..104d3ae30c 100644
--- a/tools/infrt/get_compat_kernel_signature.py
+++ b/tools/infrt/get_compat_kernel_signature.py
@@ -65,7 +65,7 @@ def get_compat_kernels_info():
                 data = content.replace("\n", "").replace(
                     " ", "").strip("return").strip("KernelSignature(").strip(
-                        "\);").replace("\"", "").replace("\\", "")
+                        r"\);").replace("\"", "").replace("\\", "")
                 registry = False
                 if is_grad_kernel(data):
                     continue
diff --git a/tools/jetson_infer_op.py b/tools/jetson_infer_op.py
index c84d7d50d6..e132a14373 100644
--- a/tools/jetson_infer_op.py
+++ b/tools/jetson_infer_op.py
@@ -155,7 +155,7 @@ def set_diff_value(file, atol="1e-5", inplace_atol="1e-7"):
     :param inplace_atol:
     :return:
     """
-    os.system("sed -i 's/self.check_output(/self\.check_output\(atol=" + atol +
+    os.system(r"sed -i 's/self.check_output(/self\.check_output\(atol=" + atol +
               ",inplace_atol=" + inplace_atol + ",/g\' " + file)
 
 
@@ -179,8 +179,8 @@ def change_op_file(start=0, end=0, op_list_file='list_op.txt', path='.'):
         file_with_path = file_path[0]
         # pattern
         pattern_import = ".*import OpTest.*"
-        pattern_skip = "^class .*\(OpTest\):$"
-        pattern_return = "def test.*grad.*\):$"
+        pattern_skip = r"^class .*\(OpTest\):$"
+        pattern_return = r"def test.*grad.*\):$"
         # change file
         add_import_skip_return(file_with_path, pattern_import, pattern_skip,
                                pattern_return)
diff --git a/tools/prune_for_jetson.py b/tools/prune_for_jetson.py
index e91b3b840d..e87268a9fe 100644
--- a/tools/prune_for_jetson.py
+++ b/tools/prune_for_jetson.py
@@ -77,7 +77,7 @@ def prune_phi_kernels():
         all_matches = []
         with open(op_file, 'r', encoding='utf-8') as f:
             content = ''.join(f.readlines())
-            op_pattern = 'PD_REGISTER_KERNEL\(.*?\).*?\{.*?\}'
+            op_pattern = r'PD_REGISTER_KERNEL\(.*?\).*?\{.*?\}'
             op, op_count = find_kernel(content, op_pattern)
             register_op_count += op_count
             all_matches.extend(op)
@@ -143,11 +143,12 @@ def append_fluid_kernels():
 
     for op in op_white_list:
         patterns = {
-            "REGISTER_OPERATOR": "REGISTER_OPERATOR\(\s*%s\s*," % op,
+            "REGISTER_OPERATOR":
+            r"REGISTER_OPERATOR\(\s*%s\s*," % op,
             "REGISTER_OP_CPU_KERNEL":
-            "REGISTER_OP_CPU_KERNEL\(\s*%s\s*," % op,
+            r"REGISTER_OP_CPU_KERNEL\(\s*%s\s*," % op,
             "REGISTER_OP_CUDA_KERNEL":
-            "REGISTER_OP_CUDA_KERNEL\(\s*%s\s*," % op
+            r"REGISTER_OP_CUDA_KERNEL\(\s*%s\s*," % op
         }
         for k, p in patterns.items():
            matches = re.findall(p, content, flags=re.DOTALL)
diff --git a/tools/remove_grad_op_and_kernel.py b/tools/remove_grad_op_and_kernel.py
index 08f52fc604..19778c27db 100644
--- a/tools/remove_grad_op_and_kernel.py
+++ b/tools/remove_grad_op_and_kernel.py
@@ -43,7 +43,7 @@ def remove_grad_op_and_kernel(content, pattern1, pattern2):
 
 def update_operator_cmake(cmake_file):
     pat1 = 'add_subdirectory(optimizers)'
-    pat2 = 'register_operators\(EXCLUDES.*?py_func_op.*?\)'
+    pat2 = r'register_operators\(EXCLUDES.*?py_func_op.*?\)'
 
     code1 = 'if(ON_INFER)\nadd_subdirectory(optimizers)\nendif()'
     code2 = 'if(ON_INFER)\nfile(GLOB LOSS_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "*loss_op.cc")\nstring(REPLACE ".cc" "" LOSS_OPS "${LOSS_OPS}")\nendif()'
@@ -80,27 +80,27 @@ if __name__ == '__main__':
     # 1. remove all grad op and kernel
     for op_file in all_op:
         # remove all grad op
-        op_pattern1 = 'REGISTER_OPERATOR\(.*?\);?'
-        op_pattern2 = 'REGISTER_OPERATOR\(.*?_grad,.*?\);?'
+        op_pattern1 = r'REGISTER_OPERATOR\(.*?\);?'
+        op_pattern2 = r'REGISTER_OPERATOR\(.*?_grad,.*?\);?'
 
         # remove all cpu grad kernel
-        cpu_kernel_pattern1 = 'REGISTER_OP_CPU_KERNEL\(.*?\);?'
-        cpu_kernel_pattern2 = 'REGISTER_OP_CPU_KERNEL\(.*?_grad,.*?\);?'
+        cpu_kernel_pattern1 = r'REGISTER_OP_CPU_KERNEL\(.*?\);?'
+        cpu_kernel_pattern2 = r'REGISTER_OP_CPU_KERNEL\(.*?_grad,.*?\);?'
 
         # remove all gpu grad kernel
-        gpu_kernel_pattern1 = 'REGISTER_OP_CUDA_KERNEL\(.*?\);?'
-        gpu_kernel_pattern2 = 'REGISTER_OP_CUDA_KERNEL\(.*?_grad,.*?\);?'
+        gpu_kernel_pattern1 = r'REGISTER_OP_CUDA_KERNEL\(.*?\);?'
+        gpu_kernel_pattern2 = r'REGISTER_OP_CUDA_KERNEL\(.*?_grad,.*?\);?'
 
         # remove all xpu grad kernel
-        xpu_kernel_pattern1 = 'REGISTER_OP_XPU_KERNEL\(.*?\);?'
-        xpu_kernel_pattern2 = 'REGISTER_OP_XPU_KERNEL\(.*?_grad,.*?\);?'
+        xpu_kernel_pattern1 = r'REGISTER_OP_XPU_KERNEL\(.*?\);?'
+        xpu_kernel_pattern2 = r'REGISTER_OP_XPU_KERNEL\(.*?_grad,.*?\);?'
 
         # remove custom grad kernel, mkldnn or cudnn etc.
-        op_kernel_pattern1 = 'REGISTER_OP_KERNEL\(.*?\);?'
-        op_kernel_pattern2 = 'REGISTER_OP_KERNEL\(.*?_grad,.*?\);?'
+        op_kernel_pattern1 = r'REGISTER_OP_KERNEL\(.*?\);?'
+        op_kernel_pattern2 = r'REGISTER_OP_KERNEL\(.*?_grad,.*?\);?'
 
-        custom_pattern1 = 'REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE\(.*?\);?'
-        custom_pattern2 = 'REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE\(.*?_grad,.*?\);?'
+        custom_pattern1 = r'REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE\(.*?\);?'
+        custom_pattern2 = r'REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE\(.*?_grad,.*?\);?'
 
         op_name = os.path.split(op_file)[1]
         if op_name in spec_ops:
--
GitLab