diff --git a/paddle/fluid/eager/auto_code_generator/generator/codegen_utils.py b/paddle/fluid/eager/auto_code_generator/generator/codegen_utils.py
index 5206cdd6751b98d07062e251f787ff14e1235871..91a16a0fdf05f31824796522530777003e910abc 100644
--- a/paddle/fluid/eager/auto_code_generator/generator/codegen_utils.py
+++ b/paddle/fluid/eager/auto_code_generator/generator/codegen_utils.py
@@ -295,7 +295,7 @@ def ParseYamlForwardFromBackward(string):
     wspace = r'\s*'
     fargs = r'(.*?)'
     frets = r'(.*)'
-    pattern = f'{fname}{wspace}\({wspace}{fargs}{wspace}\){wspace}->{wspace}{frets}'
+    pattern = fr'{fname}{wspace}\({wspace}{fargs}{wspace}\){wspace}->{wspace}{frets}'

     m = re.search(pattern, string)
     function_name = m.group(1)
@@ -314,7 +314,7 @@ def ParseYamlForward(args_str, returns_str):
     fargs = r'(.*?)'
     wspace = r'\s*'

-    args_pattern = f'^\({fargs}\)$'
+    args_pattern = fr'^\({fargs}\)$'
     args_str = re.search(args_pattern, args_str.strip()).group(1)

     inputs_list, attrs_list = ParseYamlArgs(args_str)
@@ -329,7 +329,7 @@ def ParseYamlBackward(args_str, returns_str):
     fargs = r'(.*?)'
     wspace = r'\s*'

-    args_pattern = f'\({fargs}\)'
+    args_pattern = fr'\({fargs}\)'
     args_str = re.search(args_pattern, args_str).group(1)

     inputs_list, attrs_list = ParseYamlArgs(args_str)
diff --git a/python/paddle/dataset/common.py b/python/paddle/dataset/common.py
index b8004be3a41b57f9f83f268ec73da6df4d66429f..97e986c70068a8a24f392ced0e8ad2866b0d6624 100644
--- a/python/paddle/dataset/common.py
+++ b/python/paddle/dataset/common.py
@@ -32,7 +32,7 @@ HOME = os.path.expanduser('~')
 # If the default HOME dir does not support writing, we
 # will create a temporary folder to store the cache files.
 if not os.access(HOME, os.W_OK):
-    """
+    r"""
     gettempdir() return the name of the directory used for temporary files.
     On Windows, the directories C:\TEMP, C:\TMP, \TEMP, and \TMP, in that order.
     On all other platforms, the directories /tmp, /var/tmp, and /usr/tmp, in that order.
diff --git a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_optimizer_stage2.py b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_optimizer_stage2.py
index ed07e8b79822d8d02c1394b4f6418575cd1ba54a..798137564904003fa8c4fd75698928615e624612 100644
--- a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_optimizer_stage2.py
+++ b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_optimizer_stage2.py
@@ -211,7 +211,7 @@ class GroupShardedOptimizerStage2(Optimizer):
         if self._broadcast_order_params is None:
             # Params' names should be like column_linear_32.w_0 patter to get the best performance.
             warnings.warn(
-                "The param name passed to the optimizer doesn't follow .+_[0-9]+\..+ patter, "
+                r"The param name passed to the optimizer doesn't follow .+_[0-9]+\..+ patter, "
                 "overlap broadcast may harm the performance.")
             self._broadcast_order_params = self._local_params
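Why the hunks above switch f'...' patterns to fr'...': an unrecognized escape sequence such as \( inside a non-raw string literal is deprecated — CPython keeps the backslash but emits a DeprecationWarning (a SyntaxWarning on newer interpreters) when the module is compiled. A raw (f-)string passes every backslash through to re verbatim and compiles silently. A minimal, self-contained sketch of the same idiom (plain Python, not the Paddle code above):

import re

fname = r'(.*?)'
wspace = r'\s*'

# Non-raw form f'{fname}{wspace}\({wspace}\)' produces the same text but
# warns about the invalid escapes '\(' and '\)'. The raw f-string does not:
pattern = fr'{fname}{wspace}\({wspace}\)'

assert re.search(pattern, 'foo (  )').group(1) == 'foo'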
diff --git a/python/paddle/distribution/gumbel.py b/python/paddle/distribution/gumbel.py
index 7c9aebc652022cd519f115fcc12f172564660ba5..225e327cc91e9a64e82b3a7c22b971a00b3b5efe 100644
--- a/python/paddle/distribution/gumbel.py
+++ b/python/paddle/distribution/gumbel.py
@@ -98,7 +98,7 @@ class Gumbel(TransformedDistribution):

     @property
     def mean(self):
-        """Mean of distribution
+        r"""Mean of distribution

         The mean is

@@ -120,7 +120,7 @@ class Gumbel(TransformedDistribution):

     @property
     def variance(self):
-        """Variance of distribution.
+        r"""Variance of distribution.

         The variance is

@@ -144,7 +144,7 @@ class Gumbel(TransformedDistribution):

     @property
     def stddev(self):
-        """Standard deviation of distribution
+        r"""Standard deviation of distribution

         The standard deviation is

diff --git a/python/paddle/distribution/laplace.py b/python/paddle/distribution/laplace.py
index 1796f50893e60831c5612231e357c171ea20e52a..2b117f93bacec87526b28b9b570b29f88d75a521 100644
--- a/python/paddle/distribution/laplace.py
+++ b/python/paddle/distribution/laplace.py
@@ -105,7 +105,7 @@ class Laplace(distribution.Distribution):

     @property
     def variance(self):
-        """Variance of distribution.
+        r"""Variance of distribution.

         The variance is

diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_act_mkldnn_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_act_mkldnn_fuse_pass.py
index 4cef0a1518cc8c145e3b8e5d2380ba28479f21ce..d619438de4ec08be5a30a07d70e9502536094ece 100755
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_act_mkldnn_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_act_mkldnn_fuse_pass.py
@@ -20,7 +20,7 @@ import hypothesis.strategies as st


 class TestConvActMkldnnFusePass(PassAutoScanTest):
-    """
+    r"""
     x_var   f_var(persistable)
       \       /
        conv2d
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bias_mkldnn_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bias_mkldnn_fuse_pass.py
index dd033b11783f7c53e5ccbb5254a9b9ce0d66ee1d..eb02c6e1c77ca2c45d1c8856e6c273662b3276df 100755
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bias_mkldnn_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bias_mkldnn_fuse_pass.py
@@ -20,7 +20,7 @@ import hypothesis.strategies as st


 class TestConvBiasMkldnnFusePass(PassAutoScanTest):
-    """
+    r"""
     x_var   f_var(persistable)
       \       /
        conv2d
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add2_act_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add2_act_fuse_pass.py
index 2279db872c64d8c594f772a7b4b9f081aa99b555..d08da522e1a3ede893c442b207d349632d6aed1f 100755
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add2_act_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add2_act_fuse_pass.py
@@ -23,7 +23,7 @@ os.environ['NVIDIA_TF32_OVERRIDE'] = '0'


 class TestConvElementwiseAdd2ActPass(PassAutoScanTest):
-    """
+    r"""
     x_var   f_var(persistable)
       \       /
        conv2d
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add_act_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add_act_fuse_pass.py
index a540d94c63062eee1e17c357a995bf8b52e78f5a..7c1ae063ce84979b0c81b517a4110e51e76e4cae 100755
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add_act_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add_act_fuse_pass.py
@@ -22,7 +22,7 @@ os.environ['NVIDIA_TF32_OVERRIDE'] = '0'


 class TestConvElementwiseAddActPass(PassAutoScanTest):
-    """
+    r"""
     x_var   f_var(persistable)
       \       /
        conv2d
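The test-file hunks above and below all make the same change: the class docstrings draw the fused graph with bare backslashes (the \ / edges), and a sequence like '\ ' is an invalid escape in a plain triple-quoted string, so each module warns at compile time. Marking the docstring raw keeps the diagram byte-for-byte and silences the warning. A minimal sketch of the pattern being fixed (hypothetical class, not one of the tests):

# Without the r prefix, '\ ' in the diagram below would be an invalid
# escape sequence and trigger a DeprecationWarning at compile time.
class ExampleFusePattern:
    r"""
    x_var   w_var(persistable)
       \      /
        conv2d
          |
        out_var
    """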
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_eltwiseadd_bn_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_eltwiseadd_bn_fuse_pass.py
index 1fb0612c40243a3b876919f82ee8383b86721f3b..a88150aecbf3b653aac378a41887d1a4cbb1ed05 100755
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_eltwiseadd_bn_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_eltwiseadd_bn_fuse_pass.py
@@ -24,7 +24,7 @@ os.environ['NVIDIA_TF32_OVERRIDE'] = '0'


 class TestConvEltwiseaddBnFusePass(PassAutoScanTest):
-    """
+    r"""
     x_var   f_var(persistable)
       \       /
        conv2d
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_transpose_bn_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_transpose_bn_fuse_pass.py
index 4349d5d47d960ee5a61a42b6dbb44000b6f0d716..90d81b50471c03be0dfbdb3b494336d521b2d7eb 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_transpose_bn_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_transpose_bn_fuse_pass.py
@@ -21,7 +21,7 @@ import hypothesis.strategies as st


 class TestConvTransposeBnFusePass(PassAutoScanTest):
-    '''
+    r'''
    conv_input   conv_weight_var(persistable)
        \          /
         conv_op
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_transpose_eltwiseadd_bn_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_transpose_eltwiseadd_bn_fuse_pass.py
index bdbe057ff221eded75571e140c9c165111d73d70..91d9b863e2f9a2065808bae3984dcb1e7f8bf653 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_transpose_eltwiseadd_bn_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_transpose_eltwiseadd_bn_fuse_pass.py
@@ -21,7 +21,7 @@ import hypothesis.strategies as st


 class TestConvTransposeEltwiseaddBnFusePass(PassAutoScanTest):
-    '''
+    r'''
    conv_input   conv_weight_var(persistable)
        \          /
         conv_op
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_emb_eltwise_layernorm_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_emb_eltwise_layernorm_fuse_pass.py
index 87e894cda0664ff2cc13baba52381af8c905e538..ce51cda6e45a8fc41994d9e6c60841e36a235be1 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_emb_eltwise_layernorm_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_emb_eltwise_layernorm_fuse_pass.py
@@ -23,7 +23,7 @@ import hypothesis.strategies as st


 class TestEmbeddingEltwiseLayerNormFusePass(PassAutoScanTest):
-    '''
+    r'''
    in_var1   emb_var   in_var2   emb_var   in_var3   emb_var   in_var   emb_var
      |         |          |        |          |        |          |        |
     lookup_table        lookup_table       lookup_table    ...   lookup_table
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_fc_elementwise_layernorm_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_fc_elementwise_layernorm_fuse_pass.py
index 23b84df5741dda4fe654043f133ebc90d12cb2a2..b15fb88d73d6eea9463021c6fc2f61ae1074c38f 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_fc_elementwise_layernorm_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_fc_elementwise_layernorm_fuse_pass.py
@@ -34,7 +34,7 @@ class FcElementLayernormFusePassDataGen:


 class TestFCElementwiseLayerNormFusePass(PassAutoScanTest):
-    """
+    r"""
     x_var   w(persistable)   bias_var(persistable)
       \          |            /
                 fc
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_fc_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_fc_fuse_pass.py
index 662acf9358386f21805afc682433fe3743ba2ddc..58494fba43c254175ddc3351d976a74ebd3e69e1 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_fc_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_fc_fuse_pass.py
@@ -22,7 +22,7 @@ import hypothesis.strategies as st


 class TestFcFusePass(PassAutoScanTest):
-    """
+    r"""
     x_var    y_var(persistable)
       \       /
          mul     bias_var(persistable)
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_flatten2_matmul_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_flatten2_matmul_fuse_pass.py
index da27ae739fe3dc9162aac71d5d8ae4393e6ec43e..21e0843a5aab65228b91110da940469915e0279d 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_flatten2_matmul_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_flatten2_matmul_fuse_pass.py
@@ -20,7 +20,7 @@ import hypothesis.strategies as st


 class TestFlatten2MatmulFusePass(PassAutoScanTest):
-    """
+    r"""
     x_var
       |
    flatten2
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_layer_norm_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_layer_norm_fuse_pass.py
index 81e12958f0361efa26cc796aa313b8b71f2efa8a..9b928436b59986ca734378b5f2f1e5d84729f694 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_layer_norm_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_layer_norm_fuse_pass.py
@@ -21,7 +21,7 @@ import hypothesis.strategies as st


 class TestFcFusePass(PassAutoScanTest):
-    """
+    r"""
       x_var
       /   \
      /   reduce_mean "u(x)"
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_to_mul_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_to_mul_pass.py
index 0f1b2b20568bb9ec4b1019d54cbf2c5b50d40752..e6db1204897c51c40800a4bfbee7bea4d8586861 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_to_mul_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_to_mul_pass.py
@@ -20,7 +20,7 @@ import hypothesis.strategies as st


 class TestMapMatmulToMulPass(PassAutoScanTest):
-    """
+    r"""
     x_var   y_var(persistable)
       \       /
        matmul
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_v2_to_matmul_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_v2_to_matmul_pass.py
index a171d499cabf5736bbfb02345fe1adf6ed170546..38f86dc71702c6d5bf629cc6697733ded867cdaf 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_v2_to_matmul_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_v2_to_matmul_pass.py
@@ -20,7 +20,7 @@ import hypothesis.strategies as st


 class TestMapMatmulToMulPass(PassAutoScanTest):
-    """
+    r"""
     x_var   y_var(persistable)
       \       /
        matmul_v2
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_v2_to_mul_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_v2_to_mul_pass.py
index 71bd77c7b145edc14bdc84db43dc06e86a59a7ba..d4c6db1c7f5d4113d630e91674092827d2aef3af 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_v2_to_mul_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_map_matmul_v2_to_mul_pass.py
@@ -20,7 +20,7 @@ import hypothesis.strategies as st


 class TestMapMatmulToMulPass(PassAutoScanTest):
-    """
+    r"""
     x_var   y_var(persistable)
       \       /
        matmul_v2
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_matmul_scale_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_matmul_scale_fuse_pass.py
index e3597e13c589c49ff6fd5606028b2dd40dd01990..6db5468292c4e62f2b62b3bfd76d894ef3077fa6 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_matmul_scale_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_matmul_scale_fuse_pass.py
@@ -20,7 +20,7 @@ import hypothesis.strategies as st


 class TestMatmulScaleFusePass(PassAutoScanTest):
-    """
+    r"""
     x_var   y_var(persistable)
       \       /
        matmul
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_matmul_v2_scale_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_matmul_v2_scale_fuse_pass.py
index ef5406c7bca873a201d7a302c5c9ee06c7b594c4..a9b48e7cf362750d6da8c653ca32b7a860c1667f 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_matmul_v2_scale_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_matmul_v2_scale_fuse_pass.py
@@ -20,7 +20,7 @@ import hypothesis.strategies as st


 class TestMatmulV2ScaleFusePass(PassAutoScanTest):
-    """
+    r"""
     x_var   y_var(persistable)     x_var   y_var*scale(persistable)
       \       /                      \       /
        matmul_v2                      matmul_v2
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_depthwise_conv_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_depthwise_conv_pass.py
index ebfb0270f67052fe72f047be5bd11a171dabedff..736c5fca7f8f09cb7aa067875c9528782da0bc4c 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_depthwise_conv_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_depthwise_conv_pass.py
@@ -21,7 +21,7 @@ import hypothesis.strategies as st


 class DepthwiseConvMKLDNNPass(PassAutoScanTest):
-    '''
+    r'''
    conv_input   conv_weight_var(persistable)
        \          /
         conv_op
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_reshape2_matmul_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_reshape2_matmul_fuse_pass.py
index b1311b129831fb73cb805a90abcb18853437993e..7de2491e693710e7111e1f483a8469e4f0567942 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_reshape2_matmul_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_reshape2_matmul_fuse_pass.py
@@ -20,7 +20,7 @@ import hypothesis.strategies as st


 class TestReshape2MatmulFusePass(PassAutoScanTest):
-    """
+    r"""
     x_var
       |
    reshape2
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_squeeze2_matmul_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_squeeze2_matmul_fuse_pass.py
index ea4fee32a3e44510dc77f6dec9a842f4376f833a..8deee7c1517e8a660e9195fb03862adb1b12d3d2 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_squeeze2_matmul_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_squeeze2_matmul_fuse_pass.py
@@ -20,7 +20,7 @@ import hypothesis.strategies as st


 class TestSqueeze2MatmulFusePass(PassAutoScanTest):
-    """
+    r"""
     x_var
       |
    squeeze2
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_transpose_flatten_concat_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_transpose_flatten_concat_fuse_pass.py
index dfd80ee7f792d65de5f0a2d0dde5d5eb9812c774..41145be9bca0b16ed7995e8c750f87131a150ead 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_transpose_flatten_concat_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_transpose_flatten_concat_fuse_pass.py
@@ -20,7 +20,7 @@ import hypothesis.strategies as st


 class TestTransposeFlattenConcatFusePass(PassAutoScanTest):
-    """
+    r"""
     x_1_var    x_2_var
        |          |
    transpose2  transpose2
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_flatten2_matmul_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_flatten2_matmul_fuse_pass.py
index 14f50e280e9dd08252d388f523bc1bdff1114ed8..698eae07ccf8e54ff6093ebf1037749b7e3ef104 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_flatten2_matmul_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_flatten2_matmul_fuse_pass.py
@@ -21,7 +21,7 @@ import hypothesis.strategies as st


 class TestFlatten2MatmulFusePass(PassAutoScanTest):
-    """
+    r"""
     x_var
       |
    flatten2
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_squeeze2_matmul_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_squeeze2_matmul_fuse_pass.py
index 3d51197928b22eb992181f83bbdee20a4afc42d0..54c1f7226857353f7aac1f47587ec599cf7b7077 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_squeeze2_matmul_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_squeeze2_matmul_fuse_pass.py
@@ -21,7 +21,7 @@ import hypothesis.strategies as st


 class TestSqueeze2MatmulFusePass(PassAutoScanTest):
-    """
+    r"""
     x_var
       |
    squeeze2
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_unsqueeze2_eltwise_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_unsqueeze2_eltwise_fuse_pass.py
index 698f8d772adf013089547153cdabb7449609ea0f..7ea2b494f3d17713fb811465690996e72b296731 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_unsqueeze2_eltwise_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_unsqueeze2_eltwise_fuse_pass.py
@@ -21,7 +21,7 @@ import hypothesis.strategies as st


 class TestUnsqueezeEltwiseFusePass(PassAutoScanTest):
-    """
+    r"""
     y_var
       |
    unsqueeze2
diff --git a/python/paddle/incubate/nn/layer/fused_linear.py b/python/paddle/incubate/nn/layer/fused_linear.py
index 65535d9318cc076ddac660aea7f5c2f7fcd4fcad..4a5f2c12105b2bd1e1f34c8cc287bd7c633e1ad5 100644
--- a/python/paddle/incubate/nn/layer/fused_linear.py
+++ b/python/paddle/incubate/nn/layer/fused_linear.py
@@ -17,7 +17,7 @@ from paddle.incubate.nn import functional as F


 class FusedLinear(Layer):
-    """
+    r"""
     Linear layer takes only one multi-dimensional tensor as input with the
     shape :math:`[batch\_size, *, in\_features]` , where :math:`*` means any
     number of additional dimensions. It multiplies input tensor with the weight
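The incubate-layer hunks around here guard Sphinx math in docstrings: :math:`[batch\_size, *, in\_features]` relies on backslashes reaching the doc builder intact. In a non-raw docstring, '\_' merely warns, but an escape Python does recognize is rewritten silently; '\t', for instance, becomes a tab and corrupts the rendered formula. A small standalone check of that difference (plain Python, not Paddle code):

# '\t' in a plain string is a recognized escape: the backslash is
# consumed and the LaTeX command is destroyed. The raw form survives.
assert len('\times') == 5   # TAB + 'imes'
assert len(r'\times') == 6  # backslash kept for Sphinx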
diff --git a/python/paddle/incubate/nn/layer/fused_transformer.py b/python/paddle/incubate/nn/layer/fused_transformer.py
index f9cb7da823181b261b5ae19579758ac23422e053..ca58f81e19dd58e69ac0099212450e7671779f6e 100644
--- a/python/paddle/incubate/nn/layer/fused_transformer.py
+++ b/python/paddle/incubate/nn/layer/fused_transformer.py
@@ -1246,7 +1246,7 @@ class FusedMultiTransformer(Layer):
                 caches=None,
                 pre_caches=None,
                 time_step=None):
-        """
+        r"""
         Applies multi transformer layers on the input.

         Parameters:
diff --git a/python/paddle/incubate/sparse/nn/functional/activation.py b/python/paddle/incubate/sparse/nn/functional/activation.py
index 0e83abd2390e8b7f1f757e3136a6e3c440e90c97..9e4db7e1c4babdddd5e45f99c4fd53600108a76b 100644
--- a/python/paddle/incubate/sparse/nn/functional/activation.py
+++ b/python/paddle/incubate/sparse/nn/functional/activation.py
@@ -61,7 +61,7 @@ def relu(x, name=None):

 @dygraph_only
 def softmax(x, axis=-1, name=None):
-    """
+    r"""
     sparse softmax activation, requiring x to be a SparseCooTensor or SparseCsrTensor.

     Note:
@@ -146,7 +146,7 @@ def relu6(x, name=None):

 @dygraph_only
 def leaky_relu(x, negative_slope=0.01, name=None):
-    """
+    r"""
     sparse leaky_relu activation, requiring x to be a SparseCooTensor or SparseCsrTensor.

     .. math::
diff --git a/python/paddle/incubate/sparse/nn/layer/activation.py b/python/paddle/incubate/sparse/nn/layer/activation.py
index bcbf27fab9d6b4495f6b700164ae4839cf218536..da5c5225cedaeab2022d8923e228f8449f57be1e 100644
--- a/python/paddle/incubate/sparse/nn/layer/activation.py
+++ b/python/paddle/incubate/sparse/nn/layer/activation.py
@@ -59,7 +59,7 @@ class ReLU(Layer):


 class Softmax(Layer):
-    """
+    r"""
     Sparse Softmax Activation, requiring x to be a SparseCooTensor or SparseCsrTensor.

     Note:
@@ -164,7 +164,7 @@ class ReLU6(Layer):


 class LeakyReLU(Layer):
-    """
+    r"""
     Sparse Leaky ReLU Activation, requiring x to be a SparseCooTensor or SparseCsrTensor.

     .. math::
diff --git a/python/paddle/incubate/sparse/unary.py b/python/paddle/incubate/sparse/unary.py
index 3ebd84d8ed0de18a2a34b30d436ed59812b8aceb..b7ba1536f17342cdbc9aedbeebf6dda24b01cf2f 100644
--- a/python/paddle/incubate/sparse/unary.py
+++ b/python/paddle/incubate/sparse/unary.py
@@ -548,7 +548,7 @@ def coalesce(x):

 @dygraph_only
 def rad2deg(x, name=None):
-    """
+    r"""
     Convert each of the elements of input x from radian to degree,
     requiring x to be a SparseCooTensor or SparseCsrTensor.

@@ -581,7 +581,7 @@ def rad2deg(x, name=None):

 @dygraph_only
 def deg2rad(x, name=None):
-    """
+    r"""
     Convert each of the elements of input x from degree to radian,
     requiring x to be a SparseCooTensor or SparseCsrTensor.

diff --git a/python/paddle/nn/functional/pooling.py b/python/paddle/nn/functional/pooling.py
index 43c53c56e45e919876949dfa8ba1ac1687ad7e05..7e465b5974888d6c80888423c345c79294bf4f8a 100755
--- a/python/paddle/nn/functional/pooling.py
+++ b/python/paddle/nn/functional/pooling.py
@@ -1381,7 +1381,7 @@ def adaptive_avg_pool1d(x, output_size, name=None):


 def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
-    """
+    r"""
     Applies 2D adaptive avg pooling on input tensor. The h and w dimensions
     of the output tensor are determined by the parameter output_size.

@@ -1502,7 +1502,7 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):


 def adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None):
-    """
+    r"""
     This operation applies 3D adaptive avg pooling on input tensor. The h and w
     dimensions of the output tensor are determined by the parameter output_size.

diff --git a/python/paddle/profiler/profiler_statistic.py b/python/paddle/profiler/profiler_statistic.py
index c0dc7fea44379775e7dbe27f6a9546d151f1e8bb..20465f1ae85ce93ee58403fcf1af6bd9ea12eb21 100755
--- a/python/paddle/profiler/profiler_statistic.py
+++ b/python/paddle/profiler/profiler_statistic.py
@@ -1399,7 +1399,7 @@ def _build_table(statistic_data,
         append(header_sep)
         append(row_format.format(*headers))
         append(header_sep)
-        kernel_name_pattern = re.compile('(.+?)(<.*>)(\(.*\))')
+        kernel_name_pattern = re.compile(r'(.+?)(<.*>)(\(.*\))')
         for row_values in all_row_values:
             match = kernel_name_pattern.match(row_values[0])
             if match:
diff --git a/python/paddle/tensor/random.py b/python/paddle/tensor/random.py
index 10e0f6f775fe46c9047d2e34fcac7683d22f062f..208139b1abbb6de955a377c7732b66c59bd304cf 100644
--- a/python/paddle/tensor/random.py
+++ b/python/paddle/tensor/random.py
@@ -28,13 +28,13 @@ __all__ = []


 def bernoulli(x, name=None):
-    """
+    r"""
     For each element :math:`x_i` in input ``x``, take a sample from the Bernoulli distribution, also called two-point distribution, with success probability :math:`x_i`. The Bernoulli distribution with success probability :math:`x_i` is a discrete probability distribution with probability mass function

     .. math::
-        p(y)=\\begin{cases}
-            x_i,&y=1\\\\
+        p(y)=\begin{cases}
+            x_i,&y=1\\
             1-x_i,&y=0
         \end{cases}.

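The bernoulli hunk above is the one place where the docstring body changes too: once the string is raw, doubled backslashes must be halved so Sphinx still sees the same text. The escaping arithmetic as a standalone check (plain Python, not Paddle code):

# In a plain string '\\begin' collapses to '\begin' and '\\\\' to '\\';
# a raw string spells exactly what the doc builder should see.
plain = "p(y)=\\begin{cases} x_i,&y=1\\\\ 1-x_i,&y=0 \\end{cases}"
raw = r"p(y)=\begin{cases} x_i,&y=1\\ 1-x_i,&y=0 \end{cases}"
assert plain == raw  # same runtime text, written two ways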
diff --git a/tools/analysisPyXml.py b/tools/analysisPyXml.py
index bea07e36335115fde5948c33bee9f8c2c9b76c7a..b9280124aedba6b388281f804acc9ff07e550d2a 100644
--- a/tools/analysisPyXml.py
+++ b/tools/analysisPyXml.py
@@ -45,7 +45,7 @@ def analysisPyXml(rootPath, ut):
                             '@', '\'\'\'', 'logger', '_logger', 'logging', 'r"""',
                             'pass', 'try', 'except',
                             'if __name__ == "__main__"')) == False:
-                        pattern = "(.*) = ('*')|(.*) = (\"*\")|(.*) = (\d)|(.*) = (-\d)|(.*) = (None)|(.*) = (True)|(.*) = (False)|(.*) = (URL_PREFIX*)|(.*) = (\[)|(.*) = (\{)|(.*) = (\()"  #a='b'/a="b"/a=0
+                        pattern = r"""(.*) = ('*')|(.*) = ("*")|(.*) = (\d)|(.*) = (-\d)|(.*) = (None)|(.*) = (True)|(.*) = (False)|(.*) = (URL_PREFIX*)|(.*) = (\[)|(.*) = (\{)|(.*) = (\()"""  #a='b'/a="b"/a=0
                         if re.match(pattern, output.strip()) == None:
                             pyCov_file.append(clazz_filename)
                             coverageMessage = 'RELATED'
diff --git a/tools/get_pr_ut.py b/tools/get_pr_ut.py
index f54f153eb434510f7ffb6e9cb9a633d79099902f..efe21b0639161dc41f77a91bb7210b2698366b39 100644
--- a/tools/get_pr_ut.py
+++ b/tools/get_pr_ut.py
@@ -36,12 +36,12 @@ class PRChecker(object):
     def __init__(self):
         self.github = Github(os.getenv('GITHUB_API_TOKEN'), timeout=60)
         self.repo = self.github.get_repo('PaddlePaddle/Paddle')
-        self.py_prog_oneline = re.compile('\d+\|\s*#.*')
+        self.py_prog_oneline = re.compile(r'\d+\|\s*#.*')
         self.py_prog_multiline_a = re.compile('"""(.*?)"""', re.DOTALL)
         self.py_prog_multiline_b = re.compile("'''(.*?)'''", re.DOTALL)
-        self.cc_prog_online = re.compile('\d+\|\s*//.*')
-        self.cc_prog_multiline = re.compile('\d+\|\s*/\*.*?\*/', re.DOTALL)
-        self.lineno_prog = re.compile('@@ \-\d+,\d+ \+(\d+),(\d+) @@')
+        self.cc_prog_online = re.compile(r'\d+\|\s*//.*')
+        self.cc_prog_multiline = re.compile(r'\d+\|\s*/\*.*?\*/', re.DOTALL)
+        self.lineno_prog = re.compile(r'@@ \-\d+,\d+ \+(\d+),(\d+) @@')
         self.pr = None
         self.suffix = ''
         self.full_case = False
diff --git a/tools/get_ut_file_map.py b/tools/get_ut_file_map.py
index 220bafa7e5b0a72043ec35ec0833334b2419c842..5afc6b03f2f96ddda297babe0483441bdd26bee3 100644
--- a/tools/get_ut_file_map.py
+++ b/tools/get_ut_file_map.py
@@ -34,7 +34,7 @@ def get_all_paddle_file(rootPath):

 def get_all_uts(rootPath):
     all_uts_paddle = '%s/build/all_uts_paddle' % rootPath
     os.system(
-        'cd %s/build && ctest -N -V | grep -Ei "Test[ \t]+#" | grep -oEi "\w+$" > %s'
+        r'cd %s/build && ctest -N -V | grep -Ei "Test[ \t]+#" | grep -oEi "\w+$" > %s'
         % (rootPath, all_uts_paddle))

diff --git a/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py b/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py
index 087c88d937be632c0d92a77926c56921c6c91067..7b97a06619eb3b32d5deeacb2a7244b8947672d8 100644
--- a/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py
+++ b/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py
@@ -217,7 +217,7 @@ def convert_op_proto_into_mlir(op_descs):
 |* Automatically generated file, do not edit! *|\n\
 |* Generated by tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py *|\n\
 |* *|\n\
-\*===----------------------------------------------------------------------===*/\n"
+\\*===----------------------------------------------------------------------===*/\n"

     lines = [
         "#ifndef PD_OPS",
diff --git a/tools/infrt/generate_phi_kernel_dialect.py b/tools/infrt/generate_phi_kernel_dialect.py
index cde7f44674c993e8e1a1fbadba9219f5ec0ee326..686a5e12a6c306a26a3d730b0ce956072c16e2d3 100644
--- a/tools/infrt/generate_phi_kernel_dialect.py
+++ b/tools/infrt/generate_phi_kernel_dialect.py
@@ -261,7 +261,7 @@ def generate_dialect_head():
 |* Automatically generated file, do not edit! *|\n\
 |* Generated by tools/infrt/generate_pten_kernel_dialect.py *|\n\
 |* *|\n\
-\*===----------------------------------------------------------------------===*/\n"
+\\*===----------------------------------------------------------------------===*/\n"

     includes_ = "#ifndef PTEN_KERNELS\n\
 #define PTEN_KERNELS\n\
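The two dialect generators above close a C-style banner comment from inside a Python string, where \* is an unrecognized escape: today it still yields the two characters \* but warns at compile time, so the patch doubles the backslash to spell the same text explicitly. A one-line check of that equivalence (standalone sketch):

banner_end = "\\*===----------------------------------------------------------------------===*/\n"
assert banner_end[0] == "\\" and banner_end[1] == "*"  # a literal '\*'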
diff --git a/tools/get_compat_kernel_signature.py b/tools/get_compat_kernel_signature.py
index 12e728598024be235f413ded0e0c8c944edb65ba..104d3ae30c23a77f06fabc9fd93df9d1e39928fc 100644
--- a/tools/get_compat_kernel_signature.py
+++ b/tools/get_compat_kernel_signature.py
@@ -65,7 +65,7 @@ def get_compat_kernels_info():
             data = content.replace("\n", "").replace(
                 " ",
                 "").strip("return").strip("KernelSignature(").strip(
-                    "\);").replace("\"", "").replace("\\", "")
+                    r"\);").replace("\"", "").replace("\\", "")
             registry = False
             if is_grad_kernel(data):
                 continue
diff --git a/tools/jetson_infer_op.py b/tools/jetson_infer_op.py
index c84d7d50d63e54c4b59089566b3abbd964844b07..e132a14373e4f1e2555cb9acbd8acfe4093e9b29 100644
--- a/tools/jetson_infer_op.py
+++ b/tools/jetson_infer_op.py
@@ -155,7 +155,7 @@ def set_diff_value(file, atol="1e-5", inplace_atol="1e-7"):
     :param inplace_atol:
     :return:
     """
-    os.system("sed -i 's/self.check_output(/self\.check_output\(atol=" + atol +
+    os.system(r"sed -i 's/self.check_output(/self\.check_output\(atol=" + atol +
               ",inplace_atol=" + inplace_atol + ",/g\' " + file)


@@ -179,8 +179,8 @@ def change_op_file(start=0, end=0, op_list_file='list_op.txt', path='.'):
         file_with_path = file_path[0]
         # pattern
         pattern_import = ".*import OpTest.*"
-        pattern_skip = "^class .*\(OpTest\):$"
-        pattern_return = "def test.*grad.*\):$"
+        pattern_skip = r"^class .*\(OpTest\):$"
+        pattern_return = r"def test.*grad.*\):$"
         # change file
         add_import_skip_return(file_with_path, pattern_import, pattern_skip,
                                pattern_return)
diff --git a/tools/prune_for_jetson.py b/tools/prune_for_jetson.py
index e91b3b840d3ba07dadf6463a504d95e7e243457c..e87268a9fe2ee0d195c9e2aea7cc5a612d2772c5 100644
--- a/tools/prune_for_jetson.py
+++ b/tools/prune_for_jetson.py
@@ -77,7 +77,7 @@ def prune_phi_kernels():
         all_matches = []
         with open(op_file, 'r', encoding='utf-8') as f:
             content = ''.join(f.readlines())
-            op_pattern = 'PD_REGISTER_KERNEL\(.*?\).*?\{.*?\}'
+            op_pattern = r'PD_REGISTER_KERNEL\(.*?\).*?\{.*?\}'
             op, op_count = find_kernel(content, op_pattern)
             register_op_count += op_count
             all_matches.extend(op)
@@ -143,11 +143,12 @@ def append_fluid_kernels():

     for op in op_white_list:
         patterns = {
-            "REGISTER_OPERATOR": "REGISTER_OPERATOR\(\s*%s\s*," % op,
+            "REGISTER_OPERATOR":
+            r"REGISTER_OPERATOR\(\s*%s\s*," % op,
             "REGISTER_OP_CPU_KERNEL":
-            "REGISTER_OP_CPU_KERNEL\(\s*%s\s*," % op,
+            r"REGISTER_OP_CPU_KERNEL\(\s*%s\s*," % op,
             "REGISTER_OP_CUDA_KERNEL":
-            "REGISTER_OP_CUDA_KERNEL\(\s*%s\s*," % op
+            r"REGISTER_OP_CUDA_KERNEL\(\s*%s\s*," % op
         }
         for k, p in patterns.items():
             matches = re.findall(p, content, flags=re.DOTALL)
diff --git a/tools/remove_grad_op_and_kernel.py b/tools/remove_grad_op_and_kernel.py
index 08f52fc6049d6d76d9d59e66017811f90e46219a..19778c27dbe12bf510f75a0edaa45ea8ec46f1bb 100644
--- a/tools/remove_grad_op_and_kernel.py
+++ b/tools/remove_grad_op_and_kernel.py
@@ -43,7 +43,7 @@ def remove_grad_op_and_kernel(content, pattern1, pattern2):


 def update_operator_cmake(cmake_file):
     pat1 = 'add_subdirectory(optimizers)'
-    pat2 = 'register_operators\(EXCLUDES.*?py_func_op.*?\)'
+    pat2 = r'register_operators\(EXCLUDES.*?py_func_op.*?\)'

     code1 = 'if(ON_INFER)\nadd_subdirectory(optimizers)\nendif()'
     code2 = 'if(ON_INFER)\nfile(GLOB LOSS_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "*loss_op.cc")\nstring(REPLACE ".cc" "" LOSS_OPS "${LOSS_OPS}")\nendif()'
@@ -80,27 +80,27 @@ if __name__ == '__main__':
     # 1. remove all grad op and kernel
     for op_file in all_op:
         # remove all grad op
-        op_pattern1 = 'REGISTER_OPERATOR\(.*?\);?'
-        op_pattern2 = 'REGISTER_OPERATOR\(.*?_grad,.*?\);?'
+        op_pattern1 = r'REGISTER_OPERATOR\(.*?\);?'
+        op_pattern2 = r'REGISTER_OPERATOR\(.*?_grad,.*?\);?'

         # remove all cpu grad kernel
-        cpu_kernel_pattern1 = 'REGISTER_OP_CPU_KERNEL\(.*?\);?'
-        cpu_kernel_pattern2 = 'REGISTER_OP_CPU_KERNEL\(.*?_grad,.*?\);?'
+        cpu_kernel_pattern1 = r'REGISTER_OP_CPU_KERNEL\(.*?\);?'
+        cpu_kernel_pattern2 = r'REGISTER_OP_CPU_KERNEL\(.*?_grad,.*?\);?'

         # remove all gpu grad kernel
-        gpu_kernel_pattern1 = 'REGISTER_OP_CUDA_KERNEL\(.*?\);?'
-        gpu_kernel_pattern2 = 'REGISTER_OP_CUDA_KERNEL\(.*?_grad,.*?\);?'
+        gpu_kernel_pattern1 = r'REGISTER_OP_CUDA_KERNEL\(.*?\);?'
+        gpu_kernel_pattern2 = r'REGISTER_OP_CUDA_KERNEL\(.*?_grad,.*?\);?'

         # remove all xpu grad kernel
-        xpu_kernel_pattern1 = 'REGISTER_OP_XPU_KERNEL\(.*?\);?'
-        xpu_kernel_pattern2 = 'REGISTER_OP_XPU_KERNEL\(.*?_grad,.*?\);?'
+        xpu_kernel_pattern1 = r'REGISTER_OP_XPU_KERNEL\(.*?\);?'
+        xpu_kernel_pattern2 = r'REGISTER_OP_XPU_KERNEL\(.*?_grad,.*?\);?'

         # remove custom grad kernel, mkldnn or cudnn etc.
-        op_kernel_pattern1 = 'REGISTER_OP_KERNEL\(.*?\);?'
-        op_kernel_pattern2 = 'REGISTER_OP_KERNEL\(.*?_grad,.*?\);?'
+        op_kernel_pattern1 = r'REGISTER_OP_KERNEL\(.*?\);?'
+        op_kernel_pattern2 = r'REGISTER_OP_KERNEL\(.*?_grad,.*?\);?'

-        custom_pattern1 = 'REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE\(.*?\);?'
-        custom_pattern2 = 'REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE\(.*?_grad,.*?\);?'
+        custom_pattern1 = r'REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE\(.*?\);?'
+        custom_pattern2 = r'REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE\(.*?_grad,.*?\);?'

         op_name = os.path.split(op_file)[1]
         if op_name in spec_ops:
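For the removal patterns above, the r prefix only silences the escape warnings; the matching behaviour is unchanged. How such a pattern pair can be applied is sketched below, a standalone illustration under the assumption that pattern1 extracts whole registration blocks and pattern2 then filters the _grad ones (the real helper in this file may differ). Note that searching a whole file directly with the _grad pattern would be fragile: a lazy .*? under re.DOTALL can still bridge two adjacent registrations.

import re

src = '''
REGISTER_OPERATOR(relu,
                  ops::ReluOp);
REGISTER_OPERATOR(relu_grad,
                  ops::ReluGradOp);
'''
op_pattern1 = r'REGISTER_OPERATOR\(.*?\);?'
op_pattern2 = r'REGISTER_OPERATOR\(.*?_grad,.*?\);?'

# Extract each registration block, then keep only the grad ones.
blocks = re.findall(op_pattern1, src, flags=re.DOTALL)
grad_blocks = [b for b in blocks if re.match(op_pattern2, b, re.DOTALL)]
assert len(blocks) == 2 and len(grad_blocks) == 1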