Unverified commit e1c0461d authored by Tony Cao, committed by GitHub

[CodeStyle][W605] Add escape symbols to some strings (#46752)

* Fix W605 in tools folder by adding escape symbols

* Fix W605 in incubate and some other folders

* Fix W605 in /fluid/test folders

* Update tools/analysisPyXml.py
Co-authored-by: Nyakku Shigure <sigure.qaq@gmail.com>

* Add escape symbols through a mix of manual and automatic changes

* revert changes in transformer.py

* Fix new code with W605 error: add escape symbols

* revert changes in transformer.py

* revert changes in transformer.py
Co-authored-by: NNyakku Shigure <sigure.qaq@gmail.com>
Parent c91b1b91
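For context: flake8's W605 flags invalid escape sequences such as \d or \( inside ordinary string literals; Python 3 treats them as deprecated and may eventually make them errors. The recurring fix in every hunk below is to mark the affected literal as a raw string. A minimal sketch of the warning and the fix (illustrative, not taken from the patch):

    import re

    # Before: '\d' is not a recognized escape in a plain literal, so Python 3
    # emits a DeprecationWarning and flake8 reports W605.
    old_pattern = '\d+'  # noqa: W605

    # After: the r prefix keeps the backslash literal and silences the warning.
    new_pattern = r'\d+'

    assert re.fullmatch(new_pattern, '123') is not None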
@@ -295,7 +295,7 @@ def ParseYamlForwardFromBackward(string):
wspace = r'\s*'
fargs = r'(.*?)'
frets = r'(.*)'
-pattern = f'{fname}{wspace}\({wspace}{fargs}{wspace}\){wspace}->{wspace}{frets}'
+pattern = fr'{fname}{wspace}\({wspace}{fargs}{wspace}\){wspace}->{wspace}{frets}'
m = re.search(pattern, string)
function_name = m.group(1)
@@ -314,7 +314,7 @@ def ParseYamlForward(args_str, returns_str):
fargs = r'(.*?)'
wspace = r'\s*'
-args_pattern = f'^\({fargs}\)$'
+args_pattern = fr'^\({fargs}\)$'
args_str = re.search(args_pattern, args_str.strip()).group(1)
inputs_list, attrs_list = ParseYamlArgs(args_str)
@@ -329,7 +329,7 @@ def ParseYamlBackward(args_str, returns_str):
fargs = r'(.*?)'
wspace = r'\s*'
-args_pattern = f'\({fargs}\)'
+args_pattern = fr'\({fargs}\)'
args_str = re.search(args_pattern, args_str).group(1)
inputs_list, attrs_list = ParseYamlArgs(args_str)
......
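The fr prefix used above combines f-string interpolation with raw-string backslash handling, which is exactly what these regex templates need: the {fname}-style placeholders are substituted while \( and \) pass through to the regex engine untouched. An illustrative check (the input string is made up):

    import re

    fname = r'(.*?)'
    wspace = r'\s*'
    pattern = fr'{fname}{wspace}\({wspace}(.*)\)'  # interpolated, yet raw
    assert re.search(pattern, 'matmul (x, y)').group(1) == 'matmul'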
@@ -32,7 +32,7 @@ HOME = os.path.expanduser('~')
# If the default HOME dir does not support writing, we
# will create a temporary folder to store the cache files.
if not os.access(HOME, os.W_OK):
"""
r"""
gettempdir() return the name of the directory used for temporary files.
On Windows, the directories C:\TEMP, C:\TMP, \TEMP, and \TMP, in that order.
On all other platforms, the directories /tmp, /var/tmp, and /usr/tmp, in that order.
......
@@ -211,7 +211,7 @@ class GroupShardedOptimizerStage2(Optimizer):
if self._broadcast_order_params is None:
# Params' names should be like column_linear_32.w_0 patter to get the best performance.
warnings.warn(
"The param name passed to the optimizer doesn't follow .+_[0-9]+\..+ patter, "
r"The param name passed to the optimizer doesn't follow .+_[0-9]+\..+ patter, "
"overlap broadcast may harm the performance.")
self._broadcast_order_params = self._local_params
......
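Only the first half of this warning message gained the r prefix because adjacent string literals are concatenated and the prefix applies per literal; the second literal contains no backslash, so it can stay plain. Illustrative:

    msg = (r"doesn't follow .+_[0-9]+\..+ pattern, "  # has a backslash: needs r
           "overlap broadcast may harm the performance.")  # no backslash: plain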
@@ -98,7 +98,7 @@ class Gumbel(TransformedDistribution):
@property
def mean(self):
"""Mean of distribution
r"""Mean of distribution
The mean is
@@ -120,7 +120,7 @@ class Gumbel(TransformedDistribution):
@property
def variance(self):
"""Variance of distribution.
r"""Variance of distribution.
The variance is
@@ -144,7 +144,7 @@ class Gumbel(TransformedDistribution):
@property
def stddev(self):
"""Standard deviation of distribution
r"""Standard deviation of distribution
The standard deviation is
......
@@ -105,7 +105,7 @@ class Laplace(distribution.Distribution):
@property
def variance(self):
"""Variance of distribution.
r"""Variance of distribution.
The variance is
......
@@ -20,7 +20,7 @@ import hypothesis.strategies as st
class TestConvActMkldnnFusePass(PassAutoScanTest):
"""
r"""
x_var f_var(persistable)
\ /
conv2d
......
@@ -20,7 +20,7 @@ import hypothesis.strategies as st
class TestConvBiasMkldnnFusePass(PassAutoScanTest):
"""
r"""
x_var f_var(persistable)
\ /
conv2d
......
@@ -23,7 +23,7 @@ os.environ['NVIDIA_TF32_OVERRIDE'] = '0'
class TestConvElementwiseAdd2ActPass(PassAutoScanTest):
"""
r"""
x_var f_var(persistable)
\ /
conv2d
......
@@ -22,7 +22,7 @@ os.environ['NVIDIA_TF32_OVERRIDE'] = '0'
class TestConvElementwiseAddActPass(PassAutoScanTest):
"""
r"""
x_var f_var(persistable)
\ /
conv2d
......
@@ -24,7 +24,7 @@ os.environ['NVIDIA_TF32_OVERRIDE'] = '0'
class TestConvEltwiseaddBnFusePass(PassAutoScanTest):
"""
r"""
x_var f_var(persistable)
\ /
conv2d
......
@@ -21,7 +21,7 @@ import hypothesis.strategies as st
class TestConvTransposeBnFusePass(PassAutoScanTest):
-'''
+r'''
conv_input conv_weight_var(persistable)
\ /
conv_op
......
@@ -21,7 +21,7 @@ import hypothesis.strategies as st
class TestConvTransposeEltwiseaddBnFusePass(PassAutoScanTest):
-'''
+r'''
conv_input conv_weight_var(persistable)
\ /
conv_op
......
@@ -23,7 +23,7 @@ import hypothesis.strategies as st
class TestEmbeddingEltwiseLayerNormFusePass(PassAutoScanTest):
-'''
+r'''
in_var1 emb_var in_var2 emb_var in_var3 emb_var in_var emb_var
| | | | | | | |
lookup_table lookup_table lookup_table ... lookup_table
......
@@ -34,7 +34,7 @@ class FcElementLayernormFusePassDataGen:
class TestFCElementwiseLayerNormFusePass(PassAutoScanTest):
"""
r"""
x_var w(persistable) bias_var(persistable)
\ | /
fc
......
@@ -22,7 +22,7 @@ import hypothesis.strategies as st
class TestFcFusePass(PassAutoScanTest):
"""
r"""
x_var y_var(persistable)
\ /
mul bias_var(persistable)
......
@@ -20,7 +20,7 @@ import hypothesis.strategies as st
class TestFlatten2MatmulFusePass(PassAutoScanTest):
"""
r"""
x_var
|
flatten2
......
@@ -21,7 +21,7 @@ import hypothesis.strategies as st
class TestFcFusePass(PassAutoScanTest):
"""
r"""
x_var
/ \
/ reduce_mean "u(x)"
......
@@ -20,7 +20,7 @@ import hypothesis.strategies as st
class TestMapMatmulToMulPass(PassAutoScanTest):
"""
r"""
x_var y_var(persistable)
\ /
matmul
......
@@ -20,7 +20,7 @@ import hypothesis.strategies as st
class TestMapMatmulToMulPass(PassAutoScanTest):
"""
r"""
x_var y_var(persistable)
\ /
matmul_v2
......
@@ -20,7 +20,7 @@ import hypothesis.strategies as st
class TestMapMatmulToMulPass(PassAutoScanTest):
"""
r"""
x_var y_var(persistable)
\ /
matmul_v2
......
@@ -20,7 +20,7 @@ import hypothesis.strategies as st
class TestMatmulScaleFusePass(PassAutoScanTest):
"""
r"""
x_var y_var(persistable)
\ /
matmul
......
@@ -20,7 +20,7 @@ import hypothesis.strategies as st
class TestMatmulV2ScaleFusePass(PassAutoScanTest):
"""
r"""
x_var y_var(persistable) x_var y_var*scale(persistable)
\ / \ /
matmul_v2 matmul_v2
......
@@ -21,7 +21,7 @@ import hypothesis.strategies as st
class DepthwiseConvMKLDNNPass(PassAutoScanTest):
-'''
+r'''
conv_input conv_weight_var(persistable)
\ /
conv_op
......
@@ -20,7 +20,7 @@ import hypothesis.strategies as st
class TestReshape2MatmulFusePass(PassAutoScanTest):
"""
r"""
x_var
|
reshape2
......
@@ -20,7 +20,7 @@ import hypothesis.strategies as st
class TestSqueeze2MatmulFusePass(PassAutoScanTest):
"""
r"""
x_var
|
squeeze2
......
@@ -20,7 +20,7 @@ import hypothesis.strategies as st
class TestTransposeFlattenConcatFusePass(PassAutoScanTest):
"""
r"""
x_1_var x_2_var
| |
transpose2 transpose2
......
@@ -21,7 +21,7 @@ import hypothesis.strategies as st
class TestFlatten2MatmulFusePass(PassAutoScanTest):
"""
r"""
x_var
|
flatten2
......
@@ -21,7 +21,7 @@ import hypothesis.strategies as st
class TestSqueeze2MatmulFusePass(PassAutoScanTest):
"""
r"""
x_var
|
squeeze2
......
@@ -21,7 +21,7 @@ import hypothesis.strategies as st
class TestUnsqueezeEltwiseFusePass(PassAutoScanTest):
"""
r"""
y_var
|
unsqueeze2
......
@@ -17,7 +17,7 @@ from paddle.incubate.nn import functional as F
class FusedLinear(Layer):
"""
r"""
Linear layer takes only one multi-dimensional tensor as input with the
shape :math:`[batch\_size, *, in\_features]` , where :math:`*` means any
number of additional dimensions. It multiplies input tensor with the weight
......
@@ -1246,7 +1246,7 @@ class FusedMultiTransformer(Layer):
caches=None,
pre_caches=None,
time_step=None):
"""
r"""
Applies multi transformer layers on the input.
Parameters:
......
@@ -61,7 +61,7 @@ def relu(x, name=None):
@dygraph_only
def softmax(x, axis=-1, name=None):
"""
r"""
sparse softmax activation, requiring x to be a SparseCooTensor or SparseCsrTensor.
Note:
@@ -146,7 +146,7 @@ def relu6(x, name=None):
@dygraph_only
def leaky_relu(x, negative_slope=0.01, name=None):
"""
r"""
sparse leaky_relu activation, requiring x to be a SparseCooTensor or SparseCsrTensor.
.. math::
......
@@ -59,7 +59,7 @@ class ReLU(Layer):
class Softmax(Layer):
"""
r"""
Sparse Softmax Activation, requiring x to be a SparseCooTensor or SparseCsrTensor.
Note:
@@ -164,7 +164,7 @@ class ReLU6(Layer):
class LeakyReLU(Layer):
"""
r"""
Sparse Leaky ReLU Activation, requiring x to be a SparseCooTensor or SparseCsrTensor.
.. math::
......
@@ -548,7 +548,7 @@ def coalesce(x):
@dygraph_only
def rad2deg(x, name=None):
"""
r"""
Convert each of the elements of input x from radian to degree,
requiring x to be a SparseCooTensor or SparseCsrTensor.
@@ -581,7 +581,7 @@ def rad2deg(x, name=None):
@dygraph_only
def deg2rad(x, name=None):
"""
r"""
Convert each of the elements of input x from degree to radian,
requiring x to be a SparseCooTensor or SparseCsrTensor.
......
@@ -1381,7 +1381,7 @@ def adaptive_avg_pool1d(x, output_size, name=None):
def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
"""
r"""
Applies 2D adaptive avg pooling on input tensor. The h and w dimensions
of the output tensor are determined by the parameter output_size.
@@ -1502,7 +1502,7 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
def adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None):
"""
r"""
This operation applies 3D adaptive avg pooling on input tensor. The h and w dimensions
of the output tensor are determined by the parameter output_size.
......
@@ -1399,7 +1399,7 @@ def _build_table(statistic_data,
append(header_sep)
append(row_format.format(*headers))
append(header_sep)
-kernel_name_pattern = re.compile('(.+?)(<.*>)(\(.*\))')
+kernel_name_pattern = re.compile(r'(.+?)(<.*>)(\(.*\))')
for row_values in all_row_values:
match = kernel_name_pattern.match(row_values[0])
if match:
......
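The compiled pattern splits a templated kernel name into base name, template arguments, and call signature. A quick illustration with a made-up kernel string:

    import re

    kernel_name_pattern = re.compile(r'(.+?)(<.*>)(\(.*\))')
    m = kernel_name_pattern.match('gemm_kernel<float, 128>(int, int)')
    assert m.groups() == ('gemm_kernel', '<float, 128>', '(int, int)')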
@@ -28,13 +28,13 @@ __all__ = []
def bernoulli(x, name=None):
"""
r"""
For each element :math:`x_i` in input ``x``, take a sample from the Bernoulli distribution, also called two-point distribution, with success probability :math:`x_i`. The Bernoulli distribution with success probability :math:`x_i` is a discrete probability distribution with probability mass function
.. math::
-p(y)=\\begin{cases}
-x_i,&y=1\\\\
+p(y)=\begin{cases}
+x_i,&y=1\\
1-x_i,&y=0
\end{cases}.
......
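Note that this hunk changes the docstring body along with the prefix: inside a raw docstring backslashes are no longer escape-processed, so the LaTeX \begin and the line break \\ are written once instead of doubled. Both spellings denote the same text:

    plain = "p(y)=\\begin{cases}"  # plain literal: backslash must be doubled
    raw = r"p(y)=\begin{cases}"    # raw literal: written as-is
    assert plain == raw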
@@ -45,7 +45,7 @@ def analysisPyXml(rootPath, ut):
'@', '\'\'\'', 'logger', '_logger', 'logging', 'r"""',
'pass', 'try', 'except',
'if __name__ == "__main__"')) == False:
pattern = "(.*) = ('*')|(.*) = (\"*\")|(.*) = (\d)|(.*) = (-\d)|(.*) = (None)|(.*) = (True)|(.*) = (False)|(.*) = (URL_PREFIX*)|(.*) = (\[)|(.*) = (\{)|(.*) = (\()" #a='b'/a="b"/a=0
pattern = r"""(.*) = ('*')|(.*) = ("*")|(.*) = (\d)|(.*) = (-\d)|(.*) = (None)|(.*) = (True)|(.*) = (False)|(.*) = (URL_PREFIX*)|(.*) = (\[)|(.*) = (\{)|(.*) = (\()""" #a='b'/a="b"/a=0
if re.match(pattern, output.strip()) == None:
pyCov_file.append(clazz_filename)
coverageMessage = 'RELATED'
......
@@ -36,12 +36,12 @@ class PRChecker(object):
def __init__(self):
self.github = Github(os.getenv('GITHUB_API_TOKEN'), timeout=60)
self.repo = self.github.get_repo('PaddlePaddle/Paddle')
-self.py_prog_oneline = re.compile('\d+\|\s*#.*')
+self.py_prog_oneline = re.compile(r'\d+\|\s*#.*')
self.py_prog_multiline_a = re.compile('"""(.*?)"""', re.DOTALL)
self.py_prog_multiline_b = re.compile("'''(.*?)'''", re.DOTALL)
-self.cc_prog_online = re.compile('\d+\|\s*//.*')
-self.cc_prog_multiline = re.compile('\d+\|\s*/\*.*?\*/', re.DOTALL)
-self.lineno_prog = re.compile('@@ \-\d+,\d+ \+(\d+),(\d+) @@')
+self.cc_prog_online = re.compile(r'\d+\|\s*//.*')
+self.cc_prog_multiline = re.compile(r'\d+\|\s*/\*.*?\*/', re.DOTALL)
+self.lineno_prog = re.compile(r'@@ \-\d+,\d+ \+(\d+),(\d+) @@')
self.pr = None
self.suffix = ''
self.full_case = False
......
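lineno_prog extracts the new-file start line and hunk length from a unified-diff header, the same @@ markers visible throughout this page (the \- in the pattern is a redundant but harmless regex escape). Illustrative:

    import re

    lineno_prog = re.compile(r'@@ \-\d+,\d+ \+(\d+),(\d+) @@')
    m = lineno_prog.search('@@ -295,7 +295,7 @@ def ParseYamlForwardFromBackward(string):')
    assert m.groups() == ('295', '7')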
@@ -34,7 +34,7 @@ def get_all_paddle_file(rootPath):
def get_all_uts(rootPath):
all_uts_paddle = '%s/build/all_uts_paddle' % rootPath
os.system(
-'cd %s/build && ctest -N -V | grep -Ei "Test[ \t]+#" | grep -oEi "\w+$" > %s'
+r'cd %s/build && ctest -N -V | grep -Ei "Test[ \t]+#" | grep -oEi "\w+$" > %s'
% (rootPath, all_uts_paddle))
......
@@ -217,7 +217,7 @@ def convert_op_proto_into_mlir(op_descs):
|* Automatically generated file, do not edit! *|\n\
|* Generated by tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py *|\n\
|* *|\n\
-\*===----------------------------------------------------------------------===*/\n"
+\\*===----------------------------------------------------------------------===*/\n"
lines = [
"#ifndef PD_OPS",
......
@@ -261,7 +261,7 @@ def generate_dialect_head():
|* Automatically generated file, do not edit! *|\n\
|* Generated by tools/infrt/generate_pten_kernel_dialect.py *|\n\
|* *|\n\
-\*===----------------------------------------------------------------------===*/\n"
+\\*===----------------------------------------------------------------------===*/\n"
includes_ = "#ifndef PTEN_KERNELS\n\
#define PTEN_KERNELS\n\
......
@@ -65,7 +65,7 @@ def get_compat_kernels_info():
data = content.replace("\n", "").replace(
" ",
"").strip("return").strip("KernelSignature(").strip(
"\);").replace("\"", "").replace("\\", "")
r"\);").replace("\"", "").replace("\\", "")
registry = False
if is_grad_kernel(data):
continue
......
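A side note on this hunk: str.strip treats its argument as a set of characters, not a pattern, so strip(r"\);") trims any mix of backslash, ')', and ';' from both ends. The r prefix only keeps the backslash literal and silences W605; runtime behavior is unchanged. Illustrative:

    # strip() removes characters from the set {'\\', ')', ';'} at both ends.
    assert 'KernelSignature(foo);'.strip(r'\);') == 'KernelSignature(foo'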
@@ -155,7 +155,7 @@ def set_diff_value(file, atol="1e-5", inplace_atol="1e-7"):
:param inplace_atol:
:return:
"""
os.system("sed -i 's/self.check_output(/self\.check_output\(atol=" + atol +
os.system(r"sed -i 's/self.check_output(/self\.check_output\(atol=" + atol +
",inplace_atol=" + inplace_atol + ",/g\' " + file)
@@ -179,8 +179,8 @@ def change_op_file(start=0, end=0, op_list_file='list_op.txt', path='.'):
file_with_path = file_path[0]
# pattern
pattern_import = ".*import OpTest.*"
pattern_skip = "^class .*\(OpTest\):$"
pattern_return = "def test.*grad.*\):$"
pattern_skip = r"^class .*\(OpTest\):$"
pattern_return = r"def test.*grad.*\):$"
# change file
add_import_skip_return(file_with_path, pattern_import, pattern_skip,
pattern_return)
......
@@ -77,7 +77,7 @@ def prune_phi_kernels():
all_matches = []
with open(op_file, 'r', encoding='utf-8') as f:
content = ''.join(f.readlines())
-op_pattern = 'PD_REGISTER_KERNEL\(.*?\).*?\{.*?\}'
+op_pattern = r'PD_REGISTER_KERNEL\(.*?\).*?\{.*?\}'
op, op_count = find_kernel(content, op_pattern)
register_op_count += op_count
all_matches.extend(op)
@@ -143,11 +143,12 @@ def append_fluid_kernels():
for op in op_white_list:
patterns = {
"REGISTER_OPERATOR": "REGISTER_OPERATOR\(\s*%s\s*," % op,
"REGISTER_OPERATOR":
r"REGISTER_OPERATOR\(\s*%s\s*," % op,
"REGISTER_OP_CPU_KERNEL":
"REGISTER_OP_CPU_KERNEL\(\s*%s\s*," % op,
r"REGISTER_OP_CPU_KERNEL\(\s*%s\s*," % op,
"REGISTER_OP_CUDA_KERNEL":
"REGISTER_OP_CUDA_KERNEL\(\s*%s\s*," % op
r"REGISTER_OP_CUDA_KERNEL\(\s*%s\s*," % op
}
for k, p in patterns.items():
matches = re.findall(p, content, flags=re.DOTALL)
......
@@ -43,7 +43,7 @@ def remove_grad_op_and_kernel(content, pattern1, pattern2):
def update_operator_cmake(cmake_file):
pat1 = 'add_subdirectory(optimizers)'
-pat2 = 'register_operators\(EXCLUDES.*?py_func_op.*?\)'
+pat2 = r'register_operators\(EXCLUDES.*?py_func_op.*?\)'
code1 = 'if(ON_INFER)\nadd_subdirectory(optimizers)\nendif()'
code2 = 'if(ON_INFER)\nfile(GLOB LOSS_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "*loss_op.cc")\nstring(REPLACE ".cc" "" LOSS_OPS "${LOSS_OPS}")\nendif()'
@@ -80,27 +80,27 @@ if __name__ == '__main__':
# 1. remove all grad op and kernel
for op_file in all_op:
# remove all grad op
-op_pattern1 = 'REGISTER_OPERATOR\(.*?\);?'
-op_pattern2 = 'REGISTER_OPERATOR\(.*?_grad,.*?\);?'
+op_pattern1 = r'REGISTER_OPERATOR\(.*?\);?'
+op_pattern2 = r'REGISTER_OPERATOR\(.*?_grad,.*?\);?'
# remove all cpu grad kernel
-cpu_kernel_pattern1 = 'REGISTER_OP_CPU_KERNEL\(.*?\);?'
-cpu_kernel_pattern2 = 'REGISTER_OP_CPU_KERNEL\(.*?_grad,.*?\);?'
+cpu_kernel_pattern1 = r'REGISTER_OP_CPU_KERNEL\(.*?\);?'
+cpu_kernel_pattern2 = r'REGISTER_OP_CPU_KERNEL\(.*?_grad,.*?\);?'
# remove all gpu grad kernel
-gpu_kernel_pattern1 = 'REGISTER_OP_CUDA_KERNEL\(.*?\);?'
-gpu_kernel_pattern2 = 'REGISTER_OP_CUDA_KERNEL\(.*?_grad,.*?\);?'
+gpu_kernel_pattern1 = r'REGISTER_OP_CUDA_KERNEL\(.*?\);?'
+gpu_kernel_pattern2 = r'REGISTER_OP_CUDA_KERNEL\(.*?_grad,.*?\);?'
# remove all xpu grad kernel
-xpu_kernel_pattern1 = 'REGISTER_OP_XPU_KERNEL\(.*?\);?'
-xpu_kernel_pattern2 = 'REGISTER_OP_XPU_KERNEL\(.*?_grad,.*?\);?'
+xpu_kernel_pattern1 = r'REGISTER_OP_XPU_KERNEL\(.*?\);?'
+xpu_kernel_pattern2 = r'REGISTER_OP_XPU_KERNEL\(.*?_grad,.*?\);?'
# remove custom grad kernel, mkldnn or cudnn etc.
-op_kernel_pattern1 = 'REGISTER_OP_KERNEL\(.*?\);?'
-op_kernel_pattern2 = 'REGISTER_OP_KERNEL\(.*?_grad,.*?\);?'
+op_kernel_pattern1 = r'REGISTER_OP_KERNEL\(.*?\);?'
+op_kernel_pattern2 = r'REGISTER_OP_KERNEL\(.*?_grad,.*?\);?'
-custom_pattern1 = 'REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE\(.*?\);?'
-custom_pattern2 = 'REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE\(.*?_grad,.*?\);?'
+custom_pattern1 = r'REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE\(.*?\);?'
+custom_pattern2 = r'REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE\(.*?_grad,.*?\);?'
op_name = os.path.split(op_file)[1]
if op_name in spec_ops:
......
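All of these registration patterns share one shape: with the r prefix, \( reaches the regex engine as an escaped parenthesis instead of tripping W605 in the Python literal. A self-contained sketch of how such pattern pairs can separate grad registrations from forward ones (the C++ snippet and the filtering logic are illustrative, not lifted from the script):

    import re

    content = ('REGISTER_OPERATOR(relu, ops::ReluOp);\n'
               'REGISTER_OPERATOR(relu_grad, ops::ReluGradOp);')
    op_pattern1 = r'REGISTER_OPERATOR\(.*?\);?'
    op_pattern2 = r'REGISTER_OPERATOR\(.*?_grad,.*?\);?'
    ops = re.findall(op_pattern1, content, flags=re.DOTALL)
    kept = [op for op in ops if not re.match(op_pattern2, op, flags=re.DOTALL)]
    assert kept == ['REGISTER_OPERATOR(relu, ops::ReluOp);']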