Unverified commit 75528ad6, authored by Nyakku Shigure, committed by GitHub

[CodeStyle] use built-in `open` instead of `io.open` (#46751)

* [CodeStyle] use built-in `open` instead of `io.open`

* revert flake8 config changes
Parent commit: a5ce223c
......@@ -15,7 +15,6 @@
# limitations under the License.
import copy
import io
import json
import os
import unicodedata
......@@ -464,7 +463,7 @@ class PretrainedTokenizer(object):
tokenizer_config_file = resolved_vocab_files.pop(
"tokenizer_config_file", None)
if tokenizer_config_file is not None:
with io.open(tokenizer_config_file, encoding="utf-8") as f:
with open(tokenizer_config_file, 'r', encoding="utf-8") as f:
init_kwargs = json.load(f)
else:
init_kwargs = init_configuration
......@@ -527,7 +526,7 @@ class PretrainedTokenizer(object):
self.tokenizer_config_file)
# init_config is set in metaclass created `__init__`,
tokenizer_config = self.init_config
with io.open(tokenizer_config_file, "w", encoding="utf-8") as f:
with open(tokenizer_config_file, "w", encoding="utf-8") as f:
f.write(json.dumps(tokenizer_config, ensure_ascii=False))
self.save_resources(save_directory)
......@@ -571,7 +570,7 @@ class PretrainedTokenizer(object):
Vocab: An instance of `Vocab`.
"""
token_to_idx = {}
with io.open(filepath, 'r', encoding='utf-8') as f:
with open(filepath, 'r', encoding='utf-8') as f:
for index, line in enumerate(f):
token = line.rstrip('\n')
token_to_idx[token] = int(index)
......
......@@ -13,7 +13,6 @@
# limitations under the License.
import argparse
import io
import re
import sys
import os
......@@ -67,7 +66,7 @@ RE_SHEBANG = re.compile(r"^[ \t\v]*#[ \t]?\!")
def _check_copyright(path):
head=[]
try:
with open(path) as f:
with open(path, 'r', encoding='utf-8') as f:
head = [next(f) for x in range(4)]
except StopIteration:
pass
......@@ -79,7 +78,7 @@ def _check_copyright(path):
return False
def generate_copyright(path, comment_mark):
original_contents = io.open(path, encoding="utf-8").readlines()
original_contents = open(path, 'r', encoding="utf-8").readlines()
head = original_contents[0:4]
insert_line_no=0
......@@ -102,7 +101,7 @@ def generate_copyright(path, comment_mark):
new_contents.extend(original_contents[insert_line_no:])
new_contents="".join(new_contents)
with io.open(path, 'w') as output_file:
with open(path, 'w', encoding='utf-8') as output_file:
output_file.write(new_contents)
......
......@@ -20,7 +20,6 @@ import os
import sys
import re
import glob
import io
def find_type_files(cur_dir, file_type, file_list=[]):
......@@ -77,7 +76,7 @@ def prune_phi_kernels():
op_name = os.path.split(op_file)[1]
all_matches = []
with io.open(op_file, 'r', encoding='utf-8') as f:
with open(op_file, 'r', encoding='utf-8') as f:
content = ''.join(f.readlines())
op_pattern = 'PD_REGISTER_KERNEL\(.*?\).*?\{.*?\}'
op, op_count = find_kernel(content, op_pattern)
......@@ -87,7 +86,7 @@ def prune_phi_kernels():
for p in all_matches:
content = content.replace(p, '')
with io.open(op_file, 'w', encoding='utf-8') as f:
with open(op_file, 'w', encoding='utf-8') as f:
f.write(u'{}'.format(content))
print('We erase all grad op and kernel for Paddle-Inference lib.')
......@@ -115,7 +114,7 @@ def append_fluid_kernels():
for op in op_white_list:
append_str = append_str + "file(APPEND ${pybind_file} \"USE_OP__(%s);\\n\")\n" % op
with io.open(file_name, 'r', encoding='utf-8') as f:
with open(file_name, 'r', encoding='utf-8') as f:
content = ''.join(f.readlines())
location_str = "nv_library(\n tensorrt_op_teller\n SRCS op_teller.cc\n DEPS framework_proto device_context)"
......@@ -126,7 +125,7 @@ def append_fluid_kernels():
(location_str, file_name))
return False
with io.open(file_name, 'w', encoding='utf-8') as f:
with open(file_name, 'w', encoding='utf-8') as f:
f.write(u'{}'.format(new_content))
#2. add op and kernel register
......@@ -140,7 +139,7 @@ def append_fluid_kernels():
recursive=True)
for op_file in all_op:
with io.open(op_file, 'r', encoding='utf-8') as f:
with open(op_file, 'r', encoding='utf-8') as f:
content = ''.join(f.readlines())
for op in op_white_list:
......@@ -156,7 +155,7 @@ def append_fluid_kernels():
if len(matches) > 0:
content = content.replace(matches[0],
matches[0].replace(k, k + "__"))
with io.open(op_file, 'w', encoding='utf-8') as f:
with open(op_file, 'w', encoding='utf-8') as f:
f.write(u'{}'.format(content))
return True
......
......@@ -20,7 +20,6 @@ import os
import sys
import re
import glob
import io
def find_type_files(cur_dir, file_type, file_list=[]):
......@@ -120,7 +119,7 @@ if __name__ == '__main__':
custom_pattern2 = custom_pattern2[:-1]
all_matches = []
with io.open(op_file, 'r', encoding='utf-8') as f:
with open(op_file, 'r', encoding='utf-8') as f:
content = ''.join(f.readlines())
op, op_count = remove_grad_op_and_kernel(content, op_pattern1,
......@@ -153,7 +152,7 @@ if __name__ == '__main__':
for i in all_matches:
content = content.replace(i, '')
with io.open(op_file, 'w', encoding='utf-8') as f:
with open(op_file, 'w', encoding='utf-8') as f:
f.write(u'{}'.format(content))
# 2. update operators/CMakeLists.txt
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Please finish editing this message first!
To comment, please register.