Unverified commit 75528ad6, authored by Nyakku Shigure, committed by GitHub

[CodeStyle] use built-in `open` instead of `io.open` (#46751)

* [CodeStyle] use built-in `open` instead of `io.open`

* revert flake8 config changes
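
In Python 3 the built-in `open()` and `io.open()` are the same function, so dropping `io.open` (and the now-unused `import io`) is purely a style change. A minimal check:

```python
import io

# In Python 3, io.open is an alias for the builtin open(),
# so io.open(...) and open(...) are interchangeable.
assert open is io.open

# The replacement pattern applied throughout this commit:
with open(__file__, 'r', encoding='utf-8') as f:  # was: io.open(__file__, ...)
    first_line = f.readline()
print(first_line)
```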
Parent a5ce223c
```diff
@@ -15,7 +15,6 @@
 # limitations under the License.
 import copy
-import io
 import json
 import os
 import unicodedata
@@ -464,7 +463,7 @@ class PretrainedTokenizer(object):
         tokenizer_config_file = resolved_vocab_files.pop(
             "tokenizer_config_file", None)
         if tokenizer_config_file is not None:
-            with io.open(tokenizer_config_file, encoding="utf-8") as f:
+            with open(tokenizer_config_file, 'r', encoding="utf-8") as f:
                 init_kwargs = json.load(f)
         else:
             init_kwargs = init_configuration
@@ -527,7 +526,7 @@ class PretrainedTokenizer(object):
                                              self.tokenizer_config_file)
         # init_config is set in metaclass created `__init__`,
         tokenizer_config = self.init_config
-        with io.open(tokenizer_config_file, "w", encoding="utf-8") as f:
+        with open(tokenizer_config_file, "w", encoding="utf-8") as f:
             f.write(json.dumps(tokenizer_config, ensure_ascii=False))
         self.save_resources(save_directory)
@@ -571,7 +570,7 @@ class PretrainedTokenizer(object):
             Vocab: An instance of `Vocab`.
         """
         token_to_idx = {}
-        with io.open(filepath, 'r', encoding='utf-8') as f:
+        with open(filepath, 'r', encoding='utf-8') as f:
             for index, line in enumerate(f):
                 token = line.rstrip('\n')
                 token_to_idx[token] = int(index)
```
...
```diff
@@ -13,7 +13,6 @@
 # limitations under the License.
 import argparse
-import io
 import re
 import sys
 import os
@@ -67,7 +66,7 @@ RE_SHEBANG = re.compile(r"^[ \t\v]*#[ \t]?\!")
 def _check_copyright(path):
     head=[]
     try:
-        with open(path) as f:
+        with open(path, 'r', encoding='utf-8') as f:
             head = [next(f) for x in range(4)]
     except StopIteration:
         pass
@@ -79,7 +78,7 @@ def _check_copyright(path):
     return False

 def generate_copyright(path, comment_mark):
-    original_contents = io.open(path, encoding="utf-8").readlines()
+    original_contents = open(path, 'r', encoding="utf-8").readlines()
     head = original_contents[0:4]
     insert_line_no=0
@@ -102,7 +101,7 @@ def generate_copyright(path, comment_mark):
     new_contents.extend(original_contents[insert_line_no:])
     new_contents="".join(new_contents)
-    with io.open(path, 'w') as output_file:
+    with open(path, 'w', encoding='utf-8') as output_file:
         output_file.write(new_contents)
```
...
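
One detail worth noting in the `generate_copyright` hunk above: besides swapping `io.open` for `open`, the write path gains an explicit `encoding='utf-8'`. Text-mode `open()` without an `encoding` argument falls back to the locale's preferred encoding, which varies by platform. A small sketch of the failure mode this avoids (the output file name is illustrative):

```python
import locale

# Without an explicit encoding, text-mode open() uses the locale default,
# e.g. cp936 on a Chinese-locale Windows machine rather than UTF-8.
print(locale.getpreferredencoding(False))

# Pinning the encoding, as the hunk above now does, keeps output
# byte-identical across platforms and avoids UnicodeEncodeError:
with open('copyright_demo.txt', 'w', encoding='utf-8') as f:  # illustrative path
    f.write('# Copyright (c) 2022 PaddlePaddle Authors. 版权所有\n')
```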
```diff
@@ -20,7 +20,6 @@ import os
 import sys
 import re
 import glob
-import io

 def find_type_files(cur_dir, file_type, file_list=[]):
@@ -77,7 +76,7 @@ def prune_phi_kernels():
         op_name = os.path.split(op_file)[1]
         all_matches = []
-        with io.open(op_file, 'r', encoding='utf-8') as f:
+        with open(op_file, 'r', encoding='utf-8') as f:
             content = ''.join(f.readlines())
             op_pattern = 'PD_REGISTER_KERNEL\(.*?\).*?\{.*?\}'
             op, op_count = find_kernel(content, op_pattern)
@@ -87,7 +86,7 @@ def prune_phi_kernels():
         for p in all_matches:
             content = content.replace(p, '')
-        with io.open(op_file, 'w', encoding='utf-8') as f:
+        with open(op_file, 'w', encoding='utf-8') as f:
             f.write(u'{}'.format(content))
     print('We erase all grad op and kernel for Paddle-Inference lib.')
@@ -115,7 +114,7 @@ def append_fluid_kernels():
     for op in op_white_list:
         append_str = append_str + "file(APPEND ${pybind_file} \"USE_OP__(%s);\\n\")\n" % op
-    with io.open(file_name, 'r', encoding='utf-8') as f:
+    with open(file_name, 'r', encoding='utf-8') as f:
         content = ''.join(f.readlines())
     location_str = "nv_library(\n tensorrt_op_teller\n SRCS op_teller.cc\n DEPS framework_proto device_context)"
@@ -126,7 +125,7 @@ def append_fluid_kernels():
                   (location_str, file_name))
         return False
-    with io.open(file_name, 'w', encoding='utf-8') as f:
+    with open(file_name, 'w', encoding='utf-8') as f:
         f.write(u'{}'.format(new_content))
     #2. add op and kernel register
@@ -140,7 +139,7 @@ def append_fluid_kernels():
                           recursive=True)
     for op_file in all_op:
-        with io.open(op_file, 'r', encoding='utf-8') as f:
+        with open(op_file, 'r', encoding='utf-8') as f:
             content = ''.join(f.readlines())
     for op in op_white_list:
@@ -156,7 +155,7 @@ def append_fluid_kernels():
         if len(matches) > 0:
             content = content.replace(matches[0],
                                       matches[0].replace(k, k + "__"))
-        with io.open(op_file, 'w', encoding='utf-8') as f:
+        with open(op_file, 'w', encoding='utf-8') as f:
             f.write(u'{}'.format(content))
     return True
```
...
```diff
@@ -20,7 +20,6 @@ import os
 import sys
 import re
 import glob
-import io

 def find_type_files(cur_dir, file_type, file_list=[]):
@@ -120,7 +119,7 @@ if __name__ == '__main__':
         custom_pattern2 = custom_pattern2[:-1]
         all_matches = []
-        with io.open(op_file, 'r', encoding='utf-8') as f:
+        with open(op_file, 'r', encoding='utf-8') as f:
             content = ''.join(f.readlines())
             op, op_count = remove_grad_op_and_kernel(content, op_pattern1,
@@ -153,7 +152,7 @@ if __name__ == '__main__':
         for i in all_matches:
             content = content.replace(i, '')
-        with io.open(op_file, 'w', encoding='utf-8') as f:
+        with open(op_file, 'w', encoding='utf-8') as f:
             f.write(u'{}'.format(content))
     # 2. update operators/CMakeLists.txt
```
...
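
The edits across all four files follow one mechanical pattern, so a change like this is typically scripted. Below is a rough sketch of such a codemod; the glob, the regex, and the script itself are assumptions for illustration, not the tooling actually used for #46751, and it leaves the now-unused `import io` lines for a separate pass:

```python
import glob
import re

# Hypothetical one-off codemod: rewrite io.open(...) calls to the builtin
# open(...) across a source tree. A sketch, not the script used for #46751.
for path in glob.glob('**/*.py', recursive=True):
    with open(path, 'r', encoding='utf-8') as f:
        source = f.read()
    # \b keeps attribute accesses like asyncio.open_connection from matching.
    new_source = re.sub(r'\bio\.open\(', 'open(', source)
    if new_source != source:
        with open(path, 'w', encoding='utf-8') as f:
            f.write(new_source)
```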