Unverified commit 198c7993, authored by Nyakku Shigure and committed by GitHub

[CodeStyle][py2] remove `compat` module (to_bytes) (#47035)

* [CodeStyle][py2] remove `compat` module (to_bytes)

* remove some unused imports

* clean up to_bytes definition and unittests

* Revert "clean up to_bytes definition and unittests"

This reverts commit e726539e1768172a411ff60e63fab82f164343cf.

* use `b` prefix instead of `encode()`
Parent f9c1cdc1
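
The change is mechanical throughout the diff: every `cpt.to_bytes(x)` call becomes `x.encode()`, and calls on plain string literals become `b''` literals. As a rough sketch (illustrative, not the exact Paddle source), the removed helper behaved roughly like the function below, assuming its documented UTF-8 default; the snippet also shows the replacement pattern used in this commit.

```python
# Rough sketch of the removed paddle.compat.to_bytes helper (illustrative only):
# bytes pass through unchanged, str is encoded with UTF-8 by default.
def to_bytes(obj, encoding="utf-8"):
    if isinstance(obj, bytes):
        return obj
    return obj.encode(encoding)

# Replacement pattern applied throughout this commit:
name = "my_var"
assert to_bytes(name) == name.encode() == b"my_var"  # str variable -> str.encode()
assert to_bytes("fetch") == b"fetch"                  # str literal  -> bytes literal
```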
@@ -62,15 +62,14 @@ def __build_dict(tar_file, dict_size, save_path, lang):
word_dict[w] += 1
with open(save_path, "wb") as fout:
fout.write(
cpt.to_bytes("%s\n%s\n%s\n" % (START_MARK, END_MARK, UNK_MARK)))
fout.write(("%s\n%s\n%s\n" % (START_MARK, END_MARK, UNK_MARK)).encode())
for idx, word in enumerate(
sorted(six.iteritems(word_dict),
key=lambda x: x[1],
reverse=True)):
if idx + 3 == dict_size: break
fout.write(cpt.to_bytes(word[0]))
fout.write(cpt.to_bytes('\n'))
fout.write(word[0].encode())
fout.write(b'\n')
def __load_dict(tar_file, dict_size, lang, reverse=False):
@@ -391,10 +391,10 @@ def _infer_var_data_type_shape_(grad_var_name, block):
"""
Infer the data type and shape of given grad variable
"""
grad_var = block.desc.find_var(cpt.to_bytes(grad_var_name))
grad_var = block.desc.find_var(grad_var_name.encode())
fwd_name = _strip_grad_suffix_(grad_var_name)
if block.desc.has_var_recursive(cpt.to_bytes(fwd_name)):
fwd_var = block.desc.find_var_recursive(cpt.to_bytes(fwd_name))
if block.desc.has_var_recursive(fwd_name.encode()):
fwd_var = block.desc.find_var_recursive(fwd_name.encode())
grad_var.set_dtype(fwd_var.dtype())
grad_var.set_shape(fwd_var.shape())
else:
@@ -1457,7 +1457,7 @@ def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map):
if grad_var_ins:
existing_grad_var_ins = [
var for var in grad_var_ins
if block.desc.has_var_recursive(cpt.to_bytes(var))
if block.desc.has_var_recursive(var.encode())
or var in parent_op_vars
]
if not existing_grad_var_ins:
@@ -1476,10 +1476,10 @@ def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map):
new_vars = set()
# create new gradient variables
for grad_var_name in op_desc.output_arg_names():
if block.desc.has_var_recursive(cpt.to_bytes(
grad_var_name)) or grad_var_name == core.empty_var_name():
if block.desc.has_var_recursive(grad_var_name.encode(
)) or grad_var_name == core.empty_var_name():
continue
block.desc.var(cpt.to_bytes(grad_var_name))
block.desc.var(grad_var_name.encode())
new_vars.add(grad_var_name)
if grad_var_name not in grad_to_var:
continue
@@ -1930,8 +1930,8 @@ def _get_output_names(cur_block, targets):
if _some_in_set_(op.desc.output_arg_names(), current_output_names):
for name in op.desc.input_arg_names():
current_output_names.add(name)
if not block.desc.find_var(cpt.to_bytes(name)) \
and parent_block.desc.find_var(cpt.to_bytes(name)):
if not block.desc.find_var(name.encode()) \
and parent_block.desc.find_var(name.encode()):
parent_block_output_names.add(name)
block = parent_block
@@ -30,8 +30,7 @@ from paddle.fluid.framework import _apply_pass
from paddle.fluid.contrib.mixed_precision.decorator import AutoMixedPrecisionLists
from paddle.fluid.contrib.mixed_precision.fp16_utils import rewrite_program, cast_model_to_fp16
from paddle.fluid.dygraph.amp.auto_cast import _in_amp_guard, _in_pure_fp16_guard
import paddle.compat as cpt
from paddle import _C_ops, _legacy_C_ops
from paddle import _legacy_C_ops
class NestSequence(object):
@@ -833,8 +832,7 @@ class PartialProgramLayer:
# be user wanted result.
for param in params:
grad_name = param.name + core.grad_var_suffix()
grad_var = train_program.desc.block(0).find_var(
cpt.to_bytes(grad_name))
grad_var = train_program.desc.block(0).find_var(grad_name.encode())
# NOTE: cannot find var desc maybe no problem, such as in batch_norm
if grad_var is None:
continue
@@ -145,7 +145,7 @@ def _append_loaded_suffix_to_var(program_desc):
var_desc.set_name(new_name)
for block_idx in six.moves.range(program_desc.num_blocks()):
block = program_desc.block(block_idx)
block._rename_var(cpt.to_bytes(old_name), cpt.to_bytes(new_name))
block._rename_var(old_name.encode(), new_name.encode())
for op_idx in six.moves.range(block.op_size()):
op = block.op(op_idx)
op._rename_input(old_name, new_name)
@@ -224,8 +224,7 @@ def _rename_var_program_desc(program_desc, include=None, exclude=None):
else:
name_new = name_old
if name_old != name_new:
cur_block._rename_var(cpt.to_bytes(name_old),
cpt.to_bytes(name_new))
cur_block._rename_var(name_old.encode(), name_new.encode())
if not is_double_grad_var:
dict_rename_var_old_new[name_old] = name_new
dict_rename_var_new_old[name_new] = name_old
@@ -259,11 +258,11 @@ def _rename_var_program_desc(program_desc, include=None, exclude=None):
op._rename_input(
input_arg_name,
dict_rename_var_old_new[input_arg_name])
if cur_block.has_var(cpt.to_bytes(input_arg_name)):
if cur_block.has_var(input_arg_name.encode()):
cur_block._rename_var(
cpt.to_bytes(input_arg_name),
cpt.to_bytes(
dict_rename_var_old_new[input_arg_name]))
input_arg_name.encode(),
dict_rename_var_old_new[input_arg_name].encode(
))
for output_arg_name in op.output_arg_names():
if output_arg_name in dict_rename_var_old_new:
if output_arg_name != dict_rename_var_old_new[
@@ -271,11 +270,11 @@ def _rename_var_program_desc(program_desc, include=None, exclude=None):
op._rename_output(
output_arg_name,
dict_rename_var_old_new[output_arg_name])
if cur_block.has_var(cpt.to_bytes(output_arg_name)):
if cur_block.has_var(output_arg_name.encode()):
cur_block._rename_var(
cpt.to_bytes(output_arg_name),
cpt.to_bytes(
dict_rename_var_old_new[output_arg_name]))
output_arg_name.encode(),
dict_rename_var_old_new[output_arg_name].encode(
))
program_desc.flush()
return dict_rename_var_new_old, dict_rename_var_old_new
@@ -413,25 +412,25 @@ class _ProgramHolder(object):
op = root_block.op(i)
if op.type() == 'feed':
ops_to_remove.append(i)
feed_var_name = cpt.to_bytes(op.input('X')[0])
feed_var_name = op.input('X')[0].encode()
root_block._remove_var(feed_var_name)
self._input_descs.append(
root_block.find_var(cpt.to_bytes(op.output('Out')[0])))
root_block.find_var(op.output('Out')[0].encode()))
elif op.type() == 'scale' and op.output('Out')[0].startswith(
'save_infer_model/scale_'):
ops_to_remove.append(i)
out_var_name = cpt.to_bytes(op.output('Out')[0])
out_var_name = op.output('Out')[0].encode()
root_block._remove_var(out_var_name)
self._output_descs.append(
root_block.find_var(cpt.to_bytes(op.input('X')[0])))
root_block.find_var(op.input('X')[0].encode()))
elif op.type() == 'fetch':
ops_to_remove.append(i)
fetch_var_name = cpt.to_bytes(op.output('Out')[0])
fetch_var_name = op.output('Out')[0].encode()
root_block._remove_var(fetch_var_name)
# NOTE: some old pre-train models have no extra scale_op
if not op.input('X')[0].startswith('save_infer_model/scale_'):
self._output_descs.append(
root_block.find_var(cpt.to_bytes(op.input('X')[0])))
root_block.find_var(op.input('X')[0].encode()))
else:
if op.has_attr("op_callstack"):
op.remove_attr("op_callstack")
@@ -937,7 +936,7 @@ def _run_dygraph(instance, input, program_holder):
# be user wanted result.
for persistable_var in persistable_vars:
grad_var_name = persistable_var.name + core.grad_var_suffix()
grad_var = trace_program.block(0).find_var(cpt.to_bytes(grad_var_name))
grad_var = trace_program.block(0).find_var(grad_var_name.encode())
# NOTE: cannot find var desc maybe not problem,
# such as in batch_norm
if grad_var is None:
@@ -27,7 +27,6 @@ from .framework import convert_np_dtype_to_dtype_, _apply_pass
from . import core
from . import unique_name
from . import compiler
from .. import compat as cpt
from .trainer_factory import TrainerFactory
from .trainer_factory import FetchHandlerMonitor
import copy
@@ -1674,7 +1673,7 @@ class Executor(object):
else:
global_block = program.global_block()
for varname in global_block.vars:
vardesc = global_block.desc.find_var(cpt.to_bytes(varname))
vardesc = global_block.desc.find_var(varname.encode())
varobj = global_block.vars[varname]
# Can not check var built by fluid.layers.data(), because fluid.layers.data() had not set need_check_feed
@@ -1391,10 +1391,10 @@ class Variable(object):
is_new_var = False
name = cpt.to_text(name)
self.desc = self.block.desc.find_var(cpt.to_bytes(name))
self.desc = self.block.desc.find_var(name.encode())
if self.desc is None:
self.desc = self.block.desc.var(cpt.to_bytes(name))
self.desc = self.block.desc.var(name.encode())
is_new_var = True
if is_new_var:
@@ -3691,9 +3691,9 @@ class Block(object):
else:
raise ValueError("unsupported var type: %s", type(v))
orig_var_type = v.type
self.desc._rename_var(cpt.to_bytes(name), cpt.to_bytes(new_name))
self.desc._rename_var(name.encode(), new_name.encode())
# NOTE: v is destroyed by C++ after calling _rename_var.
d = self.desc.find_var(cpt.to_bytes(new_name))
d = self.desc.find_var(new_name.encode())
if var_type == "Parameter":
if in_dygraph_mode():
var = EagerParamBase(d.shape(),
@@ -3744,7 +3744,7 @@ class Block(object):
def _remove_var(self, name, sync=True):
if sync == True:
self._sync_with_cpp()
self.desc._remove_var(cpt.to_bytes(name))
self.desc._remove_var(name.encode())
del self.vars[name]
def create_parameter(self, *args, **kwargs):
@@ -3953,7 +3953,7 @@ class Block(object):
# sync variables removed from c++ end
for var in list(self.vars.keys()):
if not self.desc.find_var(cpt.to_bytes(var)):
if not self.desc.find_var(var.encode()):
self.vars.pop(var)
# sync operators from cpp
@@ -5839,7 +5839,7 @@ class Program(object):
root_block._remove_op(0, read_op_idx + 1)
for var in root_block.all_vars():
if var.type() == core.VarDesc.VarType.READER:
root_block._remove_var(cpt.to_bytes(var.name()))
root_block._remove_var(var.name().encode())
# change all `is_test` attributes to True
for i in six.moves.range(res.desc.num_blocks()):
@@ -158,7 +158,7 @@ class RegisterPassHelper(object):
def _prune_program_desc(self, ops):
for op_desc in ops:
default_attrs = core.get_op_attrs_default_value(
paddle.compat.to_bytes(op_desc.type))
op_desc.type.encode())
remove_attrs = list()
for attr in op_desc.attrs:
# attr must not in
@@ -2450,10 +2450,10 @@ class ConditionalBlock(object):
new_vars = set()
for grad_var_name in new_op_desc.output_arg_names():
if grad_sub_block.desc.has_var_recursive(cpt.to_bytes(
grad_var_name)) or grad_var_name == core.empty_var_name():
if grad_sub_block.desc.has_var_recursive(grad_var_name.encode(
)) or grad_var_name == core.empty_var_name():
continue
grad_sub_block.desc.var(cpt.to_bytes(grad_var_name))
grad_sub_block.desc.var(grad_var_name.encode())
new_vars.add(grad_var_name)
if grad_var_name not in op_grad_to_var:
continue
@@ -18,7 +18,6 @@ import enum
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle import compat as cpt
from paddle.fluid.initializer import NumpyArrayInitializer
from paddle.fluid.framework import convert_np_dtype_to_dtype_
@@ -120,7 +119,7 @@ class BlockConfig:
def fill_block_desc(self, block_desc):
for name in self.vars:
var_desc = block_desc.var(cpt.to_bytes(name))
var_desc = block_desc.var(name.encode())
var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR)
if self.vars_lod_level is not None and name in self.vars_lod_level.keys(
):
@@ -147,9 +146,9 @@ class BlockConfig:
for name, values in op_config.outputs.items():
op_desc.set_output(name, values)
for v in values:
if block_desc.has_var_recursive(cpt.to_bytes(v)):
if block_desc.has_var_recursive(v.encode()):
continue
var_desc = block_desc.var(cpt.to_bytes(v))
var_desc = block_desc.var(v.encode())
var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR)
if op_config.outputs_var_type is not None and v in op_config.outputs_var_type.keys(
):
@@ -216,13 +215,13 @@ def create_fake_model(program_config):
util_program = fluid.Program()
main_block_desc = main_program_desc.block(0)
var_desc = main_block_desc.var(cpt.to_bytes("feed"))
var_desc = main_block_desc.var(b"feed")
var_desc.set_type(core.VarDesc.VarType.FEED_MINIBATCH)
var_desc.set_persistable(True)
index = 0
for name, tensor_config in program_config.inputs.items():
var_desc = main_block_desc.var(cpt.to_bytes(name))
var_desc = main_block_desc.var(name.encode())
var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR)
var_desc.set_dtype(convert_np_dtype_to_dtype_(tensor_config.dtype))
var_desc.set_shape(tensor_config.shape)
@@ -239,7 +238,7 @@ def create_fake_model(program_config):
save_var_map = {}
for name, tensor_config in program_config.weights.items():
var_desc = main_block_desc.var(cpt.to_bytes(name))
var_desc = main_block_desc.var(name.encode())
var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR)
var_desc.set_dtype(convert_np_dtype_to_dtype_(tensor_config.dtype))
var_desc.set_shape(tensor_config.shape)
@@ -280,9 +279,9 @@ def create_fake_model(program_config):
for name, values in op_config.outputs.items():
op_desc.set_output(name, values)
for v in values:
if main_block_desc.has_var_recursive(cpt.to_bytes(v)):
if main_block_desc.has_var_recursive(v.encode()):
continue
var_desc = main_block_desc.var(cpt.to_bytes(v))
var_desc = main_block_desc.var(v.encode())
var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR)
if op_config.outputs_var_type is not None and v in op_config.outputs_var_type.keys(
):
@@ -303,7 +302,7 @@ def create_fake_model(program_config):
op_desc.check_attrs()
for index, name in enumerate(program_config.outputs):
var_desc = main_block_desc.var(cpt.to_bytes("fetch"))
var_desc = main_block_desc.var(b"fetch")
var_desc.set_type(core.VarDesc.VarType.FETCH_LIST)
var_desc.set_need_check_feed(True)
op_desc = main_block_desc.append_op()
@@ -24,7 +24,6 @@ from op_test import OpTest
import paddle
from paddle import _C_ops, _legacy_C_ops
import paddle.fluid as fluid
from paddle import compat as cpt
from paddle.fluid import core, framework, executor
from paddle.fluid.layers.utils import _hash_with_id
from paddle.fluid.framework import _in_eager_mode_
@@ -241,7 +240,7 @@ class RunProgramNPUOpTest(unittest.TestCase):
grad_name = name + core.grad_var_suffix()
for i in six.moves.range(self.program_desc.num_blocks()):
block = self.program_desc.block(i)
var_desc = block.find_var_recursive(cpt.to_bytes(grad_name))
var_desc = block.find_var_recursive(grad_name.encode())
return var_desc.type() if var_desc is not None else None
@@ -48,7 +48,7 @@ def _append_backward_desc(main_program, outs):
# for param in params:
# grad_name = param.name + core.grad_var_suffix()
# grad_var = train_program.desc.block(0).find_var(
# cpt.to_bytes(grad_name))
# grad_name.encode())
# # NOTE: cannot find var desc maybe no problem, such as in batch_norm
# if grad_var is None:
# continue
@@ -14,7 +14,6 @@
import unittest
import paddle.fluid.core as core
import paddle.compat as cpt
from paddle.fluid.framework import Program
@@ -112,7 +111,7 @@ class TestVarDesc(unittest.TestCase):
def test_shape(self):
program_desc = core.ProgramDesc()
block = program_desc.block(0)
var = block.var(cpt.to_bytes('my_var'))
var = block.var(b'my_var')
var.set_type(core.VarDesc.VarType.SELECTED_ROWS)
src_shape = [3, 2, 10, 8]
var.set_shape(src_shape)
@@ -123,7 +122,7 @@ class TestVarDesc(unittest.TestCase):
def test_multiple_shape(self):
program_desc = core.ProgramDesc()
block = program_desc.block(0)
var = block.var(cpt.to_bytes('my_reader'))
var = block.var(b'my_reader')
var.set_type(core.VarDesc.VarType.READER)
src_shapes = [[2, 3, 3], [4, 5], [6, 7, 8, 9]]
var.set_shapes(src_shapes)
@@ -134,7 +133,7 @@ class TestVarDesc(unittest.TestCase):
def test_dtype(self):
program_desc = core.ProgramDesc()
block = program_desc.block(0)
var = block.var(cpt.to_bytes('my_var'))
var = block.var(b'my_var')
var.set_type(core.VarDesc.VarType.LOD_TENSOR)
var.set_dtype(core.VarDesc.VarType.INT32)
self.assertEqual(core.VarDesc.VarType.INT32, var.dtype())
@@ -143,7 +142,7 @@ class TestVarDesc(unittest.TestCase):
def test_multiple_dtype(self):
program_desc = core.ProgramDesc()
block = program_desc.block(0)
var = block.var(cpt.to_bytes('my_reader'))
var = block.var(b'my_reader')
var.set_type(core.VarDesc.VarType.READER)
src_types = [
core.VarDesc.VarType.INT32, core.VarDesc.VarType.FP64,
@@ -156,7 +155,7 @@ class TestVarDesc(unittest.TestCase):
def test_multiple_lod_level(self):
program_desc = core.ProgramDesc()
block = program_desc.block(0)
var = block.var(cpt.to_bytes('my_reader'))
var = block.var(b'my_reader')
var.set_type(core.VarDesc.VarType.READER)
src_types = [3, 1, 2]
var.set_lod_levels(src_types)
@@ -171,12 +170,12 @@ class TestBlockDesc(unittest.TestCase):
self.assertIsNotNone(program_desc)
block = program_desc.block(0)
self.assertIsNotNone(block)
var1 = block.var(cpt.to_bytes("var1"))
var2 = block.var(cpt.to_bytes("var2"))
var3 = block.var(cpt.to_bytes("var3"))
var1 = block.var(b"var1")
var2 = block.var(b"var2")
var3 = block.var(b"var3")
all_vars = block.all_vars()
self.assertEqual(set(all_vars), {var1, var2, var3})
var2_re = block.find_var(cpt.to_bytes("var2"))
var2_re = block.find_var(b"var2")
self.assertEqual(var2_re, var2)
def test_add_op(self):
@@ -14,14 +14,13 @@
import unittest
from paddle.fluid import core
from paddle import compat as cpt
class TestPybindInference(unittest.TestCase):
# call get_op_attrs_default_value for c++ coverage rate
def test_get_op_attrs_default_value(self):
core.get_op_attrs_default_value(cpt.to_bytes("fill_constant"))
core.get_op_attrs_default_value(b"fill_constant")
# the default values of Op 'fill_constant'
#
@@ -20,7 +20,6 @@ import six
import paddle
from paddle import _legacy_C_ops
import paddle.fluid as fluid
from paddle import compat as cpt
from paddle.fluid import core, framework
from paddle.fluid.layers.utils import _hash_with_id
from paddle.fluid.framework import _in_eager_mode_
@@ -301,7 +300,7 @@ class RunProgramOpTest(unittest.TestCase):
grad_name = name + core.grad_var_suffix()
for i in six.moves.range(self.program_desc.num_blocks()):
block = self.program_desc.block(i)
var_desc = block.find_var_recursive(cpt.to_bytes(grad_name))
var_desc = block.find_var_recursive(grad_name.encode())
return var_desc.type() if var_desc is not None else None
@@ -15,7 +15,6 @@
from collections import OrderedDict
import paddle
from paddle import compat as cpt
from paddle.fluid import framework as framework
from paddle.fluid.framework import Operator, default_main_program
from paddle.incubate.autograd.utils import as_tensors
@@ -220,7 +219,7 @@ class Transform(object):
block = self.block
for var in vars_to_erase:
name = var.name
block.desc._remove_var(cpt.to_bytes(name))
block.desc._remove_var(name.encode())
del block.vars[name]
block._sync_with_cpp()
@@ -512,7 +511,7 @@ def _lower(block, reverse, blacklist):
assert var_name in to_bind_rev, 'var_name "{}" is not in to_bind_rev.'.format(
var_name)
if var_name != to_bind_rev[var_name]:
block.desc._remove_var(cpt.to_bytes(var_name))
block.desc._remove_var(var_name.encode())
del block.vars[var_name]
block._sync_with_cpp()
@@ -170,14 +170,14 @@ class WMT16(Dataset):
with open(dict_path, "wb") as fout:
fout.write(
cpt.to_bytes("%s\n%s\n%s\n" % (START_MARK, END_MARK, UNK_MARK)))
("%s\n%s\n%s\n" % (START_MARK, END_MARK, UNK_MARK)).encode())
for idx, word in enumerate(
sorted(six.iteritems(word_dict),
key=lambda x: x[1],
reverse=True)):
if idx + 3 == dict_size: break
fout.write(cpt.to_bytes(word[0]))
fout.write(cpt.to_bytes('\n'))
fout.write(word[0].encode())
fout.write(b'\n')
def _load_data(self):
# the index for start mark, end mark, and unk are the same in source
@@ -14,7 +14,6 @@
import paddle.fluid.framework as framework
from paddle.fluid import core
from paddle import compat as cpt
# collect original ops: op which has both inference and grid definition
@@ -59,7 +58,7 @@ QUANT = "quant"
def get_attr_default_value(op_name):
return core.get_op_attrs_default_value(cpt.to_bytes(op_name))
return core.get_op_attrs_default_value(op_name.encode())
def get_vars_info(op_vars_proto):
@@ -45,7 +45,6 @@ Usage:
import paddle.fluid.framework as framework
from paddle.fluid import core
import json
from paddle import compat as cpt
INPUTS = "Inputs"
OUTPUTS = "Outputs"
@@ -64,7 +63,7 @@ QUANT = "quant"
def get_attr_default_value(op_name):
return core.get_op_attrs_default_value(cpt.to_bytes(op_name))
return core.get_op_attrs_default_value(op_name.encode())
def get_vars_info(op_vars_proto):