diff --git a/python/paddle/dataset/wmt16.py b/python/paddle/dataset/wmt16.py
index cc3857aca982a868ddaadd5be7b02e50274e7d67..64ddc2214716e6b95bb6b87473b3acabe178357d 100644
--- a/python/paddle/dataset/wmt16.py
+++ b/python/paddle/dataset/wmt16.py
@@ -62,15 +62,14 @@ def __build_dict(tar_file, dict_size, save_path, lang):
                 word_dict[w] += 1
 
     with open(save_path, "wb") as fout:
-        fout.write(
-            cpt.to_bytes("%s\n%s\n%s\n" % (START_MARK, END_MARK, UNK_MARK)))
+        fout.write(("%s\n%s\n%s\n" % (START_MARK, END_MARK, UNK_MARK)).encode())
         for idx, word in enumerate(
                 sorted(six.iteritems(word_dict),
                        key=lambda x: x[1],
                        reverse=True)):
            if idx + 3 == dict_size: break
-            fout.write(cpt.to_bytes(word[0]))
-            fout.write(cpt.to_bytes('\n'))
+            fout.write(word[0].encode())
+            fout.write(b'\n')
 
 
 def __load_dict(tar_file, dict_size, lang, reverse=False):
diff --git a/python/paddle/fluid/backward.py b/python/paddle/fluid/backward.py
index ac2fd8caf8e85d6f95263165ccd709596ee278fa..e9babc79c3ac88b0e30b96f61d7d534748cb03a3 100755
--- a/python/paddle/fluid/backward.py
+++ b/python/paddle/fluid/backward.py
@@ -391,10 +391,10 @@ def _infer_var_data_type_shape_(grad_var_name, block):
     """
     Infer the data type and shape of given grad variable
     """
-    grad_var = block.desc.find_var(cpt.to_bytes(grad_var_name))
+    grad_var = block.desc.find_var(grad_var_name.encode())
     fwd_name = _strip_grad_suffix_(grad_var_name)
-    if block.desc.has_var_recursive(cpt.to_bytes(fwd_name)):
-        fwd_var = block.desc.find_var_recursive(cpt.to_bytes(fwd_name))
+    if block.desc.has_var_recursive(fwd_name.encode()):
+        fwd_var = block.desc.find_var_recursive(fwd_name.encode())
         grad_var.set_dtype(fwd_var.dtype())
         grad_var.set_shape(fwd_var.shape())
     else:
@@ -1457,7 +1457,7 @@ def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map):
             if grad_var_ins:
                 existing_grad_var_ins = [
                     var for var in grad_var_ins
-                    if block.desc.has_var_recursive(cpt.to_bytes(var))
+                    if block.desc.has_var_recursive(var.encode())
                     or var in parent_op_vars
                 ]
                 if not existing_grad_var_ins:
@@ -1476,10 +1476,10 @@
        new_vars = set()
        # create new gradient variables
        for grad_var_name in op_desc.output_arg_names():
-            if block.desc.has_var_recursive(cpt.to_bytes(
-                    grad_var_name)) or grad_var_name == core.empty_var_name():
+            if block.desc.has_var_recursive(grad_var_name.encode(
+            )) or grad_var_name == core.empty_var_name():
                continue
-            block.desc.var(cpt.to_bytes(grad_var_name))
+            block.desc.var(grad_var_name.encode())
            new_vars.add(grad_var_name)
            if grad_var_name not in grad_to_var:
                continue
@@ -1930,8 +1930,8 @@
            if _some_in_set_(op.desc.output_arg_names(), current_output_names):
                for name in op.desc.input_arg_names():
                    current_output_names.add(name)
-                    if not block.desc.find_var(cpt.to_bytes(name)) \
-                            and parent_block.desc.find_var(cpt.to_bytes(name)):
+                    if not block.desc.find_var(name.encode()) \
+                            and parent_block.desc.find_var(name.encode()):
                        parent_block_output_names.add(name)
 
        block = parent_block
diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py b/python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py
index 8592e9e8919520549a21df94c17547f2e69788e3..2e545a91a990916800638a37d070e4ad012b2155 100644
--- a/python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py
+++ b/python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py
@@ -30,8 +30,7 @@ from paddle.fluid.framework import _apply_pass
 from paddle.fluid.contrib.mixed_precision.decorator import AutoMixedPrecisionLists
 from paddle.fluid.contrib.mixed_precision.fp16_utils import rewrite_program, cast_model_to_fp16
 from paddle.fluid.dygraph.amp.auto_cast import _in_amp_guard, _in_pure_fp16_guard
-import paddle.compat as cpt
-from paddle import _C_ops, _legacy_C_ops
+from paddle import _legacy_C_ops
 
 
 class NestSequence(object):
@@ -833,8 +832,7 @@ class PartialProgramLayer:
        # be user wanted result.
        for param in params:
            grad_name = param.name + core.grad_var_suffix()
-            grad_var = train_program.desc.block(0).find_var(
-                cpt.to_bytes(grad_name))
+            grad_var = train_program.desc.block(0).find_var(grad_name.encode())
            # NOTE: cannot find var desc maybe no problem, such as in batch_norm
            if grad_var is None:
                continue
diff --git a/python/paddle/fluid/dygraph/io.py b/python/paddle/fluid/dygraph/io.py
index cf5a85b44f0b432906e2b57ac813adc021e5e626..6671499e1c4ef9078203d11571522291cc2e0b74 100644
--- a/python/paddle/fluid/dygraph/io.py
+++ b/python/paddle/fluid/dygraph/io.py
@@ -145,7 +145,7 @@ def _append_loaded_suffix_to_var(program_desc):
        var_desc.set_name(new_name)
        for block_idx in six.moves.range(program_desc.num_blocks()):
            block = program_desc.block(block_idx)
-            block._rename_var(cpt.to_bytes(old_name), cpt.to_bytes(new_name))
+            block._rename_var(old_name.encode(), new_name.encode())
            for op_idx in six.moves.range(block.op_size()):
                op = block.op(op_idx)
                op._rename_input(old_name, new_name)
@@ -224,8 +224,7 @@ def _rename_var_program_desc(program_desc, include=None, exclude=None):
            else:
                name_new = name_old
            if name_old != name_new:
-                cur_block._rename_var(cpt.to_bytes(name_old),
-                                      cpt.to_bytes(name_new))
+                cur_block._rename_var(name_old.encode(), name_new.encode())
            if not is_double_grad_var:
                dict_rename_var_old_new[name_old] = name_new
                dict_rename_var_new_old[name_new] = name_old
@@ -259,11 +258,11 @@ def _rename_var_program_desc(program_desc, include=None, exclude=None):
                        op._rename_input(
                            input_arg_name,
                            dict_rename_var_old_new[input_arg_name])
-                        if cur_block.has_var(cpt.to_bytes(input_arg_name)):
+                        if cur_block.has_var(input_arg_name.encode()):
                            cur_block._rename_var(
-                                cpt.to_bytes(input_arg_name),
-                                cpt.to_bytes(
-                                    dict_rename_var_old_new[input_arg_name]))
+                                input_arg_name.encode(),
+                                dict_rename_var_old_new[input_arg_name].encode(
+                                ))
            for output_arg_name in op.output_arg_names():
                if output_arg_name in dict_rename_var_old_new:
                    if output_arg_name != dict_rename_var_old_new[
@@ -271,11 +270,11 @@ def _rename_var_program_desc(program_desc, include=None, exclude=None):
                            output_arg_name]:
                        op._rename_output(
                            output_arg_name,
                            dict_rename_var_old_new[output_arg_name])
-                        if cur_block.has_var(cpt.to_bytes(output_arg_name)):
+                        if cur_block.has_var(output_arg_name.encode()):
                            cur_block._rename_var(
-                                cpt.to_bytes(output_arg_name),
-                                cpt.to_bytes(
-                                    dict_rename_var_old_new[output_arg_name]))
+                                output_arg_name.encode(),
+                                dict_rename_var_old_new[output_arg_name].encode(
+                                ))
    program_desc.flush()
    return dict_rename_var_new_old, dict_rename_var_old_new
@@ -413,25 +412,25 @@
            op = root_block.op(i)
            if op.type() == 'feed':
                ops_to_remove.append(i)
-                feed_var_name = cpt.to_bytes(op.input('X')[0])
+                feed_var_name = op.input('X')[0].encode()
                root_block._remove_var(feed_var_name)
                self._input_descs.append(
-                    root_block.find_var(cpt.to_bytes(op.output('Out')[0])))
+                    root_block.find_var(op.output('Out')[0].encode()))
            elif op.type() == 'scale' and op.output('Out')[0].startswith(
                    'save_infer_model/scale_'):
                ops_to_remove.append(i)
-                out_var_name = cpt.to_bytes(op.output('Out')[0])
+                out_var_name = op.output('Out')[0].encode()
                root_block._remove_var(out_var_name)
                self._output_descs.append(
-                    root_block.find_var(cpt.to_bytes(op.input('X')[0])))
+                    root_block.find_var(op.input('X')[0].encode()))
            elif op.type() == 'fetch':
                ops_to_remove.append(i)
-                fetch_var_name = cpt.to_bytes(op.output('Out')[0])
+                fetch_var_name = op.output('Out')[0].encode()
                root_block._remove_var(fetch_var_name)
                # NOTE: some old pre-train models have no extra scale_op
                if not op.input('X')[0].startswith('save_infer_model/scale_'):
                    self._output_descs.append(
-                        root_block.find_var(cpt.to_bytes(op.input('X')[0])))
+                        root_block.find_var(op.input('X')[0].encode()))
            else:
                if op.has_attr("op_callstack"):
                    op.remove_attr("op_callstack")
@@ -937,7 +936,7 @@ def _run_dygraph(instance, input, program_holder):
    # be user wanted result.
    for persistable_var in persistable_vars:
        grad_var_name = persistable_var.name + core.grad_var_suffix()
-        grad_var = trace_program.block(0).find_var(cpt.to_bytes(grad_var_name))
+        grad_var = trace_program.block(0).find_var(grad_var_name.encode())
        # NOTE: cannot find var desc maybe not problem,
        # such as in batch_norm
        if grad_var is None:
diff --git a/python/paddle/fluid/executor.py b/python/paddle/fluid/executor.py
index 3eb8c23a07b69e1e6204dcb6aad994bece924d22..09fe42e705742b67dd65825fca4be0072dbfbed1 100755
--- a/python/paddle/fluid/executor.py
+++ b/python/paddle/fluid/executor.py
@@ -27,7 +27,6 @@ from .framework import convert_np_dtype_to_dtype_, _apply_pass
 from . import core
 from . import unique_name
 from . import compiler
-from .. import compat as cpt
 from .trainer_factory import TrainerFactory
 from .trainer_factory import FetchHandlerMonitor
 import copy
@@ -1674,7 +1673,7 @@ class Executor(object):
        else:
            global_block = program.global_block()
            for varname in global_block.vars:
-                vardesc = global_block.desc.find_var(cpt.to_bytes(varname))
+                vardesc = global_block.desc.find_var(varname.encode())
                varobj = global_block.vars[varname]
 
                # Can not check var build by fluid.layers.data(), bucause fluid.layers.data() had not set need_check_feed
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 49c0bb24d6d203ef62e8fd877041ae229de6caa7..e709a02b388af020b1942c371b20b484fd02975f 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -1391,10 +1391,10 @@
        is_new_var = False
        name = cpt.to_text(name)
 
-        self.desc = self.block.desc.find_var(cpt.to_bytes(name))
+        self.desc = self.block.desc.find_var(name.encode())
 
        if self.desc is None:
-            self.desc = self.block.desc.var(cpt.to_bytes(name))
+            self.desc = self.block.desc.var(name.encode())
            is_new_var = True
 
        if is_new_var:
@@ -3691,9 +3691,9 @@
        else:
            raise ValueError("unsupported var type: %s", type(v))
        orig_var_type = v.type
-        self.desc._rename_var(cpt.to_bytes(name), cpt.to_bytes(new_name))
+        self.desc._rename_var(name.encode(), new_name.encode())
        # NOTE: v is destroyed by C++ after calling _rename_var.
-        d = self.desc.find_var(cpt.to_bytes(new_name))
+        d = self.desc.find_var(new_name.encode())
        if var_type == "Parameter":
            if in_dygraph_mode():
                var = EagerParamBase(d.shape(),
@@ -3744,7 +3744,7 @@
    def _remove_var(self, name, sync=True):
        if sync == True:
            self._sync_with_cpp()
-        self.desc._remove_var(cpt.to_bytes(name))
+        self.desc._remove_var(name.encode())
        del self.vars[name]
 
    def create_parameter(self, *args, **kwargs):
@@ -3953,7 +3953,7 @@
 
        # sync variables removed from c++ end
        for var in list(self.vars.keys()):
-            if not self.desc.find_var(cpt.to_bytes(var)):
+            if not self.desc.find_var(var.encode()):
                self.vars.pop(var)
 
        # sync operators from cpp
@@ -5839,7 +5839,7 @@
                root_block._remove_op(0, read_op_idx + 1)
        for var in root_block.all_vars():
            if var.type() == core.VarDesc.VarType.READER:
-                root_block._remove_var(cpt.to_bytes(var.name()))
+                root_block._remove_var(var.name().encode())
 
        # change all `is_test` attributes to True
        for i in six.moves.range(res.desc.num_blocks()):
diff --git a/python/paddle/fluid/ir.py b/python/paddle/fluid/ir.py
index aca134a1df55a9982a3779e03313481cce712e5f..3e58af416d506fe738431c67ef0745f18c19cdda 100644
--- a/python/paddle/fluid/ir.py
+++ b/python/paddle/fluid/ir.py
@@ -158,7 +158,7 @@ class RegisterPassHelper(object):
    def _prune_program_desc(self, ops):
        for op_desc in ops:
            default_attrs = core.get_op_attrs_default_value(
-                paddle.compat.to_bytes(op_desc.type))
+                op_desc.type.encode())
            remove_attrs = list()
            for attr in op_desc.attrs:
                # attr must not in
diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py
index a6044e926d03cdcb3f87a5611efad6fec4a3d9fc..289cb9b9044abdfeccb26cb14486fba6473dd522 100755
--- a/python/paddle/fluid/layers/control_flow.py
+++ b/python/paddle/fluid/layers/control_flow.py
@@ -2450,10 +2450,10 @@ class ConditionalBlock(object):
 
        new_vars = set()
        for grad_var_name in new_op_desc.output_arg_names():
-            if grad_sub_block.desc.has_var_recursive(cpt.to_bytes(
-                    grad_var_name)) or grad_var_name == core.empty_var_name():
+            if grad_sub_block.desc.has_var_recursive(grad_var_name.encode(
+            )) or grad_var_name == core.empty_var_name():
                continue
-            grad_sub_block.desc.var(cpt.to_bytes(grad_var_name))
+            grad_sub_block.desc.var(grad_var_name.encode())
            new_vars.add(grad_var_name)
            if grad_var_name not in op_grad_to_var:
                continue
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/program_config.py b/python/paddle/fluid/tests/unittests/ir/inference/program_config.py
index abb353977b4e9d01fe0e95b3278ec931f0a7df86..5ad6f1c885cf95a28faa8f2c7a4cdf161fc9ed27 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/program_config.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/program_config.py
@@ -18,7 +18,6 @@ import enum
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
-from paddle import compat as cpt
 from paddle.fluid.initializer import NumpyArrayInitializer
 from paddle.fluid.framework import convert_np_dtype_to_dtype_
 
@@ -120,7 +119,7 @@ class BlockConfig:
 
    def fill_block_desc(self, block_desc):
        for name in self.vars:
-            var_desc = block_desc.var(cpt.to_bytes(name))
+            var_desc = block_desc.var(name.encode())
            var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR)
            if self.vars_lod_level is not None and name in self.vars_lod_level.keys(
            ):
@@ -147,9 +146,9 @@
        for name, values in op_config.outputs.items():
            op_desc.set_output(name, values)
            for v in values:
-                if block_desc.has_var_recursive(cpt.to_bytes(v)):
+                if block_desc.has_var_recursive(v.encode()):
                    continue
-                var_desc = block_desc.var(cpt.to_bytes(v))
+                var_desc = block_desc.var(v.encode())
                var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR)
                if op_config.outputs_var_type is not None and v in op_config.outputs_var_type.keys(
                ):
@@ -216,13 +215,13 @@
    util_program = fluid.Program()
    main_block_desc = main_program_desc.block(0)
 
-    var_desc = main_block_desc.var(cpt.to_bytes("feed"))
+    var_desc = main_block_desc.var(b"feed")
    var_desc.set_type(core.VarDesc.VarType.FEED_MINIBATCH)
    var_desc.set_persistable(True)
 
    index = 0
    for name, tensor_config in program_config.inputs.items():
-        var_desc = main_block_desc.var(cpt.to_bytes(name))
+        var_desc = main_block_desc.var(name.encode())
        var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR)
        var_desc.set_dtype(convert_np_dtype_to_dtype_(tensor_config.dtype))
        var_desc.set_shape(tensor_config.shape)
@@ -239,7 +238,7 @@
 
    save_var_map = {}
    for name, tensor_config in program_config.weights.items():
-        var_desc = main_block_desc.var(cpt.to_bytes(name))
+        var_desc = main_block_desc.var(name.encode())
        var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR)
        var_desc.set_dtype(convert_np_dtype_to_dtype_(tensor_config.dtype))
        var_desc.set_shape(tensor_config.shape)
@@ -280,9 +279,9 @@
        for name, values in op_config.outputs.items():
            op_desc.set_output(name, values)
            for v in values:
-                if main_block_desc.has_var_recursive(cpt.to_bytes(v)):
+                if main_block_desc.has_var_recursive(v.encode()):
                    continue
-                var_desc = main_block_desc.var(cpt.to_bytes(v))
+                var_desc = main_block_desc.var(v.encode())
                var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR)
                if op_config.outputs_var_type is not None and v in op_config.outputs_var_type.keys(
                ):
@@ -303,7 +302,7 @@
        op_desc.check_attrs()
 
    for index, name in enumerate(program_config.outputs):
-        var_desc = main_block_desc.var(cpt.to_bytes("fetch"))
+        var_desc = main_block_desc.var(b"fetch")
        var_desc.set_type(core.VarDesc.VarType.FETCH_LIST)
        var_desc.set_need_check_feed(True)
        op_desc = main_block_desc.append_op()
diff --git a/python/paddle/fluid/tests/unittests/npu/test_run_program_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_run_program_op_npu.py
index 61df52cfd08348f559ccd36c22bc69fe1681a098..5401228d2835c515c4436c589c46fb58ed8247a2 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_run_program_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_run_program_op_npu.py
@@ -24,7 +24,6 @@ from op_test import OpTest
 import paddle
 from paddle import _C_ops, _legacy_C_ops
 import paddle.fluid as fluid
-from paddle import compat as cpt
 from paddle.fluid import core, framework, executor
 from paddle.fluid.layers.utils import _hash_with_id
 from paddle.fluid.framework import _in_eager_mode_
@@ -241,7 +240,7 @@ class RunProgramNPUOpTest(unittest.TestCase):
        grad_name = name + core.grad_var_suffix()
        for i in six.moves.range(self.program_desc.num_blocks()):
            block = self.program_desc.block(i)
-            var_desc = block.find_var_recursive(cpt.to_bytes(grad_name))
+            var_desc = block.find_var_recursive(grad_name.encode())
            return var_desc.type() if var_desc is not None else None
 
 
diff --git a/python/paddle/fluid/tests/unittests/test_eager_run_program.py b/python/paddle/fluid/tests/unittests/test_eager_run_program.py
index ad4ada7c3a5fd03f324bfc4c8f6023bb56fff53a..df151f27ec21a5ffd9026ac7e9a52a4cfb76a7d6 100644
--- a/python/paddle/fluid/tests/unittests/test_eager_run_program.py
+++ b/python/paddle/fluid/tests/unittests/test_eager_run_program.py
@@ -48,7 +48,7 @@ def _append_backward_desc(main_program, outs):
 #     for param in params:
 #         grad_name = param.name + core.grad_var_suffix()
 #         grad_var = train_program.desc.block(0).find_var(
-#             cpt.to_bytes(grad_name))
+#             grad_name.encode())
 #         # NOTE: cannot find var desc maybe no problem, such as in batch_norm
 #         if grad_var is None:
 #             continue
diff --git a/python/paddle/fluid/tests/unittests/test_protobuf_descs.py b/python/paddle/fluid/tests/unittests/test_protobuf_descs.py
index ee522bc77f753ee9a86367ae46aab43b020869c4..6022bf02602cbd01e35a4f4e04b6811d92821d56 100644
--- a/python/paddle/fluid/tests/unittests/test_protobuf_descs.py
+++ b/python/paddle/fluid/tests/unittests/test_protobuf_descs.py
@@ -14,7 +14,6 @@
 
 import unittest
 import paddle.fluid.core as core
-import paddle.compat as cpt
 from paddle.fluid.framework import Program
 
 
@@ -112,7 +111,7 @@ class TestVarDesc(unittest.TestCase):
    def test_shape(self):
        program_desc = core.ProgramDesc()
        block = program_desc.block(0)
-        var = block.var(cpt.to_bytes('my_var'))
+        var = block.var(b'my_var')
        var.set_type(core.VarDesc.VarType.SELECTED_ROWS)
        src_shape = [3, 2, 10, 8]
        var.set_shape(src_shape)
@@ -123,7 +122,7 @@
    def test_multiple_shape(self):
        program_desc = core.ProgramDesc()
        block = program_desc.block(0)
-        var = block.var(cpt.to_bytes('my_reader'))
+        var = block.var(b'my_reader')
        var.set_type(core.VarDesc.VarType.READER)
        src_shapes = [[2, 3, 3], [4, 5], [6, 7, 8, 9]]
        var.set_shapes(src_shapes)
@@ -134,7 +133,7 @@
    def test_dtype(self):
        program_desc = core.ProgramDesc()
        block = program_desc.block(0)
-        var = block.var(cpt.to_bytes('my_var'))
+        var = block.var(b'my_var')
        var.set_type(core.VarDesc.VarType.LOD_TENSOR)
        var.set_dtype(core.VarDesc.VarType.INT32)
        self.assertEqual(core.VarDesc.VarType.INT32, var.dtype())
@@ -143,7 +142,7 @@
    def test_multiple_dtype(self):
        program_desc = core.ProgramDesc()
        block = program_desc.block(0)
-        var = block.var(cpt.to_bytes('my_reader'))
+        var = block.var(b'my_reader')
        var.set_type(core.VarDesc.VarType.READER)
        src_types = [
            core.VarDesc.VarType.INT32, core.VarDesc.VarType.FP64,
@@ -156,7 +155,7 @@
    def test_multiple_lod_level(self):
        program_desc = core.ProgramDesc()
        block = program_desc.block(0)
-        var = block.var(cpt.to_bytes('my_reader'))
+        var = block.var(b'my_reader')
        var.set_type(core.VarDesc.VarType.READER)
        src_types = [3, 1, 2]
        var.set_lod_levels(src_types)
@@ -171,12 +170,12 @@
        self.assertIsNotNone(program_desc)
        block = program_desc.block(0)
        self.assertIsNotNone(block)
-        var1 = block.var(cpt.to_bytes("var1"))
-        var2 = block.var(cpt.to_bytes("var2"))
-        var3 = block.var(cpt.to_bytes("var3"))
+        var1 = block.var(b"var1")
+        var2 = block.var(b"var2")
+        var3 = block.var(b"var3")
        all_vars = block.all_vars()
        self.assertEqual(set(all_vars), {var1, var2, var3})
-        var2_re = block.find_var(cpt.to_bytes("var2"))
+        var2_re = block.find_var(b"var2")
        self.assertEqual(var2_re, var2)
 
    def test_add_op(self):
diff --git a/python/paddle/fluid/tests/unittests/test_pybind_interface.py b/python/paddle/fluid/tests/unittests/test_pybind_interface.py
index efb2c1d7da5943c3587f9c4bb05b642d51556c5a..854aa7f3e9e76a99425e3e4b8704fe3ccad66020 100644
--- a/python/paddle/fluid/tests/unittests/test_pybind_interface.py
+++ b/python/paddle/fluid/tests/unittests/test_pybind_interface.py
@@ -14,14 +14,13 @@
 
 import unittest
 from paddle.fluid import core
-from paddle import compat as cpt
 
 
 class TestPybindInference(unittest.TestCase):
 
    # call get_op_attrs_default_value for c++ coverage rate
    def test_get_op_attrs_default_value(self):
-        core.get_op_attrs_default_value(cpt.to_bytes("fill_constant"))
+        core.get_op_attrs_default_value(b"fill_constant")
 
        # the default values of Op 'fill_constant'
        #
diff --git a/python/paddle/fluid/tests/unittests/test_run_program_op.py b/python/paddle/fluid/tests/unittests/test_run_program_op.py
index 56fda34c209387e54ffb04fcfdb64c667d4a882a..89642bb66168b411fae88de445a40c95799e5b99 100644
--- a/python/paddle/fluid/tests/unittests/test_run_program_op.py
+++ b/python/paddle/fluid/tests/unittests/test_run_program_op.py
@@ -20,7 +20,6 @@ import six
 import paddle
 from paddle import _legacy_C_ops
 import paddle.fluid as fluid
-from paddle import compat as cpt
 from paddle.fluid import core, framework
 from paddle.fluid.layers.utils import _hash_with_id
 from paddle.fluid.framework import _in_eager_mode_
@@ -301,7 +300,7 @@ class RunProgramOpTest(unittest.TestCase):
        grad_name = name + core.grad_var_suffix()
        for i in six.moves.range(self.program_desc.num_blocks()):
            block = self.program_desc.block(i)
-            var_desc = block.find_var_recursive(cpt.to_bytes(grad_name))
+            var_desc = block.find_var_recursive(grad_name.encode())
            return var_desc.type() if var_desc is not None else None
 
 
diff --git a/python/paddle/incubate/autograd/primx.py b/python/paddle/incubate/autograd/primx.py
index e61a77791fd19a77fd0e479400ee29178d0cd98c..c57f651d344c34b156e2d37fab39d1049db33adc 100644
--- a/python/paddle/incubate/autograd/primx.py
+++ b/python/paddle/incubate/autograd/primx.py
@@ -15,7 +15,6 @@
 from collections import OrderedDict
 
 import paddle
-from paddle import compat as cpt
 from paddle.fluid import framework as framework
 from paddle.fluid.framework import Operator, default_main_program
 from paddle.incubate.autograd.utils import as_tensors
@@ -220,7 +219,7 @@ class Transform(object):
        block = self.block
        for var in vars_to_erase:
            name = var.name
-            block.desc._remove_var(cpt.to_bytes(name))
+            block.desc._remove_var(name.encode())
            del block.vars[name]
        block._sync_with_cpp()
 
@@ -512,7 +511,7 @@
        assert var_name in to_bind_rev, 'var_name "{}" is not in to_bind_rev.'.format(
            var_name)
        if var_name != to_bind_rev[var_name]:
-            block.desc._remove_var(cpt.to_bytes(var_name))
+            block.desc._remove_var(var_name.encode())
            del block.vars[var_name]
 
    block._sync_with_cpp()
diff --git a/python/paddle/text/datasets/wmt16.py b/python/paddle/text/datasets/wmt16.py
index 4cac2f1ed387a142423624d0705ffd90aab0fd41..40e464cb0ebae45f275fb7af883fc4bca03cc131 100644
--- a/python/paddle/text/datasets/wmt16.py
+++ b/python/paddle/text/datasets/wmt16.py
@@ -170,14 +170,14 @@
 
        with open(dict_path, "wb") as fout:
            fout.write(
-                cpt.to_bytes("%s\n%s\n%s\n" % (START_MARK, END_MARK, UNK_MARK)))
+                ("%s\n%s\n%s\n" % (START_MARK, END_MARK, UNK_MARK)).encode())
            for idx, word in enumerate(
                    sorted(six.iteritems(word_dict),
                           key=lambda x: x[1],
                           reverse=True)):
                if idx + 3 == dict_size: break
-                fout.write(cpt.to_bytes(word[0]))
-                fout.write(cpt.to_bytes('\n'))
+                fout.write(word[0].encode())
+                fout.write(b'\n')
 
    def _load_data(self):
diff --git a/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py b/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py
index f2c04f3cba8f857eb151718a8fe7ac0c3c653880..087c88d937be632c0d92a77926c56921c6c91067 100644
--- a/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py
+++ b/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py
@@ -14,7 +14,6 @@
 
 import paddle.fluid.framework as framework
 from paddle.fluid import core
-from paddle import compat as cpt
 
 
 # collect original ops: op which has both inference and grid defination
@@ -59,7 +58,7 @@ QUANT = "quant"
 
 
 def get_attr_default_value(op_name):
-    return core.get_op_attrs_default_value(cpt.to_bytes(op_name))
+    return core.get_op_attrs_default_value(op_name.encode())
 
 
 def get_vars_info(op_vars_proto):
diff --git a/tools/print_op_desc.py b/tools/print_op_desc.py
index b85103a7a25e164c83880b518caa803d53923892..cf46849add4b1751b579ffdb25945f4b2d44e32f 100644
--- a/tools/print_op_desc.py
+++ b/tools/print_op_desc.py
@@ -45,7 +45,6 @@ Usage:
 import paddle.fluid.framework as framework
 from paddle.fluid import core
 import json
-from paddle import compat as cpt
 
 INPUTS = "Inputs"
 OUTPUTS = "Outputs"
@@ -64,7 +63,7 @@ QUANT = "quant"
 
 
 def get_attr_default_value(op_name):
-    return core.get_op_attrs_default_value(cpt.to_bytes(op_name))
+    return core.get_op_attrs_default_value(op_name.encode())
 
 
 def get_vars_info(op_vars_proto):
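Reviewer note, not part of the patch: every hunk above is the same mechanical rewrite, replacing the Python 2/3 shim paddle.compat.to_bytes(...) with plain str.encode() (or a bytes literal for fixed names). On Python 3 the shim amounted to a UTF-8 encode for str inputs, which is also the default of str.encode(); the sketch below illustrates that assumed equivalence with a made-up variable name.

    # Illustration only; `name` is a hypothetical example, not from the patch.
    name = "fc_0.w_0@GRAD"

    # str.encode() defaults to UTF-8, so the two spellings agree:
    assert name.encode() == name.encode("utf-8")

    # For fixed strings the patch writes bytes literals directly,
    # e.g. b"feed" / b"fetch" instead of "feed".encode():
    assert b"feed" == "feed".encode()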