未验证 提交 e6fb551c 编写于 作者: N Nyakku Shigure 提交者: GitHub

[CodeStyle][py2] remove `six` package (part 1) (#46965)

上级 3684ad19
......@@ -19,7 +19,7 @@ import os
import sys
from paddle.dataset.common import download
import tarfile
from six.moves import StringIO
from io import StringIO
import hashlib
import tarfile
import argparse
......
......@@ -31,8 +31,7 @@ import numpy
import paddle.dataset.common
import paddle.utils.deprecated as deprecated
import tarfile
import six
from six.moves import cPickle as pickle
import pickle
__all__ = []
......@@ -46,11 +45,10 @@ CIFAR100_MD5 = 'eb9058c3a382ffc7106e4002c42a8d85'
def reader_creator(filename, sub_name, cycle=False):
def read_batch(batch):
data = batch[six.b('data')]
labels = batch.get(six.b('labels'), batch.get(six.b('fine_labels'),
None))
data = batch[b'data']
labels = batch.get(b'labels', batch.get(b'fine_labels', None))
assert labels is not None
for sample, label in six.moves.zip(data, labels):
for sample, label in zip(data, labels):
yield (sample / 255.0).astype(numpy.float32), int(label)
def reader():
......
......@@ -20,7 +20,7 @@ import shutil
import sys
import importlib
import paddle.dataset
import six.moves.cPickle as pickle
import pickle
import tempfile
import glob
import paddle
......
......@@ -24,7 +24,6 @@ import tarfile
import gzip
import paddle.dataset.common
import paddle.utils.deprecated as deprecated
from six.moves import zip, range
__all__ = []
......
......@@ -30,39 +30,34 @@ the image layout as follows.
be keep consistent between the training and inference period.
"""
import six
import numpy as np
# FIXME(minqiyang): this is an ugly fix for the numpy bug reported here
# https://github.com/numpy/numpy/issues/12497
if six.PY3:
import subprocess
import sys
import os
interpreter = sys.executable
# Note(zhouwei): if you use Python/C 'PyRun_SimpleString', 'sys.executable'
# will be the C++ executable on Windows
if sys.platform == 'win32' and 'python.exe' not in interpreter:
interpreter = sys.exec_prefix + os.sep + 'python.exe'
import_cv2_proc = subprocess.Popen([interpreter, "-c", "import cv2"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = import_cv2_proc.communicate()
retcode = import_cv2_proc.poll()
if retcode != 0:
cv2 = None
else:
try:
import cv2
except ImportError:
cv2 = None
import subprocess
import sys
import os
interpreter = sys.executable
# Note(zhouwei): if you use Python/C 'PyRun_SimpleString', 'sys.executable'
# will be the C++ executable on Windows
if sys.platform == 'win32' and 'python.exe' not in interpreter:
interpreter = sys.exec_prefix + os.sep + 'python.exe'
import_cv2_proc = subprocess.Popen([interpreter, "-c", "import cv2"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = import_cv2_proc.communicate()
retcode = import_cv2_proc.poll()
if retcode != 0:
cv2 = None
else:
try:
import cv2
except ImportError:
cv2 = None
import os
import tarfile
import six.moves.cPickle as pickle
import pickle
__all__ = []
......
......@@ -26,7 +26,6 @@ import collections
import tarfile
import re
import string
import six
__all__ = []
......@@ -49,9 +48,8 @@ def tokenize(pattern):
while tf != None:
if bool(pattern.match(tf.name)):
# newline and punctuations removal and ad-hoc tokenization.
yield tarf.extractfile(tf).read().rstrip(
six.b("\n\r")).translate(None, six.b(
string.punctuation)).lower().split()
yield tarf.extractfile(tf).read().rstrip(b'\n\r').translate(
None, string.punctuation.encode('latin-1')).lower().split()
tf = tarf.next()
......@@ -66,11 +64,11 @@ def build_dict(pattern, cutoff):
word_freq[word] += 1
# Not sure if we should prune less-frequent words here.
word_freq = [x for x in six.iteritems(word_freq) if x[1] > cutoff]
word_freq = [x for x in word_freq.items() if x[1] > cutoff]
dictionary = sorted(word_freq, key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*dictionary))
word_idx = dict(list(zip(words, six.moves.range(len(words)))))
word_idx = dict(list(zip(words, range(len(words)))))
word_idx['<unk>'] = len(words)
return word_idx
......
......@@ -23,7 +23,6 @@ import paddle.dataset.common
import paddle.utils.deprecated as deprecated
import collections
import tarfile
import six
__all__ = []
......@@ -68,13 +67,11 @@ def build_dict(min_word_freq=50):
# remove <unk> for now, since we will set it as last index
del word_freq['<unk>']
word_freq = [
x for x in six.iteritems(word_freq) if x[1] > min_word_freq
]
word_freq = [x for x in word_freq.items() if x[1] > min_word_freq]
word_freq_sorted = sorted(word_freq, key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*word_freq_sorted))
word_idx = dict(list(zip(words, six.moves.range(len(words)))))
word_idx = dict(list(zip(words, range(len(words)))))
word_idx['<unk>'] = len(words)
return word_idx
......@@ -96,7 +93,7 @@ def reader_creator(filename, word_idx, n, data_type):
l = ['<s>'] + l.strip().split() + ['<e>']
if len(l) >= n:
l = [word_idx.get(w, UNK) for w in l]
for i in six.moves.range(n, len(l) + 1):
for i in range(n, len(l) + 1):
yield tuple(l[i - n:i])
elif DataType.SEQ == data_type:
l = l.strip().split()
......
......@@ -23,7 +23,6 @@ import paddle.utils.deprecated as deprecated
import gzip
import numpy
import struct
from six.moves import range
__all__ = []
......
......@@ -28,7 +28,6 @@ import paddle.dataset.common
import paddle.utils.deprecated as deprecated
import re
import functools
import six
__all__ = []
......@@ -206,7 +205,7 @@ def max_movie_id():
Get the maximum value of movie id.
"""
__initialize_meta_info__()
return six.moves.reduce(__max_index_info__, list(MOVIE_INFO.values())).index
return functools.reduce(__max_index_info__, list(MOVIE_INFO.values())).index
@deprecated(
......@@ -219,7 +218,7 @@ def max_user_id():
Get the maximum value of user id.
"""
__initialize_meta_info__()
return six.moves.reduce(__max_index_info__, list(USER_INFO.values())).index
return functools.reduce(__max_index_info__, list(USER_INFO.values())).index
def __max_job_id_impl__(a, b):
......@@ -239,7 +238,7 @@ def max_job_id():
Get the maximum value of job id.
"""
__initialize_meta_info__()
return six.moves.reduce(__max_job_id_impl__,
return functools.reduce(__max_job_id_impl__,
list(USER_INFO.values())).job_id
......
......@@ -20,7 +20,6 @@ parse training set and test set into paddle reader creators.
"""
import numpy as np
import six
import tempfile
import tarfile
import os
......@@ -75,7 +74,7 @@ def load_data(filename, feature_num=14, ratio=0.8):
axis=0), data.sum(axis=0) / data.shape[0]
# if you want to print the distribution of input data, you could use function of feature_range
#feature_range(maximums[:-1], minimums[:-1])
for i in six.moves.range(feature_num - 1):
for i in range(feature_num - 1):
data[:, i] = (data[:, i] - avgs[i]) / (maximums[i] - minimums[i])
offset = int(data.shape[0] * ratio)
UCI_TRAIN_DATA = data[:offset]
......
......@@ -20,7 +20,6 @@ parse training set and test set into paddle reader creators.
"""
import six
import tarfile
import paddle.dataset.common
......@@ -173,8 +172,8 @@ def get_dict(dict_size, reverse=True):
tar_file = paddle.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN)
src_dict, trg_dict = __read_to_dict(tar_file, dict_size)
if reverse:
src_dict = {v: k for k, v in six.iteritems(src_dict)}
trg_dict = {v: k for k, v in six.iteritems(trg_dict)}
src_dict = {v: k for k, v in src_dict.items()}
trg_dict = {v: k for k, v in trg_dict.items()}
return src_dict, trg_dict
......
......@@ -29,7 +29,6 @@ Multi30K: Multilingual English-German Image Descriptions.
"""
import os
import six
import tarfile
from collections import defaultdict
......@@ -63,9 +62,7 @@ def __build_dict(tar_file, dict_size, save_path, lang):
with open(save_path, "wb") as fout:
fout.write(("%s\n%s\n%s\n" % (START_MARK, END_MARK, UNK_MARK)).encode())
for idx, word in enumerate(
sorted(six.iteritems(word_dict),
key=lambda x: x[1],
reverse=True)):
sorted(word_dict.items(), key=lambda x: x[1], reverse=True)):
if idx + 3 == dict_size: break
fout.write(word[0].encode())
fout.write(b'\n')
......
......@@ -13,7 +13,6 @@
# limitations under the License.
import argparse
import six
import os
......@@ -30,7 +29,7 @@ class Command(object):
self.np_path = self.prefix + '/np'
def set_np(self, np):
self.etcd.put(self.np_path, six.b('{}'.format(np)))
self.etcd.put(self.np_path, '{}'.format(np).encode('latin-1'))
def scale_np(self, np):
if self.etcd.get(self.np_path)[0] != None:
......
......@@ -262,7 +262,7 @@ class ElasticManager(object):
logger.info(
f"[lease_heartbeat] register host={self.curr_host}")
self.etcd.put(self.host_path,
six.b(self.curr_host),
self.curr_host.encode('latin-1'),
lease=host_lease)
except Exception as e:
logger.error(
......@@ -276,11 +276,15 @@ class ElasticManager(object):
daemon=True)
keepalived_thread.start()
self.etcd.put(self.host_path, six.b(self.curr_host), lease=host_lease)
self.etcd.put(self.host_path,
self.curr_host.encode('latin-1'),
lease=host_lease)
# endpoints handle DISTRIBUTED_TRAINER_ENDPOINTS and PADDLE_TRAINERS
self.etcd.put(self.endpoints_path,
six.b('{}|{}'.format(self.dist_endpoints, self.trainers)))
self.etcd.put(
self.endpoints_path,
'{}|{}'.format(self.dist_endpoints,
self.trainers).encode('latin-1'))
def endpoints_call_back(event):
if not self.dist_endpoints:
......@@ -433,7 +437,7 @@ class ElasticManager(object):
def _update_endpoint(self, endpoints, hosts):
self.etcd.put(self.endpoints_path,
six.b('{}|{}'.format(endpoints, hosts)))
'{}|{}'.format(endpoints, hosts).encode('latin-1'))
def _update_fault_tolrance(self):
rank = int(os.getenv('PADDLE_TRAINER_ID', -1))
......
......@@ -59,7 +59,6 @@ import sys
import tempfile
import os
import time
import six
import copy
import pathlib
from argparse import ArgumentParser, REMAINDER
......@@ -80,7 +79,7 @@ __all__ = []
def _print_arguments(args):
print("----------- Configuration Arguments -----------")
for arg, value in sorted(six.iteritems(vars(args))):
for arg, value in sorted(vars(args).items()):
print("%s: %s" % (arg, value))
print("------------------------------------------------")
......
......@@ -24,7 +24,6 @@ import shutil
from contextlib import closing
import multiprocessing
import socket
import six
import struct
import json
......@@ -835,9 +834,7 @@ def get_device_proc_info(args):
"gpus' number:{} mod args.nproc_per_node:{} must == 0".format(len(gpus), args.nproc_per_node)
n = int(len(gpus) / int(args.nproc_per_node))
devices_per_proc = [
gpus[i:i + n] for i in six.moves.range(0, len(gpus), n)
]
devices_per_proc = [gpus[i:i + n] for i in range(0, len(gpus), n)]
else:
devices_per_proc = gpus
elif device_mode == DeviceMode.ASCEND_NPU:
......@@ -847,9 +844,7 @@ def get_device_proc_info(args):
"npus' number:{} mod args.nproc_per_node:{} must == 0".format(len(npus), args.nproc_per_node)
n = int(len(npus) / int(args.nproc_per_node))
devices_per_proc = [
npus[i:i + n] for i in six.moves.range(0, len(npus), n)
]
devices_per_proc = [npus[i:i + n] for i in range(0, len(npus), n)]
else:
devices_per_proc = npus
elif device_mode == DeviceMode.XPU:
......@@ -859,9 +854,7 @@ def get_device_proc_info(args):
"xpus' number:{} mod args.nproc_per_node:{} must == 0".format(len(xpus), args.nproc_per_node)
n = int(len(xpus) / int(args.nproc_per_node))
devices_per_proc = [
xpus[i:i + n] for i in six.moves.range(0, len(xpus), n)
]
devices_per_proc = [xpus[i:i + n] for i in range(0, len(xpus), n)]
else:
devices_per_proc = xpus
elif device_mode == DeviceMode.MLU:
......@@ -871,9 +864,7 @@ def get_device_proc_info(args):
"mlus' number:{} mod args.nproc_per_node:{} must == 0".format(len(mlus), args.nproc_per_node)
n = int(len(mlus) / int(args.nproc_per_node))
devices_per_proc = [
mlus[i:i + n] for i in six.moves.range(0, len(mlus), n)
]
devices_per_proc = [mlus[i:i + n] for i in range(0, len(mlus), n)]
else:
devices_per_proc = mlus
elif device_mode == DeviceMode.CPU:
......
......@@ -17,7 +17,6 @@ import multiprocessing
import re
import time
import six
import abc
from paddle.fluid import core
import functools
......@@ -456,7 +455,7 @@ class HDFSClient(FS):
self.pre_commands.append(dfs)
if configs:
for k, v in six.iteritems(configs):
for k, v in configs.items():
config_command = '-D%s=%s' % (k, v)
self.pre_commands.append(config_command)
......
......@@ -17,7 +17,6 @@ from paddle.distributed.launch import plugins
from .node import Node
from .status import Status
from .args_envs import parse_args, fetch_envs, env_args_mapping
import six
import logging
......@@ -43,7 +42,7 @@ class Context(object):
def print(self):
self.logger.info("----------- Configuration ----------------------")
for arg, value in sorted(six.iteritems(vars(self.args))):
for arg, value in sorted(vars(self.args).items()):
self.logger.info("%s: %s" % (arg, value))
self.logger.info("--------------------------------------------------")
......
......@@ -204,7 +204,7 @@ class ETCDMaster(Master):
self.ctx.logger.debug("sync path {} value {}".format(path, value))
while not self.ctx.status.is_done():
self.client.put(path, six.b(value))
self.client.put(path, value.encode('latin-1'))
result = [i for i in self.client.get_prefix(prefix)]
result = copy.deepcopy(result)
......@@ -244,7 +244,7 @@ class ETCDMaster(Master):
#self.client.delete_prefix(self.job_prefix)
beat_path = "{}/{}".format(self.heartbeat_prefix, pod_id)
self.client.put(beat_path, six.b(pod_id), lease=lease)
self.client.put(beat_path, pod_id.encode('latin-1'), lease=lease)
def _beat_watch(event):
self.ctx.status.restart()
......@@ -257,7 +257,9 @@ class ETCDMaster(Master):
try:
lease.refresh()
if pod_id not in self.fetch_peer_alive():
self.client.put(beat_path, six.b(pod_id), lease=lease)
self.client.put(beat_path,
pod_id.encode('latin-1'),
lease=lease)
self.ctx.logger.debug("Heartbeat register again")
except Exception as e:
self.ctx.logger.error("Heartbeat error {}".format(e))
......@@ -307,7 +309,8 @@ class ETCDMaster(Master):
def set_status(self, status):
assert self.client.put(
self.job_prefix, six.b(status),
self.job_prefix,
status.encode('latin-1'),
lease=self.client.lease(600)), "set status failed {}".format(status)
def get_status(self):
......
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import os
__all__ = []
......@@ -21,7 +20,7 @@ __all__ = []
# print configuration after args are well filled in controller init
def log(ctx):
ctx.logger.info("----------- Configuration ----------------------")
for arg, value in sorted(six.iteritems(vars(ctx.args))):
for arg, value in sorted(vars(ctx.args).items()):
ctx.logger.info("%s: %s" % (arg, value))
ctx.logger.info("--------------------------------------------------")
......
......@@ -18,7 +18,6 @@ import collections
import os
import warnings
import logging
import six
import paddle.fluid as fluid
from paddle.fluid import core
import paddle.fluid.framework as framework
......@@ -1029,7 +1028,7 @@ def _get_output_map_from_op(varmap, op):
def get_varlist_from_op_map(var_map):
var_list = []
for key, varlist in six.iteritems(var_map):
for key, varlist in var_map.items():
if not isinstance(varlist, list):
varlist = [varlist]
for i in range(len(varlist)):
......@@ -1079,7 +1078,7 @@ def block_append_op(program, origin_program, block, op):
merge_ordereddict = origin_program.global_block().vars.copy()
merge_ordereddict.update(block.vars)
inputs = _get_input_map_from_op(merge_ordereddict, op)
for key, varlist in six.iteritems(inputs):
for key, varlist in inputs.items():
if not isinstance(varlist, list):
varlist = [varlist]
for var in varlist:
......@@ -1092,7 +1091,7 @@ def block_append_op(program, origin_program, block, op):
block._clone_variable(var, force_persistable=False)
outputs = _get_output_map_from_op(origin_program.global_block().vars, op)
for key, varlist in six.iteritems(outputs):
for key, varlist in outputs.items():
if not isinstance(varlist, list):
varlist = [varlist]
for var in varlist:
......
......@@ -15,7 +15,6 @@
import multiprocessing
import os
import signal
import six
import sys
import warnings
......@@ -171,7 +170,7 @@ def _get_subprocess_env_list(nprocs, options):
env_devices = os.getenv("CUDA_VISIBLE_DEVICES", None)
if env_devices is None or env_devices == "":
env_devices_list = [
str(x) for x in six.moves.range(core.get_cuda_device_count())
str(x) for x in range(core.get_cuda_device_count())
]
else:
env_devices_list = env_devices.split(',')
......@@ -206,7 +205,7 @@ def _get_subprocess_env_list(nprocs, options):
env_devices = os.getenv("XPU_VISIBLE_DEVICES", None)
if env_devices is None or env_devices == "":
env_devices_list = [
str(x) for x in six.moves.range(core.get_xpu_device_count())
str(x) for x in range(core.get_xpu_device_count())
]
else:
env_devices_list = env_devices.split(',')
......@@ -240,7 +239,7 @@ def _get_subprocess_env_list(nprocs, options):
env_devices = os.getenv("MLU_VISIBLE_DEVICES", None)
if env_devices is None or env_devices == "":
env_devices_list = [
str(x) for x in six.moves.range(core.get_mlu_device_count())
str(x) for x in range(core.get_mlu_device_count())
]
else:
env_devices_list = env_devices.split(',')
......
......@@ -21,7 +21,6 @@ import subprocess
from contextlib import closing
import socket
from distutils.util import strtobool
import six
from paddle.distributed.fleet.launch_utils import get_backend_by_compile_flag
from ..utils.log_utils import get_logger
......@@ -542,6 +541,6 @@ def watch_local_trainers(procs, nranks):
def _print_arguments(args):
print("----------- Configuration Arguments -----------")
for arg, value in sorted(six.iteritems(vars(args))):
for arg, value in sorted(vars(args).items()):
print("%s: %s" % (arg, value))
print("------------------------------------------------")
......@@ -19,7 +19,6 @@ from paddle.fluid import program_guard
from . import core
import collections
import copy
import six
import logging
from .. import compat as cpt
from . import unique_name
......@@ -337,20 +336,18 @@ def _create_op_desc_(op_type, inputs, outputs, attrs):
"""
op_desc = core.OpDesc()
op_desc.set_type(op_type)
for para, args in six.iteritems(inputs):
for para, args in inputs.items():
op_desc.set_input(
para,
list(
map(
lambda arg: arg.decode()
if isinstance(arg, six.binary_type) else arg, args)))
for para, args in six.iteritems(outputs):
map(lambda arg: arg.decode()
if isinstance(arg, bytes) else arg, args)))
for para, args in outputs.items():
op_desc.set_output(
para,
list(
map(
lambda arg: arg.decode()
if isinstance(arg, six.binary_type) else arg, args)))
map(lambda arg: arg.decode()
if isinstance(arg, bytes) else arg, args)))
op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
op_device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
......@@ -360,7 +357,7 @@ def _create_op_desc_(op_type, inputs, outputs, attrs):
op_role_attr_name] = core.op_proto_and_checker_maker.OpRole.Backward
if op_device_attr_name not in attrs:
attrs[op_device_attr_name] = ""
for name, val in six.iteritems(attrs):
for name, val in attrs.items():
if isinstance(val, framework.Block):
op_desc.set_block_attr(name, val.desc)
else:
......@@ -604,7 +601,7 @@ def _addup_repetitive_outputs_(op_descs,
# record the latest device
var_device[var_name] = op_device
for var_name, inputs in six.iteritems(renamed_vars):
for var_name, inputs in renamed_vars.items():
if len(renamed_vars[var_name]) > 1:
if len(renamed_vars[var_name]) > _MAX_ADD_NUM_:
_accumulate_gradients_by_sum_op_(var_name, renamed_vars,
......@@ -823,7 +820,7 @@ def _find_not_need_ops(grad_op_descs, forward_ops, input_grad_names_set):
def serialize_op_decs(op_desc):
protostr = op_desc.serialize_to_string()
proto = framework_pb2.OpDesc.FromString(six.binary_type(protostr))
proto = framework_pb2.OpDesc.FromString(bytes(protostr))
return proto.__str__()
......@@ -1375,9 +1372,9 @@ def _find_parent_op_(sub_block):
return None
program = sub_block.program
for block_id in six.moves.range(program.num_blocks):
for block_id in range(program.num_blocks):
block_desc = program.block(block_id).desc
for op_idx in six.moves.range(block_desc.op_size()):
for op_idx in range(block_desc.op_size()):
op = block_desc.op(op_idx)
if op.has_attr("sub_block") and op._block_attr_id(
"sub_block") == sub_block_id:
......@@ -1510,7 +1507,7 @@ def _rename_grad_(block, start_op_idx, grad_to_var, target_grad_map):
op_desc._rename_output(name, new_name)
var_map[name] = new_name
for g, ng in six.iteritems(var_map):
for g, ng in var_map.items():
if g in grad_to_var:
grad_to_var[ng] = grad_to_var[g]
grad_to_var.pop(g)
......@@ -1548,7 +1545,7 @@ def _get_no_grad_set_name(no_grad_set):
for i, no_grad_var in enumerate(no_grad_set):
if isinstance(no_grad_var, framework.Variable):
no_grad_set_name.add(no_grad_var.name)
elif isinstance(no_grad_var, six.string_types):
elif isinstance(no_grad_var, str):
no_grad_set_name.add(no_grad_var)
else:
raise TypeError(
......@@ -1825,11 +1822,11 @@ def append_backward(loss,
parameters = []
for i, param in enumerate(parameter_list):
check_type(param, 'parameter_list[%s]' % i,
(framework.Variable, six.string_types),
(framework.Variable, str),
'fluid.backward.append_backward')
if isinstance(param, framework.Variable):
parameters.append(param.name)
elif isinstance(param, six.string_types):
elif isinstance(param, str):
parameters.append(param)
else:
params = program.global_block().all_parameters()
......
......@@ -13,7 +13,6 @@
# limitations under the License.
import copy
import six
import warnings
import functools
......@@ -829,7 +828,7 @@ def set_gradient_clip(clip, param_list=None, program=None):
if param_list is None:
param_list = program.block(0).all_parameters()
if all(isinstance(elem, six.string_types) for elem in param_list):
if all(isinstance(elem, str) for elem in param_list):
param_list = [program.block(0).var(elem) for elem in param_list]
if not all(isinstance(elem, framework.Parameter) for elem in param_list):
raise TypeError(
......
......@@ -14,7 +14,6 @@
import multiprocessing
import os
import six
import sys
import warnings
from . import framework
......@@ -430,11 +429,11 @@ class CompiledProgram(object):
self._persistable_vars = list(set(self._persistable_vars))
self._persistable_vars.sort()
return core.ParallelExecutor(
places, self._persistable_vars,
self._loss_name if self._loss_name else six.u(''), self._scope,
self._local_scopes, self._exec_strategy, self._build_strategy,
self._graph)
return core.ParallelExecutor(places, self._persistable_vars,
self._loss_name if self._loss_name else '',
self._scope, self._local_scopes,
self._exec_strategy, self._build_strategy,
self._graph)
def _compile_inference(self):
return core.create_paddle_predictor(self._infer_config)
......
......@@ -22,7 +22,6 @@ This API is still under active development and may change drastically.
from ...wrapped_decorator import signature_safe_contextmanager
import numpy as np
import six
from ... import layers
from ...framework import Variable
......@@ -197,7 +196,7 @@ class StateCell(object):
self._helper = LayerHelper('state_cell', name=name)
self._cur_states = {}
self._state_names = []
for state_name, state in six.iteritems(states):
for state_name, state in states.items():
if not isinstance(state, InitState):
raise ValueError('state must be an InitState object.')
self._cur_states[state_name] = state
......@@ -352,7 +351,7 @@ class StateCell(object):
if self._in_decoder and not self._switched_decoder:
self._switch_decoder()
for input_name, input_value in six.iteritems(inputs):
for input_name, input_value in inputs.items():
if input_name not in self._inputs:
raise ValueError('Unknown input %s. '
'Please make sure %s in input '
......@@ -367,7 +366,7 @@ class StateCell(object):
if self._in_decoder and not self._switched_decoder:
self._switched_decoder()
for state_name, decoder_state in six.iteritems(self._states_holder):
for state_name, decoder_state in self._states_holder.items():
if id(self._cur_decoder_obj) not in decoder_state:
raise ValueError('Unknown decoder object, please make sure '
'switch_decoder been invoked.')
......@@ -687,7 +686,7 @@ class BeamSearchDecoder(object):
feed_dict = {}
update_dict = {}
for init_var_name, init_var in six.iteritems(self._input_var_dict):
for init_var_name, init_var in self._input_var_dict.items():
if init_var_name not in self.state_cell._inputs:
raise ValueError('Variable ' + init_var_name +
' not found in StateCell!\n')
......@@ -735,8 +734,7 @@ class BeamSearchDecoder(object):
self.state_cell.update_states()
self.update_array(prev_ids, selected_ids)
self.update_array(prev_scores, selected_scores)
for update_name, var_to_update in six.iteritems(
update_dict):
for update_name, var_to_update in update_dict.items():
self.update_array(var_to_update, feed_dict[update_name])
def read_array(self, init, is_ids=False, is_scores=False):
......
......@@ -16,7 +16,6 @@ Contrib layers just related to the neural network.
"""
import os
import six
import warnings
import inspect
......
......@@ -20,8 +20,6 @@ batch size to fully utilize a GPU.
This API is still under active development and may change drastically.
"""
import six
from .. import core
from ..framework import Program, Variable
......
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import abc
import copy
......
......@@ -98,8 +98,7 @@ def combine_abs_max_and_hist(tensor, origin_max, origin_hist, bins,
return new_max, new_hist
@six.add_metaclass(abc.ABCMeta)
class BaseQuantizer(object):
class BaseQuantizer(metaclass=abc.ABCMeta):
"""
Base quantizer for activation and weight.
"""
......@@ -173,8 +172,7 @@ class PerChannelAbsmaxQuantizer(BaseQuantizer):
self.thresholds = self.abs_max_vals
@six.add_metaclass(abc.ABCMeta)
class BaseHistQuantizer(BaseQuantizer):
class BaseHistQuantizer(BaseQuantizer, metaclass=abc.ABCMeta):
"""
"""
......
......@@ -18,7 +18,6 @@ import sys
import argparse
import logging
import struct
import six
import numpy as np
import time
import paddle
......
......@@ -18,7 +18,6 @@ import sys
import argparse
import logging
import struct
import six
import numpy as np
import time
import paddle
......
......@@ -13,7 +13,6 @@
# limitations under the license.
import os
import six
import numpy as np
import unittest
import paddle
......
......@@ -17,7 +17,6 @@ import unittest
import random
import numpy as np
import paddle.fluid as fluid
import six
import paddle
from paddle.fluid.framework import IrGraph
from paddle.fluid.contrib.slim.quantization import QuantizationFreezePass
......
......@@ -17,7 +17,6 @@ import unittest
import random
import numpy as np
import paddle.fluid as fluid
import six
import paddle
from paddle.fluid.framework import IrGraph
from paddle.fluid.contrib.slim.quantization import QuantizationTransformPass
......@@ -38,7 +37,7 @@ def linear_fc(num):
data = fluid.layers.data(name='image', shape=[1, 32, 32], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
hidden = data
for _ in six.moves.xrange(num):
for _ in range(num):
hidden = fluid.layers.fc(hidden, size=128, act='relu')
loss = fluid.layers.cross_entropy(input=hidden, label=label)
loss = paddle.mean(loss)
......@@ -72,7 +71,7 @@ def residual_block(num, quant_skip_pattern=None):
dtype='int64',
append_batch_size=False)
hidden = data
for _ in six.moves.xrange(num):
for _ in range(num):
conv = conv_bn_layer(hidden, 16, 3, 1, 1, act=None, bias_attr=True)
short = conv_bn_layer(hidden, 16, 1, 1, 0, act=None)
hidden = fluid.layers.elementwise_add(x=conv, y=short, act='relu')
......@@ -579,7 +578,7 @@ def quant_dequant_residual_block(num, quant_skip_pattern=None):
dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
hidden = data1
for _ in six.moves.xrange(num):
for _ in range(num):
conv = conv_bn_layer(hidden, 16, 3, 1, 1, act=None, bias_attr=True)
short = conv_bn_layer(hidden, 16, 1, 1, 0, act=None)
hidden = fluid.layers.elementwise_add(x=conv, y=short, act='relu')
......
......@@ -16,7 +16,6 @@ import os
import unittest
import random
import numpy as np
import six
import tempfile
import paddle.fluid as fluid
import paddle
......
......@@ -16,7 +16,6 @@ import os
import unittest
import random
import numpy as np
import six
import paddle.fluid as fluid
import paddle
from paddle.fluid.framework import IrGraph
......
......@@ -17,7 +17,6 @@ import unittest
import json
import random
import numpy as np
import six
import tempfile
import paddle.fluid as fluid
import paddle
......
......@@ -13,7 +13,6 @@
# limitations under the license.
import numpy as np
import six
import unittest
import paddle
......@@ -29,7 +28,7 @@ def linear_fc(num):
data = fluid.layers.data(name='image', shape=[1, 32, 32], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
hidden = data
for _ in six.moves.xrange(num):
for _ in range(num):
hidden = fluid.layers.fc(hidden, size=128, act='relu')
loss = fluid.layers.cross_entropy(input=hidden, label=label)
loss = paddle.mean(loss)
......@@ -57,7 +56,7 @@ def residual_block(num):
data = fluid.layers.data(name='image', shape=[1, 32, 32], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
hidden = data
for _ in six.moves.xrange(num):
for _ in range(num):
conv = conv_bn_layer(hidden, 16, 3, 1, 1, act=None, bias_attr=True)
short = conv_bn_layer(hidden, 16, 1, 1, 0, act=None)
hidden = fluid.layers.elementwise_add(x=conv, y=short, act='relu')
......
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import unittest
from functools import partial
import numpy as np
......@@ -32,7 +31,7 @@ def fake_imdb_reader(word_dict_size,
class_dim=2):
def __reader__():
for _ in six.moves.range(sample_num):
for _ in range(sample_num):
length = np.random.random_integers(low=lower_seq_len,
high=upper_seq_len,
size=[1])[0]
......
......@@ -13,7 +13,6 @@
# limitations under the License.
import numpy as np
import six
from paddle.fluid import core
from paddle.fluid.layer_helper import LayerHelper
......@@ -107,11 +106,11 @@ def data(name, shape, dtype='float32', lod_level=0):
"""
helper = LayerHelper('data', **locals())
check_type(name, 'name', (six.binary_type, six.text_type), 'data')
check_type(name, 'name', (bytes, str), 'data')
check_type(shape, 'shape', (list, tuple), 'data')
shape = list(shape)
for i in six.moves.range(len(shape)):
for i in range(len(shape)):
if shape[i] is None:
shape[i] = -1
......
......@@ -15,8 +15,6 @@
from . import core
import numpy as np
import os
import six
from six.moves import zip, range, xrange
import multiprocessing
import warnings
......@@ -188,7 +186,7 @@ class DataToLoDTensorConverter(object):
def _reset(self):
self.data = []
self.lod = [[] for _ in six.moves.range(self.lod_level)]
self.lod = [[] for _ in range(self.lod_level)]
def feed(self, data):
self._feed_impl_(data, self.lod, self.lod_level)
......@@ -249,8 +247,7 @@ class BatchedTensorProvider(object):
def __call__(self):
idx = 0
for each_sample in self.generator():
for each_slot, each_converter in six.moves.zip(
each_sample, self.converters):
for each_slot, each_converter in zip(each_sample, self.converters):
each_converter.data.append(each_slot)
idx += 1
......@@ -333,7 +330,7 @@ class DataFeeder(object):
if program is None:
program = default_main_program()
for each_var in feed_list:
if isinstance(each_var, six.string_types):
if isinstance(each_var, str):
each_var = program.block(0).var(each_var)
if not isinstance(each_var, Variable):
raise TypeError("Feed list should contain a list of variable")
......@@ -383,9 +380,8 @@ class DataFeeder(object):
"""
converter = []
for lod_level, shape, dtype in six.moves.zip(self.feed_lod_level,
self.feed_shapes,
self.feed_dtypes):
for lod_level, shape, dtype in zip(self.feed_lod_level,
self.feed_shapes, self.feed_dtypes):
converter.append(
DataToLoDTensorConverter(place=self.place,
lod_level=lod_level,
......@@ -396,12 +392,10 @@ class DataFeeder(object):
assert len(each_sample) == len(converter), (
"The number of fields in data (%d) does not match " +
"len(feed_list) (%d)") % (len(each_sample), len(converter))
for each_converter, each_slot in six.moves.zip(
converter, each_sample):
for each_converter, each_slot in zip(converter, each_sample):
each_converter.feed(each_slot)
ret_dict = {}
for each_name, each_converter in six.moves.zip(self.feed_names,
converter):
for each_name, each_converter in zip(self.feed_names, converter):
ret_dict[each_name] = each_converter.done()
return ret_dict
......@@ -461,13 +455,13 @@ class DataFeeder(object):
"""
if isinstance(self.place, core.CUDAPlace):
places = [
core.CUDAPlace(i) for i in six.moves.xrange(
self._get_number_of_places_(num_places))
core.CUDAPlace(i)
for i in range(self._get_number_of_places_(num_places))
]
else:
places = [
core.CPUPlace() for _ in six.moves.xrange(
self._get_number_of_places_(num_places))
core.CPUPlace()
for _ in range(self._get_number_of_places_(num_places))
]
if len(iterable) != len(places):
......@@ -477,7 +471,7 @@ class DataFeeder(object):
"must be same.")
place = self.place
for p, batch in six.moves.zip(places, iterable):
for p, batch in zip(places, iterable):
self.place = p
yield self.feed(batch)
self.place = place
......
......@@ -13,7 +13,6 @@
# limitations under the License.
import sys
import six
import random
import os
import re
......@@ -235,7 +234,7 @@ def draw_block_graphviz(block, highlights=None, path="./temp.dot"):
graph = GraphPreviewGenerator("some graph")
# collect parameters and args
protostr = block.desc.serialize_to_string()
desc = framework_pb2.BlockDesc.FromString(six.binary_type(protostr))
desc = framework_pb2.BlockDesc.FromString(bytes(protostr))
def need_highlight(name):
if highlights is None: return False
......
......@@ -23,6 +23,7 @@ import types
import numpy
import six
import builtins
from paddle.fluid.dygraph.container import Sequential
from paddle.fluid.dygraph.dygraph_to_static.convert_operators import convert_len, convert_zip
......@@ -73,7 +74,7 @@ def is_builtin(func, name=None):
if isinstance(func, types.BuiltinFunctionType) and name_judge():
return True
elif func in six.moves.builtins.__dict__.values() and name_judge():
elif func in builtins.__dict__.values() and name_judge():
return True
else:
return False
......
......@@ -13,7 +13,6 @@
# limitations under the License.
import os
import six
import sys
import traceback
import linecache
......@@ -380,14 +379,11 @@ class ErrorData(object):
raise
new_exception = self.create_exception()
if six.PY3:
# NOTE(liym27):
# 1. Why `raise new_exception from None`?
# In Python 3, by default, an new exception is raised with trace information of the caught exception.
# This only raises new_exception and hides unwanted implementation details from tracebacks of the
# caught exception.
# 2. Use exec to bypass syntax error checking in Python 2.
six.exec_("raise new_exception from None")
else:
raise new_exception
# NOTE(liym27):
# Why `raise new_exception from None`?
#
# In Python 3, by default, an new exception is raised with trace information of the caught exception.
# This only raises new_exception and hides unwanted implementation details from tracebacks of the
# caught exception.
raise new_exception from None
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import inspect
import numpy as np
import collections
......@@ -83,7 +82,7 @@ class FunctionSpec(object):
args = list(args)
for i in six.moves.range(len(args), len(self._arg_names)):
for i in range(len(args), len(self._arg_names)):
arg_name = self._arg_names[i]
if arg_name in kwargs:
args.append(kwargs[arg_name])
......@@ -315,7 +314,7 @@ def convert_to_input_spec(inputs, input_spec):
elif isinstance(input_spec, dict):
input_with_spec = {}
check_type_and_len(inputs, input_spec, True)
for name, input in six.iteritems(inputs):
for name, input in inputs.items():
if name in input_spec:
input_with_spec[name] = convert_to_input_spec(
input, input_spec[name])
......@@ -380,7 +379,7 @@ def _replace_spec_name(name, input_spec):
return processed_specs
elif isinstance(input_spec, dict):
processed_specs = {}
for key, spec in six.iteritems(input_spec):
for key, spec in input_spec.items():
processed_specs[key] = _replace_spec_name(key, spec)
return processed_specs
else:
......
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import copy
import textwrap
from collections import defaultdict
......@@ -270,7 +269,7 @@ class NameVisitor(gast.NodeVisitor):
return False
def _update_name_ids(self, new_name_ids):
for name_id, ctxs in six.iteritems(new_name_ids):
for name_id, ctxs in new_name_ids.items():
self.name_ids[name_id] = ctxs + self.name_ids[name_id]
......
......@@ -15,7 +15,6 @@
import os
import threading
import six
from paddle.fluid import log_helper
from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_source_code
......@@ -117,7 +116,7 @@ class TranslatorLogger(object):
self._need_to_echo_code_to_stdout = code_to_stdout
def check_level(self, level):
if isinstance(level, (six.integer_types, type(None))):
if isinstance(level, (int, type(None))):
rv = level
else:
raise TypeError("Level is not an integer: {}".format(level))
......
......@@ -13,7 +13,6 @@
# limitations under the License.
import numpy as np
import six
import paddle
from paddle.fluid import framework, backward, core, program_guard
......@@ -873,7 +872,7 @@ class PartialProgramLayer:
param_and_buffer_names_set.add(var.name)
for block in main_program.blocks:
for name, var in six.iteritems(block.vars):
for name, var in block.vars.items():
if isinstance(var, framework.Parameter):
if name not in param_and_buffer_names_set:
raise ValueError(
......
......@@ -15,7 +15,6 @@
import collections
from paddle.utils import gast
import inspect
import six
import textwrap
import threading
import weakref
......@@ -983,7 +982,7 @@ class ProgramCache(object):
return len(self._caches)
def concrete_programs(self):
return [cp for key, (cp, _) in six.iteritems(self._caches)]
return [cp for key, (cp, _) in self._caches.items()]
def synchronized(func):
......
......@@ -19,9 +19,9 @@ import copy
import collections
from paddle.utils import gast
import inspect
import os, sys
import os
import sys
import shutil
import six
import tempfile
import textwrap
import numpy as np
......@@ -135,7 +135,7 @@ def data_layer_not_check(name, shape, dtype='float32', lod_level=0):
"""
helper = LayerHelper('data', **locals())
shape = list(shape)
for i in six.moves.range(len(shape)):
for i in range(len(shape)):
if shape[i] is None:
shape[i] = -1
......@@ -488,7 +488,7 @@ def generate_name_node(name_ids, ctx=gast.Load(), gen_tuple_if_single=False):
This function is used at several gast.Return statements.
"""
if isinstance(name_ids, six.string_types):
if isinstance(name_ids, str):
name_ids = [name_ids]
if not isinstance(name_ids, (list, tuple, set)):
raise TypeError(
......@@ -640,7 +640,7 @@ def recover_globals_attribute(src_obj, dst_obj):
src_globals = getattr(src_obj, attr_name, {})
dst_globals = getattr(dst_obj, attr_name, {})
for k, v in six.iteritems(src_globals):
for k, v in src_globals.items():
# ignore builtin attribute.
if not (k.startswith('__') and k.endswith('__')):
dst_globals[k] = v
......@@ -889,7 +889,7 @@ class IsControlFlowVisitor(gast.NodeVisitor):
# Look up the node_var_type_map by name_id.
if self.node_var_type_map:
if name_id and isinstance(name_id, six.string_types):
if name_id and isinstance(name_id, str):
var_type = self.node_var_type_map.get(name_id, None)
if var_type and var_type & NodeVarType.TENSOR_TYPES:
return True
......
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import paddle
import textwrap
from paddle.utils import gast
......@@ -59,7 +58,7 @@ def to_static_variable(x):
return paddle.full(shape=[1], dtype='bool', fill_value=x)
if isinstance(x, float):
return paddle.full(shape=[1], dtype='float64', fill_value=x)
if isinstance(x, six.integer_types):
if isinstance(x, int):
return paddle.full(shape=[1], dtype='int64', fill_value=x)
if isinstance(x, UndefinedVar) or x is None:
"""
......
......@@ -13,7 +13,6 @@
# limitations under the License.
import os
import six
import pickle
import numpy as np
......@@ -69,17 +68,17 @@ def _is_persistable(var_desc):
def _is_parameter(persistable_var_desc, program_desc):
# 1. firstly, param should be input of op
input_ops = [] # op can be repeated
for block_idx in six.moves.range(program_desc.num_blocks()):
for block_idx in range(program_desc.num_blocks()):
block = program_desc.block(block_idx)
for op_idx in six.moves.range(block.op_size()):
for op_idx in range(block.op_size()):
op = block.op(op_idx)
# NOTE: parameter is the input of a certain op
if persistable_var_desc.name() in op.input_arg_names():
input_ops.append(op)
# 2. secondly, param should not be output of op or be same op's output
for block_idx in six.moves.range(program_desc.num_blocks()):
for block_idx in range(program_desc.num_blocks()):
block = program_desc.block(block_idx)
for op_idx in six.moves.range(block.op_size()):
for op_idx in range(block.op_size()):
op = block.op(op_idx)
if persistable_var_desc.name() in op.output_arg_names():
# such as batch_norm_op
......@@ -92,7 +91,7 @@ def _is_parameter(persistable_var_desc, program_desc):
def _get_persistable_vars(program_desc):
persistable_vars = []
for i in six.moves.range(program_desc.num_blocks()):
for i in range(program_desc.num_blocks()):
block = program_desc.block(i)
persistable_vars.extend(list(filter(_is_persistable, block.all_vars())))
return persistable_vars
......@@ -111,7 +110,7 @@ def _get_persistable_var_names(program_desc):
def _get_all_var_names(program_desc):
all_var_names = set()
for i in six.moves.range(program_desc.num_blocks()):
for i in range(program_desc.num_blocks()):
block = program_desc.block(i)
for var in block.all_vars():
all_var_names.add(var.name())
......@@ -142,10 +141,10 @@ def _append_loaded_suffix_to_var(program_desc):
new_name = _append_loaded_suffix(var_desc.name())
suffix_varname_dict[new_name] = old_name
var_desc.set_name(new_name)
for block_idx in six.moves.range(program_desc.num_blocks()):
for block_idx in range(program_desc.num_blocks()):
block = program_desc.block(block_idx)
block._rename_var(old_name.encode(), new_name.encode())
for op_idx in six.moves.range(block.op_size()):
for op_idx in range(block.op_size()):
op = block.op(op_idx)
op._rename_input(old_name, new_name)
op._rename_output(old_name, new_name)
......@@ -191,7 +190,7 @@ def _rename_var_program_desc(program_desc, include=None, exclude=None):
dict_rename_var_new_old = dict()
old_names = []
# Store all old names
for b_idx in six.moves.range(program_desc.num_blocks()):
for b_idx in range(program_desc.num_blocks()):
cur_block = program_desc.block(b_idx)
for var in cur_block.all_vars():
old_names.append(var.name())
......@@ -199,7 +198,7 @@ def _rename_var_program_desc(program_desc, include=None, exclude=None):
# Create dict_rename_var_new_old and dict_rename_var_old_new for non double
# grad variables
has_double_grad = False
for b_idx in six.moves.range(program_desc.num_blocks()):
for b_idx in range(program_desc.num_blocks()):
cur_block = program_desc.block(b_idx)
for var_idx, var in enumerate(cur_block.all_vars()):
name_old = var.name()
......@@ -232,7 +231,7 @@ def _rename_var_program_desc(program_desc, include=None, exclude=None):
if has_double_grad:
double_grad_rename_dict = {}
for name_old in dict_rename_var_old_new:
for b_idx in six.moves.range(program_desc.num_blocks()):
for b_idx in range(program_desc.num_blocks()):
cur_block = program_desc.block(b_idx)
for var_idx, var in enumerate(cur_block.all_vars()):
var_name = var.name()
......@@ -247,9 +246,9 @@ def _rename_var_program_desc(program_desc, include=None, exclude=None):
double_grad_rename_dict[var_name]] = var_name
# Rename on program desc
for b_idx in six.moves.range(program_desc.num_blocks()):
for b_idx in range(program_desc.num_blocks()):
cur_block = program_desc.block(b_idx)
for op_idx in six.moves.range(cur_block.op_size()):
for op_idx in range(cur_block.op_size()):
op = cur_block.op(op_idx)
for input_arg_name in op.input_arg_names():
if input_arg_name in dict_rename_var_old_new:
......@@ -283,8 +282,7 @@ def _build_program_by_desc(program_desc):
prog = framework.Program()
prog.desc = program_desc
prog.blocks = [
framework.Block(prog, i)
for i in six.moves.range(prog.desc.num_blocks())
framework.Block(prog, i) for i in range(prog.desc.num_blocks())
]
prog._sync_with_cpp()
return prog
......@@ -292,9 +290,9 @@ def _build_program_by_desc(program_desc):
def _change_is_test_status(program_desc, is_test):
# change all `is_test` attributes
for i in six.moves.range(program_desc.num_blocks()):
for i in range(program_desc.num_blocks()):
block = program_desc.block(i)
for j in six.moves.range(block.op_size()):
for j in range(block.op_size()):
op = block.op(j)
if op.has_attr('is_test'):
op._set_attr('is_test', is_test)
......@@ -407,7 +405,7 @@ class _ProgramHolder(object):
# remove feed, fetch and scale-1 op, remove op_callstack attr
ops_to_remove = []
root_block = program_desc.block(0)
for i in six.moves.range(root_block.op_size()):
for i in range(root_block.op_size()):
op = root_block.op(i)
if op.type() == 'feed':
ops_to_remove.append(i)
......@@ -511,7 +509,7 @@ class _ProgramHolder(object):
program = _build_program_by_desc(program_desc_copy)
# 3. Add the outputs which is only used for training and not saved in
# inference program.
for block_idx in six.moves.range(program.num_blocks):
for block_idx in range(program.num_blocks):
block = program.block(block_idx)
for op in block.ops:
if op.type == "batch_norm":
......
......@@ -22,7 +22,6 @@ import inspect
import threading
from typing import Text, Tuple, Any, List
import six
import paddle
from paddle.fluid import core, dygraph
from paddle.fluid.compiler import BuildStrategy, CompiledProgram, ExecutionStrategy
......@@ -338,7 +337,7 @@ class _SaveLoadConfig(object):
def model_filename(self, filename):
if filename is None:
return
if not isinstance(filename, six.string_types):
if not isinstance(filename, str):
raise TypeError(
"The config `model_filename` should be str, but received input's type is %s."
% type(filename))
......@@ -354,7 +353,7 @@ class _SaveLoadConfig(object):
def params_filename(self, filename):
if filename is None:
return
if not isinstance(filename, six.string_types):
if not isinstance(filename, str):
raise TypeError(
"The config `params_filename` should be str, but received input's type is %s."
% type(filename))
......@@ -985,7 +984,7 @@ def save(layer, path, input_spec=None, **configs):
# we only record the state_dict variable's structured name
state_names_dict = dict()
state_var_dict = dict()
for structured_name, var in six.iteritems(dygraph_state_dict):
for structured_name, var in dygraph_state_dict.items():
state_names_dict[var.name] = structured_name
state_var_dict[var.name] = var
......
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import warnings
from paddle.fluid.framework import default_main_program, _non_static_mode
......@@ -64,7 +63,7 @@ def set_op_customized_attrs_post_hook(layer, inputs, outputs):
layer._op_recorder.ops = ops
for op in ops:
for attr_name, val in six.iteritems(layer._customized_attrs):
for attr_name, val in layer._customized_attrs.items():
op._set_attr(attr_name, val)
# remove pre-hook and post-hook
......
......@@ -13,11 +13,10 @@
# limitations under the License.
import copy
import six
from ..framework import Parameter, _non_static_mode, _global_flags
from ..param_attr import ParamAttr
from .. import core
from six.moves import zip
from ..layer_helper_base import LayerHelperBase
from ..dygraph_utils import _append_activation_in_dygraph
......@@ -78,7 +77,7 @@ class LayerObjectHelper(LayerHelperBase):
self.name))
elif len(param_attr) == 1 and length != 1:
tmp = [None] * length
for i in six.moves.range(length):
for i in range(length):
tmp[i] = copy.deepcopy(param_attr[0])
param_attr = tmp
return param_attr
......@@ -150,7 +149,7 @@ class LayerObjectHelper(LayerHelperBase):
act = act
if act is None:
return input_var
if isinstance(act, six.string_types):
if isinstance(act, str):
act = {'type': act}
else:
raise TypeError(
......
......@@ -16,7 +16,6 @@ import collections
import contextlib
import sys
import numpy as np
import six
import re
import copy
import weakref
......@@ -441,7 +440,7 @@ class Layer(object):
"""
temp_attr = copy.deepcopy(attr)
if isinstance(temp_attr, six.string_types) and temp_attr == "":
if isinstance(temp_attr, str) and temp_attr == "":
temp_attr = None
return self._helper.create_parameter(temp_attr, shape, dtype, is_bias,
default_initializer)
......@@ -770,7 +769,7 @@ class Layer(object):
if '_buffers' not in self.__dict__:
raise ValueError(
"super(YourLayer, self).__init__() should be called first")
elif not isinstance(name, six.string_types):
elif not isinstance(name, str):
raise TypeError(
"The name of buffer should be a string, but received {}.".
format(type(name).__name__))
......@@ -1038,7 +1037,7 @@ class Layer(object):
if '_parameters' not in self.__dict__:
raise RuntimeError(
"super(YourLayer, self).__init__() should be called firstly.")
elif not isinstance(name, six.string_types):
elif not isinstance(name, str):
raise TypeError(
"The name of parameter should be a string, but received {}.".
format(type(name).__name__))
......
......@@ -13,7 +13,6 @@
# limitations under the License.
import paddle
from six.moves import reduce
from .. import core
from ..layers import utils
from ..layers import nn as F
......
......@@ -13,7 +13,6 @@
# limitations under the License.
import os
import six
import numpy as np
import warnings
from collections import OrderedDict
......
......@@ -12,8 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from collections import defaultdict
from paddle.fluid import core
from paddle.fluid import framework
......
......@@ -384,8 +384,8 @@ def _add_feed_fetch_ops(program,
fetch_op):
for i, var in enumerate(fetch_list):
assert isinstance(var, Variable) or isinstance(
var, six.string_types), ("Wrong type for fetch_list[%s]: %s" %
(i, type(var)))
var, str), ("Wrong type for fetch_list[%s]: %s" %
(i, type(var)))
global_block.append_op(type=fetch_op,
inputs={'X': [var]},
outputs={'Out': [fetch_var]},
......@@ -429,7 +429,7 @@ def _fetch_var(name, scope=None, return_numpy=True):
Returns:
LodTensor|numpy.ndarray
"""
assert isinstance(name, six.string_types)
assert isinstance(name, str)
if scope is None:
scope = global_scope()
assert isinstance(scope, core._Scope)
......@@ -452,7 +452,7 @@ def _to_name_str(var):
return var.desc.name()
elif isinstance(var, str):
return var
elif isinstance(var, six.string_types):
elif isinstance(var, str):
return str(var)
elif isinstance(var, Operator):
return str(id(var))
......@@ -987,7 +987,7 @@ class Executor(object):
def _fetch_data(self, fetch_list, fetch_var_name, scope):
outs = [
core.get_fetch_variable(scope, fetch_var_name, i)
for i in six.moves.range(len(fetch_list))
for i in range(len(fetch_list))
]
return outs
......@@ -1017,7 +1017,7 @@ class Executor(object):
raise TypeError(
"The operator in fetch_list is not an optimize_op")
elif isinstance(item, Variable) or isinstance(
item, str) or isinstance(item, six.string_types):
item, str) or isinstance(item, str):
_fetch_list.append(item)
else:
raise TypeError(
......@@ -1808,8 +1808,7 @@ class Executor(object):
return exe.run(feed)
def _check_fetch_list(self, fetch_list):
is_fetch_var = lambda var: isinstance(var,
(Variable, str, six.string_types))
is_fetch_var = lambda var: isinstance(var, (Variable, str))
is_tuple_list = lambda var: isinstance(var, (tuple, list))
if fetch_list is None: return []
......@@ -2404,9 +2403,8 @@ class Executor(object):
fetch_op):
for i, var in enumerate(fetch_list):
assert isinstance(var, Variable) or isinstance(
var,
six.string_types), ("Wrong type for fetch_list[%s]: %s" %
(i, type(var)))
var, str), ("Wrong type for fetch_list[%s]: %s" %
(i, type(var)))
global_block.append_op(type=fetch_op,
inputs={'X': [var]},
outputs={'Out': [fetch_var]},
......
......@@ -20,7 +20,6 @@ from .wrapped_decorator import signature_safe_contextmanager, wrap_decorator
import os
import re
import traceback
import six
import copy
from types import MethodType, FunctionType
......@@ -448,7 +447,7 @@ def require_version(min_version, max_version=None):
zero_version = ['0', '0', '0', '0']
def version_cmp(ver_a, ver_b):
for i in six.moves.range(len(ver_a)):
for i in range(len(ver_a)):
if int(ver_a[i]) > int(ver_b[i]):
return 1
elif int(ver_a[i]) < int(ver_b[i]):
......@@ -688,7 +687,7 @@ def _cuda_ids():
if gpus_env:
device_ids = [int(s) for s in gpus_env.split(",")]
else:
device_ids = six.moves.range(core.get_cuda_device_count())
device_ids = range(core.get_cuda_device_count())
return device_ids
......@@ -697,7 +696,7 @@ def _xpu_ids():
if xpus_env:
device_ids = [int(s) for s in xpus_env.split(",")]
else:
device_ids = six.moves.range(core.get_xpu_device_count())
device_ids = range(core.get_xpu_device_count())
return device_ids
......@@ -706,7 +705,7 @@ def _npu_ids():
if npus_env:
device_ids = [int(s) for s in npus_env.split(",")]
else:
device_ids = six.moves.range(core.get_npu_device_count())
device_ids = range(core.get_npu_device_count())
return device_ids
......@@ -715,7 +714,7 @@ def _mlu_ids():
if mlus_env:
device_ids = [int(s) for s in mlus_env.split(",")]
else:
device_ids = six.moves.range(core.get_mlu_device_count())
device_ids = range(core.get_mlu_device_count())
return device_ids
......@@ -1322,8 +1321,7 @@ class ParameterMetaClass(VariableMetaClass):
return issubclass(t, Parameter)
@six.add_metaclass(VariableMetaClass)
class Variable(object):
class Variable(metaclass=VariableMetaClass):
"""
**Notes**:
**The constructor of Variable should not be invoked directly.**
......@@ -1763,7 +1761,7 @@ class Variable(object):
assert isinstance(throw_on_error, bool) and isinstance(
with_details, bool)
protostr = self.desc.serialize_to_string()
proto = framework_pb2.VarDesc.FromString(six.binary_type(protostr))
proto = framework_pb2.VarDesc.FromString(bytes(protostr))
res_str = _debug_string_(proto, throw_on_error)
if with_details:
additional_attr = ("error_clip", )
......@@ -2579,7 +2577,7 @@ def get_all_op_protos():
protostrs = core.get_all_op_protos()
ret_values = []
for pbstr in protostrs:
op_proto = framework_pb2.OpProto.FromString(six.binary_type(pbstr))
op_proto = framework_pb2.OpProto.FromString(bytes(pbstr))
ret_values.append(op_proto)
return ret_values
......@@ -2812,9 +2810,9 @@ class Operator(object):
% (in_proto.name, len(in_args)))
in_arg_names = []
for index, arg in enumerate(in_args):
if isinstance(arg, six.string_types):
if isinstance(arg, str):
in_arg_names.append(arg)
elif isinstance(arg, six.binary_type):
elif isinstance(arg, bytes):
in_arg_names.append(arg.decode())
elif isinstance(arg, (Variable, core.VarBase)):
in_arg_names.append(arg.name)
......@@ -2850,13 +2848,13 @@ class Operator(object):
% (out_proto.name, len(out_args)))
out_arg_names = []
for arg in out_args:
if isinstance(arg, six.string_types):
if isinstance(arg, str):
out_arg_names.append(arg)
else:
out_arg_names.append(arg.name)
# TODO(minqiyang): could we remove variable's op in static mode?
if not _non_static_mode():
if isinstance(arg, six.string_types):
if isinstance(arg, str):
block.var(arg).op = self
else:
arg.op = self
......@@ -2911,7 +2909,7 @@ class Operator(object):
"""
protostr = self.desc.serialize_to_string()
proto = framework_pb2.OpDesc.FromString(six.binary_type(protostr))
proto = framework_pb2.OpDesc.FromString(bytes(protostr))
return _debug_string_(proto, throw_on_error)
def _to_readable_code(self, skip_op_callstack=True):
......@@ -3528,8 +3526,7 @@ class Block(object):
res_str += "\n}"
else:
protostr = self.desc.serialize_to_string()
proto = framework_pb2.BlockDesc.FromString(
six.binary_type(protostr))
proto = framework_pb2.BlockDesc.FromString(bytes(protostr))
res_str = _debug_string_(proto, throw_on_error)
return res_str
......@@ -3581,7 +3578,7 @@ class Block(object):
Returns:
Variable: the Variable with the giving name.
"""
if not isinstance(name, six.string_types):
if not isinstance(name, str):
raise TypeError(
"var require string as parameter, but get %s instead." %
(type(name)))
......@@ -3650,7 +3647,7 @@ class Block(object):
return list(self.iter_parameters())
def iter_parameters(self):
return (item[1] for item in six.iteritems(self.vars)
return (item[1] for item in self.vars.items()
if isinstance(item[1], Parameter))
def create_var(self, *args, **kwargs):
......@@ -4736,14 +4733,14 @@ class IrGraph(object):
"""
op_desc = core.OpDesc()
op_desc.set_type(op_type)
for attr, value in six.iteritems(attrs):
for attr, value in attrs.items():
self._update_desc_attr(op_desc, attr, value)
for input_name, var_nodes in six.iteritems(inputs):
for input_name, var_nodes in inputs.items():
if not isinstance(var_nodes, list):
var_nodes = [var_nodes]
op_desc.set_input(input_name,
[var_node.name() for var_node in var_nodes])
for output_name, var_nodes in six.iteritems(outputs):
for output_name, var_nodes in outputs.items():
if not isinstance(var_nodes, list):
var_nodes = [var_nodes]
op_desc.set_output(output_name,
......@@ -4889,7 +4886,7 @@ class IrGraph(object):
"""
adj_list = core.build_adjacency_list(self.graph)
wrapped_adj_list = dict()
for k, v in six.iteritems(adj_list):
for k, v in adj_list.items():
wrapped_adj_list[IrNode(k)] = {IrNode(n) for n in v}
return wrapped_adj_list
......@@ -5461,8 +5458,7 @@ class Program(object):
res_str += block.to_string(throw_on_error, with_details)
else:
protostr = self.desc.serialize_to_string()
proto = framework_pb2.ProgramDesc.FromString(
six.binary_type(protostr))
proto = framework_pb2.ProgramDesc.FromString(bytes(protostr))
res_str = _debug_string_(proto, throw_on_error)
return res_str
......@@ -5545,7 +5541,7 @@ class Program(object):
print("op type is {}".format(op.type))
print("op inputs are {}".format(op.input_arg_names))
print("op outputs are {}".format(op.output_arg_names))
for key, value in sorted(six.iteritems(op.all_attrs())):
for key, value in sorted(op.all_attrs().items()):
if key not in ['op_callstack', 'op_role_var']:
print(" [ attrs: {}: {} ]".format(key, value))
......@@ -5553,7 +5549,6 @@ class Program(object):
1. To clone a test program, the sample code is:
.. code-block:: python
import six
import paddle
import paddle.static as static
import paddle.utils as utils
......@@ -5562,13 +5557,13 @@ class Program(object):
paddle.enable_static()
def print_prog(prog):
for name, value in sorted(six.iteritems(prog.block(0).vars)):
for name, value in sorted(prog.block(0).vars.items()):
print(value)
for op in prog.block(0).ops:
print("op type is {}".format(op.type))
print("op inputs are {}".format(op.input_arg_names))
print("op outputs are {}".format(op.output_arg_names))
for key, value in sorted(six.iteritems(op.all_attrs())):
for key, value in sorted(op.all_attrs().items()):
if key not in ['op_callstack', 'op_role_var']:
print(" [ attrs: {}: {} ]".format(key, value))
......@@ -5606,7 +5601,6 @@ class Program(object):
2. The clone method can be avoid if you create program for training and program for testing individually.
.. code-block:: python
import six
import paddle
import paddle.static as static
import paddle.utils as utils
......@@ -5615,13 +5609,13 @@ class Program(object):
paddle.enable_static()
def print_prog(prog):
for name, value in sorted(six.iteritems(prog.block(0).vars)):
for name, value in sorted(prog.block(0).vars.items()):
print(value)
for op in prog.block(0).ops:
print("op type is {}".format(op.type))
print("op inputs are {}".format(op.input_arg_names))
print("op outputs are {}".format(op.output_arg_names))
for key, value in sorted(six.iteritems(op.all_attrs())):
for key, value in sorted(op.all_attrs().items()):
if key not in ['op_callstack', 'op_role_var']:
print(" [ attrs: {}: {} ]".format(key, value))
......@@ -5663,7 +5657,7 @@ class Program(object):
self.desc)
forward_prog.blocks = [
Block(forward_prog, i)
for i in six.moves.range(forward_prog.desc.num_blocks())
for i in range(forward_prog.desc.num_blocks())
]
forward_prog._sync_with_cpp()
p = forward_prog._inference_optimize(prune_read_op=False)
......@@ -5672,9 +5666,7 @@ class Program(object):
p.current_block_idx = self.current_block_idx
p._seed = self._seed
p.desc = core.ProgramDesc(self.desc)
p.blocks = [
Block(p, i) for i in six.moves.range(self.desc.num_blocks())
]
p.blocks = [Block(p, i) for i in range(self.desc.num_blocks())]
p._current_role = self._current_role
p.__op_role_var = self.__op_role_var
......@@ -5737,7 +5729,7 @@ class Program(object):
targets = [targets]
for var in feeded_var_names:
if not isinstance(var, six.string_types):
if not isinstance(var, str):
raise ValueError(
"All feeded_var_names of Program._prune_with_input() can only be "
"str, but received %s." % type(var))
......@@ -5763,7 +5755,7 @@ class Program(object):
if not isinstance(t, Operator):
if isinstance(t, Variable):
name = t.name
elif isinstance(t, six.string_types):
elif isinstance(t, str):
name = str(t)
else:
raise ValueError(
......@@ -5803,9 +5795,7 @@ class Program(object):
res = Program()
res.desc, pruned_origin_block_id_map = core.prune(
self.desc, set(feeded_var_names), targets_idx)
res.blocks = [
Block(res, i) for i in six.moves.range(res.desc.num_blocks())
]
res.blocks = [Block(res, i) for i in range(res.desc.num_blocks())]
res._sync_with_cpp()
res._copy_param_info_from(self)
......@@ -5854,18 +5844,16 @@ class Program(object):
root_block._remove_var(var.name().encode())
# change all `is_test` attributes to True
for i in six.moves.range(res.desc.num_blocks()):
for i in range(res.desc.num_blocks()):
block = res.desc.block(i)
for j in six.moves.range(block.op_size()):
for j in range(block.op_size()):
op = block.op(j)
if op.has_attr('is_test'):
op._set_attr('is_test', True)
if op.type() == "batch_norm":
# Remove the output ReserveSpace of batch_norm if exists.
op.remove_output("ReserveSpace")
res.blocks = [
Block(res, i) for i in six.moves.range(res.desc.num_blocks())
]
res.blocks = [Block(res, i) for i in range(res.desc.num_blocks())]
res._sync_with_cpp()
return res
......@@ -5884,16 +5872,14 @@ class Program(object):
res = Program()
res.desc = core.ProgramDesc(self.desc)
res.blocks = [
Block(res, i) for i in six.moves.range(res.desc.num_blocks())
]
res.blocks = [Block(res, i) for i in range(res.desc.num_blocks())]
res._sync_with_cpp()
# Note: The op_role and op_role_var cann't be deleted currently,
# and we will try to remove them in the future.
common_clipped_attrs_list = ['op_callstack', 'with_quant_attr']
for i in six.moves.range(res.desc.num_blocks()):
for i in range(res.desc.num_blocks()):
block = res.desc.block(i)
for var in block.all_vars():
var.clear_is_parameter()
......@@ -6016,7 +6002,7 @@ class Program(object):
"""
p = Program()
p.desc = core.ProgramDesc(binary_str)
p.blocks = [Block(p, i) for i in six.moves.range(p.desc.num_blocks())]
p.blocks = [Block(p, i) for i in range(p.desc.num_blocks())]
p._sync_with_cpp()
return p
......@@ -6033,7 +6019,7 @@ class Program(object):
"""
p = Program()
p.desc = desc
p.blocks = [Block(p, i) for i in six.moves.range(p.desc.num_blocks())]
p.blocks = [Block(p, i) for i in range(p.desc.num_blocks())]
p._sync_with_cpp()
return p
......@@ -6304,7 +6290,7 @@ class Program(object):
if not pruned_origin_block_id_map:
pruned_origin_block_id_map = {
i: i
for i in six.moves.range(self.desc.num_blocks())
for i in range(self.desc.num_blocks())
}
# NOTE(zhiqiu): All vars in cloned program exist in original program.
......@@ -6556,8 +6542,7 @@ class Program(object):
.format(name)))
@six.add_metaclass(ParameterMetaClass)
class Parameter(Variable):
class Parameter(Variable, metaclass=ParameterMetaClass):
"""
Parameter is derived from Variable. A parameter is a persistable
Variable, and will be updated by optimizers after each iteration.
......
......@@ -14,14 +14,13 @@
import os
import random
import six
import functools
import subprocess
import logging
def crepr(v):
if isinstance(v, six.string_types):
if isinstance(v, str):
return '"%s"' % v
return str(v)
......@@ -104,7 +103,7 @@ class Graph(object):
stderr=subprocess.PIPE)
def _rank_repr(self):
ranks = sorted(six.iteritems(self.rank_groups),
ranks = sorted(self.rank_groups.items(),
key=functools.cmp_to_key(
lambda a, b: a[1].priority > b[1].priority))
repr = []
......@@ -149,7 +148,7 @@ class Node(object):
name=self.name,
label=self.label,
extra=',' + ','.join("%s=%s" % (key, crepr(value))
for key, value in six.iteritems(self.attrs))
for key, value in self.attrs.items())
if self.attrs else "")
return reprs
......@@ -174,7 +173,7 @@ class Edge(object):
target=self.target.name,
extra="" if not self.attrs else "[" +
','.join("{}={}".format(attr[0], crepr(attr[1]))
for attr in six.iteritems(self.attrs)) + "]")
for attr in self.attrs.items()) + "]")
return repr
......
......@@ -17,7 +17,6 @@ import logging
import hashlib
import json
import os
import six
import time
import collections
from threading import Thread, current_thread
......@@ -405,7 +404,7 @@ class TrainEpochRange(SerializableBase):
# registerd exes
d["exe_status"] = {}
e = d["exe_status"]
for k, t in six.iteritems(self._exe_status):
for k, t in self._exe_status.items():
e[t._key] = t._serialize()
return json.dumps(d)
......@@ -427,7 +426,7 @@ class TrainEpochRange(SerializableBase):
# exes status
e = d["exe_status"]
for k, v in six.iteritems(e):
for k, v in e.items():
t = ExeTrainStatus()
t._deserialize(v)
self._exe_status[k] = t
......@@ -480,7 +479,7 @@ class TrainEpochRange(SerializableBase):
return
e = self._exe_status
for k, t in six.iteritems(self._exe_status):
for k, t in self._exe_status.items():
m = PaddleModel(t._exe, t._program)
p = self._checker.get_exe_checkpoint_path(t._hash_key)
t._epoch_no = self.get()
......
......@@ -32,7 +32,6 @@ import paddle
import os
import sys
import six
import json
import re
import shutil
......@@ -285,7 +284,7 @@ class CollectiveOptimizer(DistributedOptimizer):
return self._optimizer.apply_gradients(params_grads)
def _check_condition(self, name, **kwargs):
for k, v in six.iteritems(kwargs):
for k, v in kwargs.items():
if v is True:
assert False, "you can't use %s and %s together" % (name, k)
......
......@@ -13,7 +13,6 @@
# limitations under the License.
import collections
import six
from paddle.fluid import core
from paddle.fluid.framework import Block
......@@ -95,7 +94,7 @@ def _append_pserver_non_opt_ops(optimize_block, opt_op, origin_program, config):
"""
grad_block = None
for _, g in six.iteritems(var_dict):
for _, g in var_dict.items():
if _orig_varname(g.name) == _orig_varname(var.name):
# skip per trainer vars
if g.name.find(".trainer_") == -1:
......@@ -113,7 +112,7 @@ def _append_pserver_non_opt_ops(optimize_block, opt_op, origin_program, config):
program = optimize_block.program
# Append the ops for parameters that do not need to be optimized / updated
inputs = _get_input_map_from_op(origin_program.global_block().vars, opt_op)
for key, varlist in six.iteritems(inputs):
for key, varlist in inputs.items():
if not isinstance(varlist, list):
varlist = [varlist]
for i in range(len(varlist)):
......@@ -134,7 +133,7 @@ def _append_pserver_non_opt_ops(optimize_block, opt_op, origin_program, config):
outputs = _get_output_map_from_op(origin_program.global_block().vars,
opt_op)
for key, varlist in six.iteritems(outputs):
for key, varlist in outputs.items():
if not isinstance(varlist, list):
varlist = [varlist]
for i in range(len(varlist)):
......@@ -913,7 +912,7 @@ def build_pserver_startup_program_pass(program, p_main_program, config):
pserver_vars = p_main_program.global_block().vars
created_var_map = collections.OrderedDict()
for _, var in six.iteritems(pserver_vars):
for _, var in pserver_vars.items():
tmpvar = program.global_block()._clone_variable(var)
created_var_map[var.name] = tmpvar
......
......@@ -779,7 +779,7 @@ class CompileTimeStrategy(object):
block_map[varname] = []
block_map[varname].append((int(offset), int(size)))
for varname, split in six.iteritems(block_map):
for varname, split in block_map.items():
orig_var = self.merged_variable_map[varname]
if len(split) == 1:
......@@ -829,7 +829,7 @@ class CompileTimeStrategy(object):
def _dispatcher(self):
ps_dispatcher = RoundRobin(self.get_ps_endpoints())
ps_dispatcher.reset()
grad_var_mapping_items = list(six.iteritems(self.grad_var_mapping))
grad_var_mapping_items = list(self.grad_var_mapping.items())
sparse_gradnames = [grad.name for _, grad in self.origin_sparse_pairs]
......
......@@ -14,7 +14,6 @@
# limitations under the License.
import os
import six
import collections
import warnings
import math
......@@ -1834,7 +1833,7 @@ def block_append_op(program, origin_program, block, op):
merge_ordereddict = origin_program.global_block().vars.copy()
merge_ordereddict.update(block.vars)
inputs = _get_input_map_from_op(merge_ordereddict, op)
for key, varlist in six.iteritems(inputs):
for key, varlist in inputs.items():
if not isinstance(varlist, list):
varlist = [varlist]
for var in varlist:
......@@ -1847,7 +1846,7 @@ def block_append_op(program, origin_program, block, op):
block._clone_variable(var, force_persistable=False)
outputs = _get_output_map_from_op(origin_program.global_block().vars, op)
for key, varlist in six.iteritems(outputs):
for key, varlist in outputs.items():
if not isinstance(varlist, list):
varlist = [varlist]
for var in varlist:
......@@ -1898,7 +1897,7 @@ def add_vars_by_var_list(var_name_list, origin_program, program, block):
def get_varlist_from_op_map(var_map):
var_list = []
for key, varlist in six.iteritems(var_map):
for key, varlist in var_map.items():
if not isinstance(varlist, list):
varlist = [varlist]
for i in range(len(varlist)):
......
......@@ -24,7 +24,6 @@ import copy
import errno
import time
import logging
import six
#from . import fs
from paddle.distributed.fleet.utils.fs import FS, LocalFS, FSFileExistsError, FSFileNotExistsError, ExecuteError, FSTimeOut, FSShellCmdAborted
from paddle.fluid import core
......@@ -91,7 +90,7 @@ class HDFSClient(FS):
self.pre_commands.append(dfs)
if configs:
for k, v in six.iteritems(configs):
for k, v in configs.items():
config_command = '-D%s=%s' % (k, v)
self.pre_commands.append(config_command)
......
......@@ -15,7 +15,6 @@
import os
import errno
import warnings
import six
import logging
import pickle
import contextlib
......@@ -1310,14 +1309,14 @@ def save_inference_model(dirname,
# "./infer_model".
"""
if isinstance(feeded_var_names, six.string_types):
if isinstance(feeded_var_names, str):
feeded_var_names = [feeded_var_names]
elif export_for_deployment:
if len(feeded_var_names) > 0:
# TODO(paddle-dev): polish these code blocks
if not (bool(feeded_var_names) and all(
isinstance(name, six.string_types)
for name in feeded_var_names)):
if not (bool(feeded_var_names)
and all(isinstance(name, str)
for name in feeded_var_names)):
raise ValueError("'feed_var_names' should be a list of str.")
if isinstance(target_vars, Variable):
......
......@@ -13,14 +13,13 @@
# limitations under the License.
import copy
import six
from .framework import Parameter, dtype_is_floating, _non_static_mode, OpProtoHolder, _global_flags
from . import unique_name
from paddle.fluid.initializer import Constant, Xavier
from .param_attr import ParamAttr
from . import core
from six.moves import zip
from .layer_helper_base import LayerHelperBase
from .dygraph_utils import _append_activation_in_dygraph
......@@ -76,7 +75,7 @@ class LayerHelper(LayerHelperBase):
raise ValueError("parameter number mismatch")
elif len(param_attr) == 1 and length != 1:
tmp = [None] * length
for i in six.moves.range(length):
for i in range(length):
tmp[i] = copy.deepcopy(param_attr[0])
param_attr = tmp
return param_attr
......@@ -143,7 +142,7 @@ class LayerHelper(LayerHelperBase):
act = self.kwargs.get('act', None)
if act is None:
return input_var
if isinstance(act, six.string_types):
if isinstance(act, str):
act = {'type': act}
else:
raise TypeError(str(act) + " should be unicode or str")
......
......@@ -23,7 +23,6 @@ from .nn import logical_and, logical_not, logical_or
from .utils import assert_same_structure, map_structure, hold_mutable_vars, copy_mutable_vars, padding_to_same_structure, is_sequence, pack_sequence_as, flatten, to_sequence
import numpy
import warnings
import six
from functools import reduce, partial
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
from ... import compat as cpt
......@@ -143,7 +142,7 @@ def select_input_with_buildin_type(inputs, mask, name):
raise RuntimeError(
f"Exceptions throwed while doing select_input on {name}:\n{e}")
elif (isinstance(false_var, (support_ret_buildin_type))
elif (isinstance(false_var, support_ret_buildin_type)
and isinstance(false_var, type(true_var))):
if false_var == true_var:
return false_var
......@@ -962,7 +961,7 @@ class StaticRNN(object):
boot_memories = []
pre_memories = []
memories = []
for _, mem in six.iteritems(self.memories):
for _, mem in self.memories.items():
boot_memories.append(mem.init)
pre_memories.append(mem.pre_mem.name)
assert mem.mem is not None, "%s should be updated in every step." % (
......@@ -1205,7 +1204,7 @@ class While(object):
})
support_ret_buildin_type = (bool, float, six.integer_types)
support_ret_buildin_type = (bool, float, int)
def assign_skip_lod_tensor_array(input, output):
......
......@@ -29,7 +29,6 @@ from . import ops
from ... import compat as cpt
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype
import math
import six
import numpy as np
from functools import reduce
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
......@@ -2199,7 +2198,7 @@ def multi_box_head(inputs,
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
for ratio in range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
......@@ -2338,7 +2337,7 @@ def multi_box_head(inputs,
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
for ratio in range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
......
......@@ -110,11 +110,11 @@ def data(name,
"""
helper = LayerHelper('data', **locals())
check_type(name, 'name', (six.binary_type, six.text_type), 'data')
check_type(name, 'name', (bytes, str), 'data')
check_type(shape, 'shape', (list, tuple), 'data')
shape = list(shape)
for i in six.moves.range(len(shape)):
for i in range(len(shape)):
if shape[i] is None:
shape[i] = -1
append_batch_size = False
......
......@@ -17,7 +17,7 @@ import functools
import warnings
import string
from six.moves import cStringIO
from io import StringIO
from ..proto import framework_pb2
from ..framework import OpProtoHolder, Variable, core, convert_np_dtype_to_dtype_, _non_static_mode, in_dygraph_mode, _in_legacy_dygraph
from ..layer_helper import LayerHelper
......@@ -80,7 +80,7 @@ def _generate_doc_string_(op_proto,
if not isinstance(op_proto, framework_pb2.OpProto):
raise TypeError("OpProto should be `framework_pb2.OpProto`")
buf = cStringIO()
buf = StringIO()
buf.write(escape_math(op_proto.comment))
buf.write('\nArgs:\n')
for each_input in op_proto.inputs:
......
......@@ -20,7 +20,6 @@ import inspect
import warnings
import numpy as np
import six
import paddle
from ..layer_helper import LayerHelper
......@@ -6336,7 +6335,7 @@ def one_hot(input, depth, allow_out_of_range=False):
helper = LayerHelper("one_hot", **locals())
check_variable_and_dtype(input, 'input', ['int32', 'int64'], 'one_hot')
check_type(depth, 'depth', (six.integer_types, Variable), 'one_hot')
check_type(depth, 'depth', (int, Variable), 'one_hot')
one_hot_out = helper.create_variable_for_type_inference(dtype='float32')
if not isinstance(depth, Variable):
......
......@@ -151,8 +151,7 @@ class RNNCell(object):
# TODO: Add check for the illegal
if isinstance(seq, dict):
return True
return (isinstance(seq, Sequence)
and not isinstance(seq, six.string_types))
return (isinstance(seq, Sequence) and not isinstance(seq, str))
class Shape(object):
......
......@@ -82,7 +82,7 @@ def is_sequence(seq):
"""
if isinstance(seq, dict):
return True
return (isinstance(seq, Sequence) and not isinstance(seq, six.string_types))
return (isinstance(seq, Sequence) and not isinstance(seq, str))
def _hash_with_id(*args):
......@@ -99,7 +99,7 @@ def _sorted(dict_):
Returns a sorted list of the dict keys, with error if keys not sortable.
"""
try:
return sorted(six.iterkeys(dict_))
return sorted(dict_.keys())
except TypeError:
raise TypeError("nest only supports dicts with sortable keys.")
......@@ -159,11 +159,10 @@ def _sequence_like(instance, args):
# ordered and plain dicts (e.g., flattening a dict but using a
# corresponding `OrderedDict` to pack it back).
result = dict(zip(_sorted(instance), args))
return type(instance)(
(key, result[key]) for key in six.iterkeys(instance))
return type(instance)((key, result[key]) for key in instance.keys())
elif (isinstance(instance, tuple) and hasattr(instance, "_fields")
and isinstance(instance._fields, Sequence)
and all(isinstance(f, six.string_types) for f in instance._fields)):
and all(isinstance(f, str) for f in instance._fields)):
# This is a namedtuple
return type(instance)(*args)
else:
......@@ -256,8 +255,8 @@ def _recursive_assert_same_structure(nest1, nest2, check_types):
"structure has type %s, while second structure has type %s." %
(type_nest1, type_nest2))
if isinstance(nest1, dict):
keys1 = set(six.iterkeys(nest1))
keys2 = set(six.iterkeys(nest2))
keys1 = set(nest1.keys())
keys2 = set(nest2.keys())
if keys1 != keys2:
raise ValueError(
"The two dictionaries don't have the same set of keys. First "
......@@ -382,7 +381,7 @@ def _convert_to_tensor_list(old_list, dtype="int32"):
ele.stop_gradient = True
new_list_tensor.append(ele)
else:
assert isinstance(ele, six.integer_types)
assert isinstance(ele, int)
temp_out = fill_constant([1], dtype, ele, force_cpu=True)
new_list_tensor.append(temp_out)
return new_list_tensor
......@@ -414,7 +413,7 @@ def check_shape(shape):
raise ValueError(
"All elements in ``shape`` must be positive when it's a list or tuple"
)
if not isinstance(ele, six.integer_types):
if not isinstance(ele, int):
raise TypeError(
"All elements in ``shape`` must be integers when it's a list or tuple"
)
......
......@@ -17,8 +17,6 @@ Fluid Metrics
import numpy as np
import copy
import warnings
import six
from .layer_helper import LayerHelper
from .initializer import Constant
......@@ -118,10 +116,9 @@ class MetricBase(object):
"""
states = {
attr: value
for attr, value in six.iteritems(self.__dict__)
if not attr.startswith("_")
for attr, value in self.__dict__.items() if not attr.startswith("_")
}
for attr, value in six.iteritems(states):
for attr, value in states.items():
if isinstance(value, int):
setattr(self, attr, 0)
elif isinstance(value, float):
......@@ -147,8 +144,7 @@ class MetricBase(object):
"""
states = {
attr: value
for attr, value in six.iteritems(self.__dict__)
if not attr.startswith("_")
for attr, value in self.__dict__.items() if not attr.startswith("_")
}
config = {}
config.update({"name": self._name, "states": copy.deepcopy(states)})
......
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from . import layers
from .data_feeder import check_variable_and_dtype, convert_dtype
from ..utils import deprecated
......@@ -227,7 +226,7 @@ def img_conv_group(input,
conv_with_batchnorm = __extend_list__(conv_with_batchnorm)
conv_batchnorm_drop_rate = __extend_list__(conv_batchnorm_drop_rate)
for i in six.moves.range(len(conv_num_filter)):
for i in range(len(conv_num_filter)):
local_conv_act = conv_act
if conv_with_batchnorm[i]:
local_conv_act = None
......
......@@ -13,7 +13,6 @@
# limitations under the License.
import numpy as np
import six
import paddle.fluid.core as core
import paddle.fluid.proto.framework_pb2 as framework_pb2
......@@ -27,13 +26,13 @@ def get_all_op_protos():
protostrs = core.get_all_op_protos()
ret_values = []
for pbstr in protostrs:
op_proto = framework_pb2.OpProto.FromString(six.binary_type(pbstr))
op_proto = framework_pb2.OpProto.FromString(bytes(pbstr))
ret_values.append(op_proto)
return ret_values
def is_str(s):
return isinstance(s, six.string_types)
return isinstance(s, str)
class OpDescCreationMethod(object):
......
......@@ -13,7 +13,6 @@
# limitations under the License.
import numpy as np
import six
import os
import logging
from collections import defaultdict
......@@ -6227,8 +6226,7 @@ class RecomputeOptimizer(Optimizer):
), "_checkpoints should be a list of Variable or a list of String"
for ckpt in checkpoints:
assert (
isinstance(ckpt, six.string_types)
or isinstance(ckpt, Variable)
isinstance(ckpt, str) or isinstance(ckpt, Variable)
), "_checkpoints should be a list of Variable or a list of String"
self._checkpoints = checkpoints
......
......@@ -12,10 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import warnings
import sys
from .initializer import Initializer, Xavier, Constant
from .regularizer import WeightDecayRegularizer
from paddle.fluid.data_feeder import check_type
......@@ -172,7 +168,7 @@ class ParamAttr(object):
return [ParamAttr._to_attr(a) for a in arg]
elif isinstance(arg, ParamAttr):
return arg
elif isinstance(arg, six.string_types):
elif isinstance(arg, str):
return ParamAttr(name=arg)
elif isinstance(arg, Initializer):
return ParamAttr(initializer=arg)
......
......@@ -15,7 +15,6 @@
from . import core
from .wrapped_decorator import signature_safe_contextmanager
import os
import six
import sys
from paddle.utils.deprecated import deprecated
......
......@@ -14,7 +14,6 @@
import numpy as np
import time
import six
from paddle.fluid.op import Operator
from op_test import OpTest
......@@ -38,8 +37,7 @@ class BenchmarkSuite(OpTest):
expect_t = np.array(item_cpu_out)
actual = item_gpu_out
actual_t = np.array(item_gpu_out)
var_name = variable if isinstance(
variable, six.string_types) else variable.name
var_name = variable if isinstance(variable, str) else variable.name
np.testing.assert_allclose(actual_t,
expect_t,
rtol=1e-05,
......@@ -49,7 +47,7 @@ class BenchmarkSuite(OpTest):
def _get_input_names(self):
inputs = []
for name, value in six.iteritems(self.inputs):
for name, value in self.inputs.items():
if isinstance(value, list):
inputs.extend([sub_name for sub_name, _ in value])
inputs.append(name)
......@@ -57,7 +55,7 @@ class BenchmarkSuite(OpTest):
def _get_output_names(self):
outputs = []
for var_name, var in six.iteritems(self.outputs):
for var_name, var in self.outputs.items():
if isinstance(var, list):
for sub_var_name, sub_var in var:
outputs.append(sub_var_name)
......
......@@ -13,7 +13,6 @@
# limitations under the License.
import numpy as np
import six
import paddle
import paddle.fluid as fluid
......@@ -824,7 +823,7 @@ def fake_data_reader():
def __reader__():
iteration = TrainTaskConfig.batch_size * batch_num
for _ in six.moves.range(iteration):
for _ in range(iteration):
# random data
np.random.seed = 90
src_word_np = np.arange(1, seq_len + 1).reshape([seq_len
......
......@@ -17,7 +17,6 @@ import sys
import numpy as np
import pickle
import six
import paddle
import paddle.fluid as fluid
......@@ -149,7 +148,7 @@ class TestDistSaveLoad2x2(TestDistSimnetBow2x2):
if save_mode == "LOCAL":
if need_save:
for _ in six.moves.xrange(RUN_STEP):
for _ in range(RUN_STEP):
loss, = exe.run(fetch_list=[avg_cost.name],
feed=feeder.feed(get_data()))
if need_save and model_dir:
......@@ -163,14 +162,14 @@ class TestDistSaveLoad2x2(TestDistSimnetBow2x2):
skip_steps = int(os.getenv("SKIP_STEPS"))
loss = None
if need_save:
for idx in six.moves.xrange(8):
for idx in range(8):
loss, = exe.run(fetch_list=[avg_cost.name],
feed=feeder.feed(get_data()))
if need_save and model_dir and idx == skip_steps and args.trainer_id == 0:
io.save_persistables(startup_exe, model_dir,
trainer_prog)
else:
for idx in six.moves.xrange(8):
for idx in range(8):
data = get_data()
if idx <= skip_steps:
continue
......
......@@ -15,7 +15,6 @@
import paddle
import paddle.fluid as fluid
import os
import six
import tarfile
import string
import re
......@@ -154,9 +153,8 @@ def tokenize(pattern):
while tf != None:
if bool(pattern.match(tf.name)):
# newline and punctuations removal and ad-hoc tokenization.
yield tarf.extractfile(tf).read().rstrip(
six.b("\n\r")).translate(None, six.b(
string.punctuation)).lower().split()
yield tarf.extractfile(tf).read().rstrip(b'\n\r').translate(
None, string.punctuation.encode('latin-1')).lower().split()
tf = tarf.next()
......
......@@ -15,7 +15,7 @@
import numpy as np
import time
import os
import six
import functools
import time
from functools import partial
from os.path import expanduser
......@@ -306,7 +306,7 @@ def pad_batch_data(insts,
"""
return_list = []
max_len = max(len(inst) for inst in insts)
num_token = six.moves.reduce(lambda x, y: x + y,
num_token = functools.reduce(lambda x, y: x + y,
[len(inst)
for inst in insts]) if return_num_token else 0
# Any token included in dict can be used to pad, since the paddings' loss
......@@ -547,7 +547,7 @@ def train_loop(exe, train_progm, dev_count, sum_cost, avg_cost, lr_scheduler,
np.log(TrainTaskConfig.label_smooth_eps /
(ModelHyperParams.trg_vocab_size - 1) + 1e-20))
init = False
for pass_id in six.moves.xrange(TrainTaskConfig.pass_num):
for pass_id in range(TrainTaskConfig.pass_num):
pass_start_time = time.time()
for batch_id, data in enumerate(train_data()):
if batch_id >= RUN_STEP:
......
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import numpy as np
import unittest
......@@ -106,7 +105,7 @@ class MainNetWithDict(fluid.dygraph.Layer):
# Test to call function defined outside of class.
def update_cache(cache):
for k, val in six.iteritems(cache):
for k, val in cache.items():
cache[k] = fluid.layers.softmax(val)
return cache
......
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import paddle
import unittest
......@@ -105,8 +104,7 @@ class CheckOpAttr(unittest.TestCase):
ops = cur_block.ops
for op in ops:
if op.type not in self.infos: continue
for attr_name, expect_vals in six.iteritems(
self.infos[op.type]):
for attr_name, expect_vals in self.infos[op.type].items():
op_vals = op.desc.attr(attr_name)
if not isinstance(expect_vals, list):
expect_vals = [expect_vals]
......
......@@ -14,7 +14,6 @@
import pickle
import warnings
import six
from functools import partial
import numpy as np
......@@ -271,11 +270,10 @@ def load(program, model_path, executor=None, var_list=None):
"An UnicodeDecodeError is catched, which might be caused by loading "
"a python2 saved model. Encoding of pickle.load would be set and "
"load again automatically.")
if six.PY3:
load_bak = pickle.load
pickle.load = partial(load_bak, encoding="latin1")
fluid.load(program, model_path, executor, var_list)
pickle.load = load_bak
load_bak = pickle.load
pickle.load = partial(load_bak, encoding="latin1")
fluid.load(program, model_path, executor, var_list)
pickle.load = load_bak
def load_dygraph(model_path, keep_name_table=False):
......@@ -291,10 +289,9 @@ def load_dygraph(model_path, keep_name_table=False):
"An UnicodeDecodeError is catched, which might be caused by loading "
"a python2 saved model. Encoding of pickle.load would be set and "
"load again automatically.")
if six.PY3:
load_bak = pickle.load
pickle.load = partial(load_bak, encoding="latin1")
para_dict, opti_dict = fluid.load_dygraph(
model_path, keep_name_table=keep_name_table)
pickle.load = load_bak
return para_dict, opti_dict
load_bak = pickle.load
pickle.load = partial(load_bak, encoding="latin1")
para_dict, opti_dict = fluid.load_dygraph(
model_path, keep_name_table=keep_name_table)
pickle.load = load_bak
return para_dict, opti_dict
......@@ -13,7 +13,6 @@
# limitations under the License.
import numpy as np
import six
def fake_imdb_reader(word_dict_size,
......@@ -23,7 +22,7 @@ def fake_imdb_reader(word_dict_size,
class_dim=2):
def __reader__():
for _ in six.moves.range(sample_num):
for _ in range(sample_num):
length = np.random.random_integers(low=lower_seq_len,
high=upper_seq_len,
size=[1])[0]
......
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import paddle.fluid as fluid
from paddle.fluid.framework import Variable
......@@ -44,13 +43,13 @@ class FeedDataReader(object):
next_data = next(self._iter)
feed_data = dict()
assert len(self._feed_list) == len(next_data)
for key, value in six.moves.zip(self._feed_list, next_data):
for key, value in zip(self._feed_list, next_data):
feed_data[key] = value
return feed_data
def _feed_parallel_executor(self, device_num):
feed_data = []
for _ in six.moves.range(device_num):
for _ in range(device_num):
feed_data.append(self._feed_executor())
return feed_data
......
......@@ -13,7 +13,6 @@
# limitations under the License.
"""This is the lib for gradient checker unittest."""
import six
import numpy as np
from itertools import product
import paddle
......@@ -139,7 +138,7 @@ def _compute_numerical_jacobian(program, x, y, place, scope, delta):
np_type = dtype_to_np_dtype(x.dtype)
jacobian = [make_jacobian(x, _product(yi.shape), np_type) for yi in y]
for i in six.moves.xrange(x_size):
for i in range(x_size):
orig = _get_item(x_t, i, np_type)
x_pos = orig + delta
_set_item(x_t, i, x_pos, np_type)
......@@ -151,7 +150,7 @@ def _compute_numerical_jacobian(program, x, y, place, scope, delta):
_set_item(x_t, i, orig, np_type)
for j in six.moves.xrange(len(y)):
for j in range(len(y)):
jacobian[j][i, :] = (y_pos[j] - y_neg[j]) / delta / 2.
return jacobian
......@@ -204,12 +203,12 @@ def _compute_analytical_jacobian(program, x, y, place, scope):
filted = [(i, dxi) for i, dxi in enumerate(dx) if dxi is not None]
filted_idx, filted_dx = zip(*filted)
for i in six.moves.xrange(y_size):
for i in range(y_size):
_set_item(dy_t, i, 1, np_type)
dx_res = exe.run(program, scope=scope, fetch_list=filted_dx)
for j in six.moves.xrange(len(filted_dx)):
for j in range(len(filted_dx)):
dx_idx = filted_idx[j]
if dx_res[j] is not None:
jacobian[dx_idx][:, i] = dx_res[j].flatten()
......@@ -551,7 +550,7 @@ def get_static_double_grad(x,
program = fluid.default_main_program()
scope = fluid.executor.global_scope()
y_grads = []
for i in six.moves.xrange(len(y)):
for i in range(len(y)):
yi = y[i]
dyi_name = _append_grad_suffix_(yi.name)
np_type = dtype_to_np_dtype(yi.dtype)
......@@ -768,7 +767,7 @@ def double_grad_check_for_dygraph(func,
"please check the python api unit test used."
raise RuntimeError(msg)
for i in six.moves.xrange(len(static_double_grad)):
for i in range(len(static_double_grad)):
if not np.allclose(static_double_grad[i], eager_double_grad[i], rtol,
atol):
msg = 'Check eager double result fail. Mismatch between static_graph double grad ' \
......@@ -802,7 +801,7 @@ def get_static_triple_grad(x,
program = fluid.default_main_program()
scope = fluid.executor.global_scope()
y_grads = []
for i in six.moves.xrange(len(y)):
for i in range(len(y)):
yi = y[i]
dyi_name = _append_grad_suffix_(yi.name)
np_type = dtype_to_np_dtype(yi.dtype)
......@@ -937,7 +936,7 @@ def triple_grad_check_for_dygraph(func,
"please check the python api unit test used."
raise RuntimeError(msg)
for i in six.moves.xrange(len(static_triple_grad)):
for i in range(len(static_triple_grad)):
if not np.allclose(static_triple_grad[i], eager_triple_grad[i], rtol,
atol):
msg = 'Check eager double result fail. Mismatch between static_graph double grad ' \
......
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import random
import unittest
import warnings
......@@ -104,8 +103,7 @@ class PassTest(unittest.TestCase):
for p in pass_builder.all_passes():
p.apply(graph)
opt_program.blocks = [
Block(opt_program, i)
for i in six.moves.range(opt_program.desc.num_blocks())
Block(opt_program, i) for i in range(opt_program.desc.num_blocks())
]
opt_program._sync_with_cpp()
return opt_program
......@@ -147,7 +145,7 @@ class PassTest(unittest.TestCase):
len(self.fetch_list) == len(outs_opt),
"Checking the number of fetchs failed. Expected: {}, Received: {}".
format(len(self.fetch_list), len(outs_opt)))
for i in six.moves.xrange(len(self.fetch_list)):
for i in range(len(self.fetch_list)):
is_allclose = np.allclose(outs_opt[i], outs[i], atol=atol)
if not is_allclose:
a = outs_opt[i]
......@@ -207,7 +205,7 @@ class PassTest(unittest.TestCase):
self.main_program.num_blocks, program.num_blocks))
is_different = False
for i in six.moves.xrange(program.num_blocks):
for i in range(program.num_blocks):
if len(self.main_program.block(i).ops) != len(program.block(i).ops):
# The number of ops in the block i of the origin program and
# the optimized program is different.
......
......@@ -15,7 +15,6 @@
import unittest
import paddle
import paddle.fluid as fluid
import six
from paddle.fluid.framework import IrGraph
from paddle.fluid.tests.unittests.op_test import OpTestTool
......@@ -37,7 +36,7 @@ class TestQuantizationSubGraph(unittest.TestCase):
dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
hidden = data
for _ in six.moves.xrange(num):
for _ in range(num):
hidden = fluid.layers.fc(hidden, size=128, act='relu')
loss = fluid.layers.cross_entropy(input=hidden, label=label)
loss = paddle.mean(loss)
......
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import abc
import unittest
import numpy as np
......@@ -24,8 +23,7 @@ from paddle.fluid.tests.unittests.test_gelu_op import gelu
@OpTestTool.skip_if_not_cpu_bf16()
@six.add_metaclass(abc.ABCMeta)
class MKLDNNBF16ActivationOp(object):
class MKLDNNBF16ActivationOp(metaclass=abc.ABCMeta):
@abc.abstractmethod
def config(self):
......
......@@ -20,7 +20,6 @@ import signal
import time
import socket
from contextlib import closing
from six import string_types
import math
import paddle
import paddle.fluid as fluid
......
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册