Unverified commit 3a7fbb86, authored by Xu Han, committed by GitHub

Merge pull request #1330 from luckyh/qemu-python3-map-filter-zip

[qemu] Python 3: Update map(), filter() and zip()
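
Every hunk in this commit follows the same pattern: in Python 2, `map()`, `filter()` and `zip()` return lists, while in Python 3 they return lazy iterators, so any call site that indexes, sorts, measures, re-iterates or truth-tests the result needs an explicit `list()` around it. A minimal standalone sketch of the difference (none of the names below come from the test code):

```python
# Python 3 behaviour that this commit works around; the paths and names here
# are made up for illustration only.
names = ["sn1", "sn2"]

lazy = map(lambda x: "/tmp/" + x, names)            # a map object, not a list
eager = list(map(lambda x: "/tmp/" + x, names))     # a real list

print(type(lazy))      # <class 'map'>
print(eager[0])        # '/tmp/sn1' -- indexing only works on the list
# print(lazy[0])       # TypeError: 'map' object is not subscriptable

# An iterator is also exhausted after a single pass:
print(list(lazy))      # ['/tmp/sn1', '/tmp/sn2']
print(list(lazy))      # [] -- already consumed
```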
......@@ -66,7 +66,7 @@ def run(test, params, env):
return set(process.system_output(cmd, ignore_status=True,
shell=True).splitlines())
-snapshots = map(lambda x: os.path.join(image_dir, x), ["sn1", "sn2"])
+snapshots = list(map(lambda x: os.path.join(image_dir, x), ["sn1", "sn2"]))
try:
error_context.context("Create snapshots-chain(base->sn1->sn2)",
logging.info)
......
......@@ -252,8 +252,8 @@ def run(test, params, env):
else:
for block in blocks:
if block['device'] == qemu_cdrom_device:
-key = filter(lambda x: re.match(r"tray.*open", x),
-block.keys())
+key = list(filter(lambda x: re.match(r"tray.*open", x),
+block.keys()))
# compatible rhel6 and rhel7 diff qmp output
if not key:
break
......
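
The hunk above relies on `if not key:` to detect that no matching key exists. In Python 3 a bare `filter` object is always truthy, even when it matches nothing, so the emptiness check only works once the result is materialized with `list()`. A sketch with invented QMP field names (the real keys differ between RHEL6- and RHEL7-style output, which is what the test probes):

```python
import re

# Invented block entries, for illustration only.
with_tray = {"device": "drive-ide0-1-0", "tray-open": False}
without_tray = {"device": "drive-ide0-1-0", "removable": True}

for block in (with_tray, without_tray):
    lazy = filter(lambda x: re.match(r"tray.*open", x), block.keys())
    eager = list(filter(lambda x: re.match(r"tray.*open", x), block.keys()))
    # A filter object is always truthy, so "if not lazy:" would never fire.
    print(bool(lazy), bool(eager))
# Prints: True True   (a tray key exists)
#         True False  (no match, but only the list shows it)
```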
......@@ -1021,8 +1021,8 @@ def run(test, params, env):
cpuf_model += ",-" + str(fdel)
smp = int(params["smp"])
-disable_cpus = map(lambda cpu: int(cpu),
-params.get("disable_cpus", "").split())
+disable_cpus = list(map(lambda cpu: int(cpu),
+params.get("disable_cpus", "").split()))
install_path = "/tmp"
......
......@@ -219,14 +219,14 @@ class LiveBackup(block_copy.BlockCopy):
Create files and record m5 values of them.
"""
file_names = self.params["file_names"]
-return map(self.create_file, file_names.split())
+return list(map(self.create_file, file_names.split()))
def verify_md5s(self):
"""
Check if the md5 values matches the record ones.
"""
file_names = self.params["file_names"]
-return map(self.verify_md5, file_names.split())
+return list(map(self.verify_md5, file_names.split()))
def verify_efficiency(self):
"""
......
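
For `create_files()` the `list()` is not just cosmetic: a Python 3 `map` runs its function only when the result is consumed, so without `list()` the files would never be created (and the md5 checks would never run) unless some caller happened to iterate the return value. A toy sketch, with the real method signatures assumed:

```python
# Toy stand-in for the LiveBackup helpers; create_file only records the call
# so the laziness is visible.
class ToyBackup:
    def __init__(self):
        self.created = []

    def create_file(self, name):
        self.created.append(name)     # the side effect the test depends on
        return name

backup = ToyBackup()
lazy = map(backup.create_file, "f1 f2 f3".split())
print(backup.created)                 # [] -- nothing has run yet

files = list(map(backup.create_file, "f1 f2 f3".split()))
print(backup.created)                 # ['f1', 'f2', 'f3'] -- list() forced the calls
print(files)                          # ['f1', 'f2', 'f3']
```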
......@@ -174,7 +174,7 @@ def run(test, params, env):
disk_path = os.path.join("/", "dev", "disk", "by-id")
disks = session.cmd("ls %s" % disk_path).split("\n")
session.close()
-disk = filter(lambda x: x.endswith(disk_serial), disks)
+disk = list(filter(lambda x: x.endswith(disk_serial), disks))
if not disk:
return None
return os.path.join(disk_path, disk[0])
......@@ -255,8 +255,8 @@ def run(test, params, env):
def find_disk(self):
disk_path = os.path.join("/", "dev", "disk", "by-path")
disks = process.run("ls %s" % disk_path).stdout.split("\n")
-disk = filter(lambda x: self.server_name in x, disks)
-if disk is []:
+disk = list(filter(lambda x: self.server_name in x, disks))
+if not disk:
return None
return os.path.join(disk_path, disk[0].strip())
......
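
Besides the `list()` wrapper, this hunk also fixes the emptiness test itself: `disk is []` compares object identity against a brand-new empty list and is therefore always False, whereas `not disk` is the intended check and works on the materialized list. In short:

```python
# Why "if disk is []" never triggered, and why "if not disk" does.
disk = list(filter(lambda x: "missing-serial" in x, ["sda", "sdb"]))

print(disk == [])   # True  -- equal by value
print(disk is [])   # False -- a fresh [] literal is a different object
print(not disk)     # True  -- the fixed check
```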
......@@ -287,7 +287,7 @@ def run(test, params, env):
output = session.cmd_output(cmd, timeout=cmd_timeout)
disks = re.findall(re_str, output)
-disks = map(string.strip, disks)
+disks = list(map(string.strip, disks))
disks.sort()
logging.debug("Volume list that meet regular expressions: %s",
" ".join(disks))
......
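
Here the `list()` is required because the next line calls `disks.sort()`, and a `map` object has no `sort()` method. A sketch (using `str.strip` as a stand-in for `string.strip`):

```python
raw = ["  vdb  ", "  vda  "]

disks = map(str.strip, raw)
# disks.sort()                # AttributeError: 'map' object has no attribute 'sort'

disks = list(map(str.strip, raw))
disks.sort()                  # in-place sort needs a real list
print(disks)                  # ['vda', 'vdb']
```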
......@@ -151,7 +151,7 @@ def run(test, params, env):
utils_misc.generate_random_string(4))
blk_size_list = params.get("blk_size_list", "8k").split()
-test_file_list = map(lambda x: test_file_prefix + x, blk_size_list)
+test_file_list = list(map(lambda x: test_file_prefix + x, blk_size_list))
if (not nfs_server) or (not nfs_path) or (not mnt_point):
_clean_up(STEP_2)
......
......@@ -135,9 +135,9 @@ def run(test, params, env):
"username", "password", "shell_client", "shell_port", "os_type"]
vms_info = []
for _ in params.get("vms").split():
-info = map(
+info = list(map(
lambda x: params.object_params(_).get(x),
-guest_info)
+guest_info))
vm = env.get_vm(_)
vm.verify_alive()
session = vm.wait_for_login(timeout=login_timeout)
......@@ -221,7 +221,8 @@ def run(test, params, env):
test.error("This is a openvswitch only test")
extra_options = params.get("netperf_client_options", " -l 60")
rate_brust_pairs = params.get("rate_brust_pairs").split()
-rate_brust_pairs = map(lambda x: map(int, x.split(',')), rate_brust_pairs)
+rate_brust_pairs = list(
+map(lambda x: map(int, x.split(',')), rate_brust_pairs))
results = []
try:
netperf_clients, netperf_servers = setup_netperf_env()
......
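
Note that only the outer call is materialized here; each element is still a lazy `map(int, ...)` iterator. How the test consumes the pairs is not shown in this hunk, so the sketch below only illustrates what such an element supports: it can be tuple-unpacked exactly once, but not indexed.

```python
pairs = list(map(lambda x: map(int, x.split(',')), ["1000,100", "2000,200"]))

rate, burst = pairs[0]   # iterator unpacking works (once)
print(rate, burst)       # 1000 100
# pairs[0][0]            # TypeError: 'map' object is not subscriptable
# a second "rate, burst = pairs[0]" would fail: that iterator is exhausted
```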
......@@ -42,8 +42,8 @@ def run(test, params, env):
restore_mmu_cmd = None
error_context.context("Enable ept(npt)", logging.info)
try:
-flag = filter(lambda x: x in utils_misc.get_cpu_flags(),
-['ept', 'npt'])[0]
+flag = list(filter(lambda x: x in utils_misc.get_cpu_flags(),
+['ept', 'npt']))[0]
except IndexError:
logging.warn("Host doesn't support ept(npt)")
else:
......
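
The `[0]` index is why `list()` matters here: subscripting a raw `filter` object raises `TypeError` in Python 3, which the `except IndexError` fallback would not catch. With `list()` the indexing works again and an empty result raises the `IndexError` the code expects. A sketch with a made-up flag set:

```python
host_flags = ["vmx", "ept"]          # hypothetical host CPU flags

try:
    flag = list(filter(lambda x: x in host_flags, ["ept", "npt"]))[0]
except IndexError:
    flag = None                      # host supports neither ept nor npt
print(flag)                          # 'ept'
```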
......@@ -67,7 +67,7 @@ def run(test, params, env):
# rebase snapshot image
rebase_chain = params.get("rebase_list", "").split(";")
for images in rebase_chain:
-images = map(lambda x: x.strip(), images.split(">"))
+images = list(map(lambda x: x.strip(), images.split(">")))
try:
image = images[0]
base = images[1]
......
......@@ -26,8 +26,8 @@ class QemuIOConfig(object):
self.tmpdir = test.tmpdir
self.qemu_img_binary = utils_misc.get_qemu_img_binary(params)
self.raw_files = ["stg1.raw", "stg2.raw"]
-self.raw_files = map(lambda f: os.path.join(self.tmpdir, f),
-self.raw_files)
+self.raw_files = list(map(lambda f: os.path.join(self.tmpdir, f),
+self.raw_files))
# Here we're trying to choose fairly explanatory names so it's less
# likely that we run in conflict with other devices in the system
self.vgtest_name = params.get("vgtest_name", "vg_kvm_test_qemu_io")
......
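
State kept on `self` should be a real list rather than an iterator: a `map` object is emptied by the first pass over it, so any later use of `self.raw_files` (for example during cleanup, which is assumed here rather than shown) would see nothing. A sketch:

```python
import os

tmpdir = "/tmp"
raw_files = map(lambda f: os.path.join(tmpdir, f), ["stg1.raw", "stg2.raw"])

print([os.path.basename(p) for p in raw_files])   # ['stg1.raw', 'stg2.raw']
print([os.path.basename(p) for p in raw_files])   # [] -- iterator already consumed

raw_files = list(map(lambda f: os.path.join(tmpdir, f), ["stg1.raw", "stg2.raw"]))
print(len(raw_files))                             # 2, and it can be re-read freely
```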
......@@ -25,8 +25,9 @@ def run(test, params, env):
vm.verify_alive()
session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))
-qmp_monitor = filter(lambda x: x.protocol == "qmp", vm.monitors)[0]
-humam_monitor = filter(lambda x: x.protocol == "human", vm.monitors)[0]
+qmp_monitor = list(filter(lambda x: x.protocol == "qmp", vm.monitors))[0]
+humam_monitor = list(
+filter(lambda x: x.protocol == "human", vm.monitors))[0]
callback = {"host_cmd": commands.getoutput,
"guest_cmd": session.cmd,
"monitor_cmd": humam_monitor.send_args_cmd,
......
......@@ -274,7 +274,7 @@ def run(test, params, env):
for url in latest_pkgs_url:
if "debuginfo" in url and not debuginfo:
continue
-upgrade = bool(filter(lambda x: x in url, pkgs))
+upgrade = bool(list(filter(lambda x: x in url, pkgs)))
logging.info("Install packages from: %s" % url)
install_rpm(session, url, upgrade, nodeps, timeout)
......
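
As in the tray-status sketch earlier, a bare `filter` object is always truthy, so `bool(filter(...))` would flag every URL as an upgrade; only the `list()` form reflects whether a package name really occurs in the URL. An equivalent spelling that avoids building the intermediate list would be `any()` with a generator expression (shown only as an alternative, not what the commit uses):

```python
pkgs = ["qemu-kvm", "qemu-img"]                            # made-up package names
url = "http://example.com/qemu-kvm-2.12-1.el7.x86_64.rpm"  # made-up URL

upgrade = bool(list(filter(lambda x: x in url, pkgs)))     # the commit's form
upgrade_alt = any(x in url for x in pkgs)                  # equivalent, no list built
print(upgrade, upgrade_alt)                                # True True
```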
......@@ -38,7 +38,7 @@ def barrier_2(test, vm, words, params, debug_dir, data_scrdump_filename,
# Parse barrier command line
_, dx, dy, x1, y1, md5sum, timeout = words[:7]
-dx, dy, x1, y1, timeout = map(int, [dx, dy, x1, y1, timeout])
+dx, dy, x1, y1, timeout = list(map(int, [dx, dy, x1, y1, timeout]))
# Define some paths
scrdump_filename = os.path.join(debug_dir, "scrdump.ppm")
......
......@@ -120,12 +120,13 @@ class TimedriftTest(object):
timeout=120)
host_timestr = process.system_output(host_epoch_time_cmd,
shell=True)
-epoch_host, epoch_guest = map(lambda x: re.findall(regex, x)[0],
-[host_timestr, guest_timestr])
+epoch_host, epoch_guest = list(
+map(lambda x: re.findall(regex, x)[0],
+[host_timestr, guest_timestr]))
except IndexError:
logging.debug("Host Time: %s," % guest_timestr +
"Guest Time: %s" % guest_timestr)
-return map(float, [epoch_host, epoch_guest])
+return list(map(float, [epoch_host, epoch_guest]))
@error_context.context_aware
def verify_clock_source(self, session):
......
......@@ -73,7 +73,7 @@ def run(test, params, env):
host_cpu_cnt_cmd = params["host_cpu_cnt_cmd"]
host_cpu_num = process.system_output(host_cpu_cnt_cmd, shell=True).strip()
host_cpu_list = (_ for _ in range(int(host_cpu_num)))
-cpu_pin_list = zip(vm.vcpu_threads, host_cpu_list)
+cpu_pin_list = list(zip(vm.vcpu_threads, host_cpu_list))
if len(cpu_pin_list) < len(vm.vcpu_threads):
test.cancel("There isn't enough physical cpu to pin all the vcpus")
for vcpu, pcpu in cpu_pin_list:
......
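
`zip()` also returns an iterator in Python 3: it has no `len()`, and iterating it just to count would leave nothing for the pinning loop that follows, so the result is turned into a list up front. A sketch with made-up thread ids:

```python
vcpu_threads = [111, 222]                      # made-up vcpu thread ids

pairs = zip(vcpu_threads, (n for n in range(4)))
# len(pairs)                                   # TypeError: object of type 'zip' has no len()

cpu_pin_list = list(zip(vcpu_threads, (n for n in range(4))))
print(len(cpu_pin_list))                       # 2 -- safe to compare with len(vcpu_threads)
print(cpu_pin_list)                            # [(111, 0), (222, 1)]
```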
......@@ -115,7 +115,7 @@ def run(test, params, env):
host_cpu_cnt_cmd = params["host_cpu_cnt_cmd"]
host_cpu_num = process.system_output(host_cpu_cnt_cmd, shell=True).strip()
host_cpu_list = (_ for _ in range(int(host_cpu_num)))
-cpu_pin_list = zip(vm.vcpu_threads, host_cpu_list)
+cpu_pin_list = list(zip(vm.vcpu_threads, host_cpu_list))
if len(cpu_pin_list) < len(vm.vcpu_threads):
test.cancel("There isn't enough physical cpu to pin all the vcpus")
check_one_cpu_pinned = False
......