diff --git a/qemu/tests/blk_stream.py b/qemu/tests/blk_stream.py index 1222a1bed41a9edcdfeee04dfeba75155f50ff59..58ad67f333e716faf8d3dbec0192522cad56e07e 100644 --- a/qemu/tests/blk_stream.py +++ b/qemu/tests/blk_stream.py @@ -34,7 +34,7 @@ class BlockStream(block_copy.BlockCopy): status = self.get_status() if not status: raise error.TestFail("no active job found") - msg = "block stream job runing, " + msg = "block stream job running, " if default_speed: msg += "with limited speed %s B/s" % default_speed else: diff --git a/qemu/tests/block_copy.py b/qemu/tests/block_copy.py index c052033023e6c43559f860f456b5ed6163cc2140..457efb76bff8a9a5a1e13cd4ac3a10b7bac5bbef 100644 --- a/qemu/tests/block_copy.py +++ b/qemu/tests/block_copy.py @@ -176,7 +176,7 @@ class BlockCopy(object): self.vm.monitor.get_event("RESET"), timeout=timeout) if not reseted: - raise error.TestFail("No RESET event recived after" + raise error.TestFail("No RESET event received after" "execute system_reset %ss" % timeout) self.vm.monitor.clear_event("RESET") else: diff --git a/qemu/tests/block_stream_simple.py b/qemu/tests/block_stream_simple.py index 2055e596f1861be95aca79605c38470198f00a71..b89abdd7175bc80a6b178fe7d8456415c200ace0 100644 --- a/qemu/tests/block_stream_simple.py +++ b/qemu/tests/block_stream_simple.py @@ -10,7 +10,7 @@ class BlockStreamSimple(blk_stream.BlockStream): @error.context_aware def query_status(self): """ - query runing block streaming job info; + query running block streaming job info; """ error.context("query job status", logging.info) if not self.get_status(): diff --git a/qemu/tests/block_stream_stress.py b/qemu/tests/block_stream_stress.py index 094172194d60604922d1d1cc97e362a8d3642bed..8d3f95f25e32f559a5d5af0245471b3c121e3c85 100644 --- a/qemu/tests/block_stream_stress.py +++ b/qemu/tests/block_stream_stress.py @@ -80,7 +80,7 @@ class BlockStreamStress(blk_stream.BlockStream): text="wait stress app quit", step=1.0, timeout=params["wait_timeout"]) if not stoped: - raise 
error.TestFail("stress app is still runing") + raise error.TestFail("stress app is still running") def app_runing(self): """ diff --git a/qemu/tests/boot_order_check.py b/qemu/tests/boot_order_check.py index eb99502af865be6ed8c1290ae14db2c92a53e288..f085fbff97a90462e209d811e772db17ddde29a5 100644 --- a/qemu/tests/boot_order_check.py +++ b/qemu/tests/boot_order_check.py @@ -41,7 +41,7 @@ def run_boot_order_check(test, params, env): list_nic_addr = [] # As device id in the last line of info pci output - # We need reverse the pci infomation to get the pci addr which is in the + # We need reverse the pci information to get the pci addr which is in the # front row. pci_info = vm.monitor.info("pci") pci_list = str(pci_info).split("\n") diff --git a/qemu/tests/cdrom.py b/qemu/tests/cdrom.py index 9c6f71e0fabf28c2910fe801b019c0ab63003fab..ed20e7eadc645a495ef32672fac7aaf183738b17 100644 --- a/qemu/tests/cdrom.py +++ b/qemu/tests/cdrom.py @@ -65,7 +65,7 @@ def run_cdrom(test, params, env): """ Creates 'new' iso image with one file on it - @param params: paramters for test + @param params: parameters for test @param name: name of new iso image file @param preapre: if True then it prepare cd images. @param file_size: Size of iso image in MB diff --git a/qemu/tests/cfg/kernbench2.cfg b/qemu/tests/cfg/kernbench2.cfg index ebac560f150da52f25eaa257fe6a6fa4066daa85..b52eec3220ee86ce2f2d6751af3b02b1acb1bfd5 100644 --- a/qemu/tests/cfg/kernbench2.cfg +++ b/qemu/tests/cfg/kernbench2.cfg @@ -7,7 +7,7 @@ re_result = real\s*([0-9\.]*)m([0-9\.]*)s result_file = /tmp/kernbench2_result.log # Please update following link before running this case. 
- # kernel code unsed in testing e.g kernel-2.6.32-59.tar + # kernel code unused in testing e.g kernel-2.6.32-59.tar # kernel_link = # kernbench code link kernbench-0.50.tar.gz # kernbench_link = diff --git a/qemu/tests/cfg/ksm_overcommit.cfg b/qemu/tests/cfg/ksm_overcommit.cfg index 1dfd079da05f9ad099508cec5fd0646b795e685e..444b0cbcf042dbfc604e27340a1b9ace0dae1011 100644 --- a/qemu/tests/cfg/ksm_overcommit.cfg +++ b/qemu/tests/cfg/ksm_overcommit.cfg @@ -11,7 +11,7 @@ no hugepages # Overcommit of host memmory ksm_overcommit_ratio = 3 - # Max paralel runs machine + # Max parallel runs machine ksm_parallel_ratio = 4 # Host memory reserve (default - best fit for used mem) # ksm_host_reserve = 512 diff --git a/qemu/tests/cfg/qemu_img.cfg b/qemu/tests/cfg/qemu_img.cfg index 1d7ffd8aec14b7e133ae81eaf34ca99f6780260c..9b8590d13a316040f8f1fe5560cb07e3b04720b1 100644 --- a/qemu/tests/cfg/qemu_img.cfg +++ b/qemu/tests/cfg/qemu_img.cfg @@ -10,7 +10,7 @@ force_create_image_dd = no remove_image_dd = yes create_image_cmd = "dd if=/dev/zero of=%s bs=1G count=1" - # Test the convertion from 'dd_image_name' to specified format + # Test the conversion from 'dd_image_name' to specified format supported_image_formats = qcow2 raw qed - create: subcommand = create diff --git a/qemu/tests/cfg/time_manage.cfg b/qemu/tests/cfg/time_manage.cfg index 38a1f9a86bde0cf24aca693591dc28de33581429..3df5731cc939f7f089defb50234241534ecf1362 100644 --- a/qemu/tests/cfg/time_manage.cfg +++ b/qemu/tests/cfg/time_manage.cfg @@ -4,7 +4,7 @@ extra_params +=" -rtc base=utc,driftfix=slew -snapshot" # The stress command *must* be installed in the host. # The aim is generate huge load on host so cpu, io, vm - # paramters values can be changed based on host configuration. + # parameters values can be changed based on host configuration. 
host_load_command = stress --cpu 4 --io 4 --vm 4 --vm-bytes 1G --vm-keep host_load_kill_command = killall stress reboot_method = shell diff --git a/qemu/tests/cgroup.py b/qemu/tests/cgroup.py index 16c950bcff1602d7f5b91979542433bf793f6576..8264940825a11c47df5733a57914961d37532600 100644 --- a/qemu/tests/cgroup.py +++ b/qemu/tests/cgroup.py @@ -585,7 +585,7 @@ def run_cgroup(test, params, env): """ Tests cfs scheduler utilisation when cfs_period_us and cfs_quota_us are set for each virtual CPU with multiple VMs. - Each VM have double the previous created one (1, 2, 4, 8..) upto + Each VM have double the previous created one (1, 2, 4, 8..) up to twice physical CPUs overcommit. cfs quotas are set to 1/2 thus VMs should consume exactly 100%. It measures the difference. @note: VMs are created in test @@ -933,7 +933,7 @@ def run_cgroup(test, params, env): if err: raise error.TestFail(err) else: - return ("Cpu utilisation enforced succesfully") + return ("Cpu utilisation enforced successfully") @error.context_aware def cpuset_cpus(): @@ -1123,7 +1123,7 @@ def run_cgroup(test, params, env): try: verify = _generate_verification(cpusets, no_cpus) except IndexError: - raise error.TestError("IndexError occured while generatin " + raise error.TestError("IndexError occurred while generatin " "verification data. 
Probably missmatched" " no_host_cpus and cgroup_cpuset cpus") diff --git a/qemu/tests/cpuflags.py b/qemu/tests/cpuflags.py index 0b474c2665eeed6c5e15d6a24cb1bfc2f9c1adb0..eb18b2d656151d234f491bca06803f21d9eaf7ea 100644 --- a/qemu/tests/cpuflags.py +++ b/qemu/tests/cpuflags.py @@ -621,7 +621,7 @@ def run_cpuflags(test, params, env): flags.all_possible_guest_flags) logging.info("Woking CPU flags: %s", str(Flags[0])) logging.info("Not working CPU flags: %s", str(Flags[1])) - logging.warning("Flags works even if not deffined on guest cpu " + logging.warning("Flags works even if not defined on guest cpu " "flags: %s", str(Flags[0] - guest_flags)) logging.warning("Not tested CPU flags: %s", str(Flags[2])) @@ -929,7 +929,7 @@ def run_cpuflags(test, params, env): logging.info("Woking CPU flags: %s", str(Flags[0])) logging.info("Not working CPU flags: %s", str(Flags[1])) - logging.warning("Flags works even if not deffined on" + logging.warning("Flags works even if not defined on" " guest cpu flags: %s", str(Flags[0] - flags.guest_flags)) logging.warning("Not tested CPU flags: %s", @@ -968,7 +968,7 @@ def run_cpuflags(test, params, env): str(Flags[0])) logging.info("Not working CPU flags: %s", str(Flags[1])) - logging.warning("Flags works even if not deffined on" + logging.warning("Flags works even if not defined on" " guest cpu flags: %s", str(Flags[0] - flags.guest_flags)) logging.warning("Not tested CPU flags: %s", @@ -1059,7 +1059,7 @@ def run_cpuflags(test, params, env): logging.info("Woking CPU flags: %s", str(Flags[0])) logging.info("Not working CPU flags: %s", str(Flags[1])) - logging.warning("Flags works even if not deffined on" + logging.warning("Flags works even if not defined on" " guest cpu flags: %s", str(Flags[0] - flags.guest_flags)) logging.warning("Not tested CPU flags: %s", @@ -1101,7 +1101,7 @@ def run_cpuflags(test, params, env): str(Flags[0])) logging.info("Not working CPU flags: %s", str(Flags[1])) - logging.warning("Flags works even if not deffined 
on" + logging.warning("Flags works even if not defined on" " guest cpu flags: %s", str(Flags[0] - flags.guest_flags)) logging.warning("Not tested CPU flags: %s", diff --git a/qemu/tests/drive_mirror_simple.py b/qemu/tests/drive_mirror_simple.py index 2e4e916b9f9c79bb7714bba379e6134f109f53d8..8d26ff7a822adad62b4d5806aed19614e6898474 100644 --- a/qemu/tests/drive_mirror_simple.py +++ b/qemu/tests/drive_mirror_simple.py @@ -10,7 +10,7 @@ class DriveMirrorSimple(drive_mirror.DriveMirror): @error.context_aware def query_status(self): """ - query runing block mirroring job info; + query running block mirroring job info; """ error.context("query job status", logging.info) if not self.get_status(): diff --git a/qemu/tests/drive_mirror_stress.py b/qemu/tests/drive_mirror_stress.py index 8ab8f25142e369e769481ebbadefe9cf85e55b85..b5734fb49919f75eefbb9144e239de2c9d4b03bb 100644 --- a/qemu/tests/drive_mirror_stress.py +++ b/qemu/tests/drive_mirror_stress.py @@ -46,8 +46,8 @@ class DriveMirrorStress(drive_mirror.DriveMirror): error.context("launch stress app in guest", logging.info) session.sendline(cmd) logging.info("Start command: %s" % cmd) - runing = utils_misc.wait_for(self.app_runing, timeout=150, step=5) - if not runing: + running = utils_misc.wait_for(self.app_runing, timeout=150, step=5) + if not running: raise error.TestFail("stress app isn't running") return None diff --git a/qemu/tests/floppy.py b/qemu/tests/floppy.py index d0f16efcb1e9081870eceddf6f2ca76b6f1db64c..9e177bac5a2891ac0717082065703d6e7d3de60e 100644 --- a/qemu/tests/floppy.py +++ b/qemu/tests/floppy.py @@ -31,7 +31,7 @@ def run_floppy(test, params, env): """ Creates 'new' floppy with one file on it - @param params: paramters for test + @param params: parameters for test @param preapre: if True then it prepare cd images. @return: path to new floppy file. 
diff --git a/qemu/tests/format_disk.py b/qemu/tests/format_disk.py index a16b97aa3f0ce77aed50eba93298b5f90f4d8d41..a0344f6e49b6456355ed7e6bafd076db8f60c5f9 100644 --- a/qemu/tests/format_disk.py +++ b/qemu/tests/format_disk.py @@ -89,7 +89,7 @@ def run_format_disk(test, params, env): if s != 0: raise error.TestFail("Read file error: %s" % o) if o.strip() != ranstr: - raise error.TestFail("The content writen to file has changed") + raise error.TestFail("The content written to file has changed") umount_cmd = params.get("umount_cmd") if umount_cmd: diff --git a/qemu/tests/ksm_overcommit.py b/qemu/tests/ksm_overcommit.py index 030d6603f7d4d88737f1aa769d7e4efea5edc38e..fe9baedc2727704f86e2d905062b8f7a4146c196 100644 --- a/qemu/tests/ksm_overcommit.py +++ b/qemu/tests/ksm_overcommit.py @@ -30,7 +30,7 @@ def run_ksm_overcommit(test, params, env): memory) (S2, shouldn't finish) 4) Destroy all VMs but the last one 5) Checks the last VMs memory for corruption - Paralel mode - uses one VM with multiple allocator workers. Executes + Parallel mode - uses one VM with multiple allocator workers. Executes scenarios in parallel to put more stress on the KVM. 0) Prints out the setup and initialize guest(s) 1) Fills memory with the same number (S1) diff --git a/qemu/tests/migration_multi_host_ping_pong.py b/qemu/tests/migration_multi_host_ping_pong.py index cc6e6226cc2ac175bb1b30f485fbe89914a8bf2d..40691a9ee52a37f6f8595f837f3fd6a66f900987 100644 --- a/qemu/tests/migration_multi_host_ping_pong.py +++ b/qemu/tests/migration_multi_host_ping_pong.py @@ -95,7 +95,7 @@ def run_migration_multi_host_ping_pong(test, params, env): disk_out = ("\ndisk_test_output: \n" + session.cmd_output("cat %s" % (self.disktest_out))) - raise error.TestFail("Something wrong happend" + raise error.TestFail("Something wrong happened" " during migration %s" " should be running all time" " during this test." 
@@ -172,7 +172,7 @@ def run_migration_multi_host_ping_pong(test, params, env): cpu_flags_out = ("\n cpuflags_test_output: \n" + session.cmd_output("cat %s" % (self.cpuflags_test_out))) - raise error.TestFail("Something wrong happend" + raise error.TestFail("Something wrong happened" " during migration cpuflags-test" " should be running all time" " during this test.\n%s" % @@ -196,7 +196,7 @@ def run_migration_multi_host_ping_pong(test, params, env): disk_out = ("\n cpuflags_test_output: \n" + session.cmd_output("cat %s" % (self.disktest_out))) - raise error.TestFail("Something wrong happend" + raise error.TestFail("Something wrong happened" " during migration disktest" " should be running all time" " during this test.\n%s" % diff --git a/qemu/tests/multi_vms_file_transfer.py b/qemu/tests/multi_vms_file_transfer.py index d6ec3c47459a1533c85ce38208e0f27b6ac53e1a..84f10d25bb39175584b572753018dd0aa1e435a8 100644 --- a/qemu/tests/multi_vms_file_transfer.py +++ b/qemu/tests/multi_vms_file_transfer.py @@ -67,7 +67,7 @@ def run_multi_vms_file_transfer(test, params, env): error.context("Creating %dMB file on host" % filesize, logging.info) utils.run(cmd) orig_md5 = utils.hash_file(host_path, method="md5") - error.context("Transfering file host -> VM1, timeout: %ss" % \ + error.context("Transferring file host -> VM1, timeout: %ss" % \ transfer_timeout, logging.info) t_begin = time.time() vm1.copy_files_to(host_path, guest_path, timeout=transfer_timeout) @@ -83,7 +83,7 @@ def run_multi_vms_file_transfer(test, params, env): log_vm1 = os.path.join(test.debugdir, "remote_scp_to_vm1_%s.log" %i) log_vm2 = os.path.join(test.debugdir, "remote_scp_to_vm2_%s.log" %i) - msg = "Transfering file VM1 -> VM2, timeout: %ss." % transfer_timeout + msg = "Transferring file VM1 -> VM2, timeout: %ss." 
% transfer_timeout msg += " Repeat: %s/%s" % (i + 1, repeat_time) error.context(msg, logging.info) t_begin = time.time() @@ -100,7 +100,7 @@ def run_multi_vms_file_transfer(test, params, env): md5_check(session_vm2, orig_md5) session_vm1.cmd("rm -rf %s" % guest_path) - msg = "Transfering file VM2 -> VM1, timeout: %ss." % transfer_timeout + msg = "Transferring file VM2 -> VM1, timeout: %ss." % transfer_timeout msg += " Repeat: %s/%s" % (i + 1, repeat_time) error.context(msg, logging.info) diff --git a/qemu/tests/numa_basic.py b/qemu/tests/numa_basic.py index 064ed60f4c4ca9dab68eef81152299db3bb422d1..d16277c126b85291467eec502634c1be566e1f52 100644 --- a/qemu/tests/numa_basic.py +++ b/qemu/tests/numa_basic.py @@ -16,7 +16,7 @@ def run_numa_basic(test, params, env): 2) Start a guest and bind it on the cpus of one node 3) Check the memory status of qemu process. It should mainly use the memory in the same node. - 4) Destory the guest + 4) Destroy the guest 5) Repeat step 2 ~ 4 on every node in host @param test: QEMU test object diff --git a/qemu/tests/qemu_img.py b/qemu/tests/qemu_img.py index 490adf1d108add029c9d673538e3ce5d5953786b..8b5057bed67a0122e914c6d74d6eef290aa33c6d 100644 --- a/qemu/tests/qemu_img.py +++ b/qemu/tests/qemu_img.py @@ -377,11 +377,11 @@ def run_qemu_img(test, params, env): "original file") vm.destroy() - # Excecute the commit command + # Execute the commit command cmitcmd = "%s commit -f %s %s.%s" % (cmd, image_format, backing_file_name, image_format) - error.context("Commiting image by command %s" % cmitcmd, + error.context("Committing image by command %s" % cmitcmd, logging.info) try: utils.system(cmitcmd, verbose=False) diff --git a/qemu/tests/sr_iov_boot_negative.py b/qemu/tests/sr_iov_boot_negative.py index 6fc00792c55262cda74e46bd3eae29c2f25fd3d6..e8cc42f725434eb889bacadbd562f26c14b2a7fd 100644 --- a/qemu/tests/sr_iov_boot_negative.py +++ b/qemu/tests/sr_iov_boot_negative.py @@ -6,9 +6,9 @@ from virttest import env_process 
@error.context_aware def run_sr_iov_boot_negative(test, params, env): """ - KVM boot with negative paramter test: + KVM boot with negative parameter test: 1) Try to boot VM with negative parameters. - 2) Verify that qemu could handle the negative paramters. + 2) Verify that qemu could handle the negative parameters. Check the negative message (optional) @param test: qemu test object diff --git a/qemu/tests/tracing_exception_injection.py b/qemu/tests/tracing_exception_injection.py index 18d2136927f5abe15fcd216b26d619b301b5369f..b1b9335fda7fd5c5959ac5db2c66cbc286ceb04f 100644 --- a/qemu/tests/tracing_exception_injection.py +++ b/qemu/tests/tracing_exception_injection.py @@ -39,6 +39,6 @@ def run_tracing_exception_injection(test, params, env): try: utils.run(inj_check_cmd) except error.CmdError: - err_msg = "kvm:kvm_inj_exception is not an avaliable event in host" + err_msg = "kvm:kvm_inj_exception is not an available event in host" raise error.TestFail(err_msg) logging.info("Host supports tracing of exception injection in KVM") diff --git a/qemu/tests/transfer_file_over_ipv6.py b/qemu/tests/transfer_file_over_ipv6.py index 6c82e3d2dd2050ae0e5e8e106f1b91f090cd02c9..20b4279858fc3936c200e20b2f311c547c5aecaa 100644 --- a/qemu/tests/transfer_file_over_ipv6.py +++ b/qemu/tests/transfer_file_over_ipv6.py @@ -105,7 +105,7 @@ def run_transfer_file_over_ipv6(test, params, env): for vm_src in addresses: for vm_dst in addresses: if vm_src != vm_dst: - error.context("Transfering data from %s to %s" % + error.context("Transferring data from %s to %s" % (vm_src.name, vm_dst.name), logging.info) remote.scp_between_remotes("%s%%%s" % (addresses[vm_src], host_ifname), diff --git a/qemu/tests/virtio_console.py b/qemu/tests/virtio_console.py index cb18dff4636453a2fcfee937458d4f7ed56a6e67..25bfc9bba7bf4c16f0743c6faed6e462b5e64987 100644 --- a/qemu/tests/virtio_console.py +++ b/qemu/tests/virtio_console.py @@ -645,7 +645,7 @@ def run_virtio_console(test, params, env): err = "" end_time = 
time.time() + test_time no_threads = len(threads) - transfered = [0] * no_threads + transferred = [0] * no_threads while end_time > time.time(): if not vm.is_alive(): err += "main(vmdied), " @@ -654,12 +654,12 @@ def run_virtio_console(test, params, env): if not threads[i].isAlive(): err += "main(th%s died), " % threads[i] _transfered.append(threads[i].idx) - if (_transfered == transfered and - transfered != [0] * no_threads): + if (_transfered == transferred and + transferred != [0] * no_threads): err += "main(no_data), " - transfered = _transfered + transferred = _transfered if err: - logging.error("Error occured while executing loopback " + logging.error("Error occurred while executing loopback " "(%d out of %ds)", test_time - int(end_time - time.time()), test_time) @@ -692,7 +692,7 @@ def run_virtio_console(test, params, env): tmp[:-2]) if err: no_errors += 1 - logging.error("test_loopback: error occured in threads: %s.", + logging.error("test_loopback: error occurred in threads: %s.", err[:-2]) guest_worker.safe_exit_loopback_threads([send_pt], recv_pts) @@ -713,7 +713,7 @@ def run_virtio_console(test, params, env): cleanup(vm, guest_worker) if no_errors: - msg = ("test_loopback: %d errors occured while executing test, " + msg = ("test_loopback: %d errors occurred while executing test, " "check log for details." % no_errors) logging.error(msg) raise error.TestFail(msg) @@ -804,8 +804,8 @@ def run_virtio_console(test, params, env): Hibernate (S4) and resume the VM. @note: data loss is handled differently in this case. First we set data loss to (almost) infinity. After the resume we - periodically check the number of transfered and lost data. - When there is no loss and number of transfered data is + periodically check the number of transferred and lost data. + When there is no loss and number of transferred data is sufficient, we take it as the initial data loss is over. Than we set the allowed loss to 0. 
""" @@ -854,7 +854,7 @@ def run_virtio_console(test, params, env): if dcount < 100: continue if dloss == 0: - # at least 100 chars were transfered without data loss + # at least 100 chars were transferred without data loss # the initial loss is over break _loss = loss @@ -991,7 +991,7 @@ def run_virtio_console(test, params, env): if not threads[1].isAlive(): logging.error('RecvCheck thread stopped unexpectedly.') if count == threads[1].idx: - logging.error('No data transfered after interruption!') + logging.error('No data transferred after interruption!') logging.info('Output from GuestWorker:\n%s', guest_worker.read_nonblocking()) try: @@ -1008,7 +1008,7 @@ def run_virtio_console(test, params, env): logging.warn("Failed to get info from qtree: %s", inst) exit_event.set() vm.verify_kernel_crash() - raise error.TestFail('No data transfered after' + raise error.TestFail('No data transferred after' 'interruption.') except Exception, inst: err = True @@ -1155,7 +1155,7 @@ def run_virtio_console(test, params, env): thread.join() if thread.ret_code: no_errors += 1 - logging.error("test_perf: error occured in thread %s", + logging.error("test_perf: error occurred in thread %s", thread) # Let the guest read-out all the remaining data @@ -1199,7 +1199,7 @@ def run_virtio_console(test, params, env): thread.join() if thread.ret_code: no_errors += 1 - logging.error("test_perf: error occured in thread %s", + logging.error("test_perf: error occurred in thread %s", thread) # Deviation is higher than single time_slice if (_time > time_slice): @@ -1233,7 +1233,7 @@ def run_virtio_console(test, params, env): del exit_event cleanup(vm, guest_worker) if no_errors: - msg = ("test_perf: %d errors occured while executing test, " + msg = ("test_perf: %d errors occurred while executing test, " "check log for details." 
% no_errors) logging.error(msg) raise error.TestFail(msg) @@ -1261,7 +1261,7 @@ def run_virtio_console(test, params, env): ports = get_virtio_ports(vm)[0] # TODO BUG: sendlen = max allowed data to be lost per one migration - # TODO BUG: using SMP the data loss is upto 4 buffers + # TODO BUG: using SMP the data loss is up to 4 buffers # 2048 = char.dev. socket size, parms[2] = host->guest send buffer size sendlen = 2 * 2 * max(qemu_virtio_port.SOCKET_SIZE, blocklen) if not offline: # TODO BUG: online migration causes more loses @@ -1345,7 +1345,7 @@ def run_virtio_console(test, params, env): time.sleep(2) if not threads[0].isAlive(): if exit_event.isSet(): - raise error.TestFail("Exit event emited, check the log for" + raise error.TestFail("Exit event emitted, check the log for " "send/recv thread failure.") else: exit_event.set() @@ -1359,7 +1359,7 @@ def run_virtio_console(test, params, env): if verified[i] == threads[i + 1].idx: exit_event.set() raise error.TestFail("No new data in %d console were " - "transfered after migration %d" + "transferred after migration %d" % (i, (j + 1))) verified[i] = threads[i + 1].idx logging.info("%d out of %d migration(s) passed", (j + 1), @@ -1393,7 +1393,7 @@ def run_virtio_console(test, params, env): logging.info("test_migrate: %s data received and verified during %d " "migrations", tmp[:-2], no_migrations) if err: - msg = "test_migrate: error occured in threads: %s." % err[:-2] + msg = "test_migrate: error occurred in threads: %s."
% err[:-2] logging.error(msg) raise error.TestFail(msg) @@ -1605,7 +1605,7 @@ def run_virtio_console(test, params, env): @param cfg: virtio_console_loops - how many loops to run """ # TODO: QMP - # TODO: check qtree for device presense + # TODO: check qtree for device presence pause = int(params.get("virtio_console_pause", 10)) vm = get_vm_with_ports() idx = 1 @@ -1618,7 +1618,7 @@ def run_virtio_console(test, params, env): % (idx)) time.sleep(pause) if ret != "": - raise error.TestFail("Error occured while hotpluging virtio-" + raise error.TestFail("Error occurred while hotplugging virtio-" "pci. Iteration %s, monitor output:\n%s" % (i, ret)) @@ -1791,15 +1791,15 @@ def run_virtio_console(test, params, env): method=params.get('virtio_console_method', 'shell'), timeout=720) except Exception, details: - for proces in process: - proces.terminate() + for proc in process: + proc.terminate() for port in vm.virtio_ports: port.close() raise error.TestFail("Fail to reboot VM:\n%s" % details) # close the virtio ports and process - for proces in process: - proces.terminate() + for proc in process: + proc.terminate() for port in vm.virtio_ports: port.close() error.context("Executing basic loopback after reboot.", logging.info) diff --git a/qemu/tests/virtual_nic_private.py b/qemu/tests/virtual_nic_private.py index 9a5369b065ba73a3a2ca660aafe924f04cd8f699..d5e1477836be731a364a426241ce17c67af1c7cb 100644 --- a/qemu/tests/virtual_nic_private.py +++ b/qemu/tests/virtual_nic_private.py @@ -72,7 +72,7 @@ def run_virtual_nic_private(test, params, env): sessions[0].cmd(dd_cmd % (src_file, filesize), timeout=timeout) t.start() - error.context("Transfering file guest1 -> guest2", logging.info) + error.context("Transferring file guest1 -> guest2", logging.info) remote.scp_between_remotes(addresses[0], addresses[1], shell_port, password, password, username, username, src_file, dst_file) diff --git a/qemu/tests/virtual_nic_send_buffer.py b/qemu/tests/virtual_nic_send_buffer.py
index 6ad92e819f9c55cacea53789f021a46295788249..6049735fabab18a27914816913c08f899035fc64 100644 --- a/qemu/tests/virtual_nic_send_buffer.py +++ b/qemu/tests/virtual_nic_send_buffer.py @@ -74,7 +74,7 @@ def run_virtual_nic_send_buffer(test, params, env): try: error.context("Transfer data from host to each guest") for vm in vms: - error.context("Transfering data from host to guest %s " % vm.name, + error.context("Transferring data from host to guest %s " % vm.name, logging.info) vm.copy_files_to(host_file, src_file, timeout=transfer_timeout) @@ -93,7 +93,7 @@ def run_virtual_nic_send_buffer(test, params, env): for vm_src in addresses: for vm_dst in addresses: if vm_src != vm_dst: - error.context("Transfering data %s to %s" % + error.context("Transferring data %s to %s" % (vm_src, vm_dst), logging.info) remote.udp_copy_between_remotes(vm_src, vm_dst, shell_port, diff --git a/qemu/tests/win_virtio_update.py b/qemu/tests/win_virtio_update.py index c26ec27c5b813ee2820e0895441d6b0ee97104d5..b7284b145e2b5408a42d3ea97f477dee4615c676 100644 --- a/qemu/tests/win_virtio_update.py +++ b/qemu/tests/win_virtio_update.py @@ -61,7 +61,7 @@ def run_win_virtio_update(test, params, env): if re.findall("zip$", url_virtio_win): utils.system("cd /tmp/virtio_win; unzip *; rm -f *.zip") - virtio_version = params.get("virtio_version", "unkown") + virtio_version = params.get("virtio_version", "unknown") virtio_iso = params.get("cdrom_virtio", "/tmp/prewhql.iso") utils.system("mkisofs -J -o %s /tmp/virtio_win" % virtio_iso) diff --git a/qemu/tests/yonit_bitmap.py b/qemu/tests/yonit_bitmap.py index df73363d86c342b434a76ab823f95860167f722c..74f750cc0bb9c95383980a9967f2652971a75775 100644 --- a/qemu/tests/yonit_bitmap.py +++ b/qemu/tests/yonit_bitmap.py @@ -11,7 +11,7 @@ def run_yonit_bitmap(test, params, env): for regression test of BZ #556455. 
Run the benchmark (infinite) loop background using - run_guest_test_background, and detect the existance of the process + run_guest_test_background, and detect the existence of the process in guest. 1. If the process exits before test timeout, that means the benchmark diff --git a/tests/build.py b/tests/build.py index 6dfd2b90b63e786f5669d5fcdffb72f247d312b6..4a7791025e1384013c734561826192ce472b2d00 100644 --- a/tests/build.py +++ b/tests/build.py @@ -13,7 +13,7 @@ def run_build(test, params, env): srcdir = params.get("srcdir", test.srcdir) params["srcdir"] = srcdir - # Flag if a installer minor failure ocurred + # Flag if an installer minor failure occurred minor_failure = False minor_failure_reasons = [] diff --git a/tests/ethtool.py b/tests/ethtool.py index 96dd78d5ddafe878b5b687971bf74eb141831034..3fb51b0e044fdfaeb88523a01c5dac150554eea0 100644 --- a/tests/ethtool.py +++ b/tests/ethtool.py @@ -172,7 +172,7 @@ def run_ethtool(test, params, env): lambda:session.cmd_status("pgrep tcpdump") == 0, 30): return (False, "Tcpdump process wasn't launched") - txt = "Transfering file %s from %s" % (filename, src) + txt = "Transferring file %s from %s" % (filename, src) error.context(txt, logging.info) try: copy_files_func(filename, filename) diff --git a/tests/rv_fullscreen.py b/tests/rv_fullscreen.py index 5ea89242adf645cfe75e8361869f9be1c232de81..fd7ce1febdac66ff8a815ff30a3cf4bbd6f18467 100644 --- a/tests/rv_fullscreen.py +++ b/tests/rv_fullscreen.py @@ -1,7 +1,7 @@ """ rv_fullscreen.py - remote-viewer full screen Testing the remote-viewer --full-screen option - If successfull, the resolution of the guest will + If successful, the resolution of the guest will take the resolution of the client.
Requires: connected binaries remote-viewer, Xorg, gnome session diff --git a/tests/rv_input.py b/tests/rv_input.py index 8349a8c7cf02d838099f1dff3afed3096e37adef..c632a5ff576c3c32ea86c3285e5e24c3722eefb4 100644 --- a/tests/rv_input.py +++ b/tests/rv_input.py @@ -19,7 +19,7 @@ def deploy_epel_repo(guest_session, params): @param params """ - #Check existance of epel repository + #Check existence of epel repository cmd = ("if [ ! -f /etc/yum.repos.d/epel.repo ]; then echo" " \"NeedsInstall\"; fi") output = guest_session.cmd(cmd, timeout=10) diff --git a/tests/unattended_install.py b/tests/unattended_install.py index f9bd8d2d6ea54e4dd0c860eb4b98e617c3ce46d5..01097518965c9bdf5a51ae8c000ee0e5363b24b7 100644 --- a/tests/unattended_install.py +++ b/tests/unattended_install.py @@ -112,7 +112,7 @@ class UnattendedInstallConfig(object): """ def __init__(self, test, params, vm): """ - Sets class atributes from test parameters. + Sets class attributes from test parameters. @param test: QEMU test object. @param params: Dictionary with test parameters. diff --git a/tests/vlan.py b/tests/vlan.py index f815c440c5bee794bf8fc24eb8e93036a906eecc..068681f27aa73a5245016981ccc39761f63e6e70 100644 --- a/tests/vlan.py +++ b/tests/vlan.py @@ -108,12 +108,12 @@ def run_vlan(test, params, env): else: logging.info("digest_origin is %s", digest_origin[src]) logging.info("digest_receive is %s", digest_receive) - raise error.TestFail("File transfered differ from origin") + raise error.TestFail("File transferred differs from origin") session[dst].cmd("rm -f receive") def flood_ping(src, dst): - # we must use a dedicated session becuase the aexpect + # we must use a dedicated session because the aexpect # does not have the other method to interrupt the process in # the guest rather than close the session.
error.context("Flood ping from %s interface %s to %s" % (vm[src].name, @@ -148,7 +148,7 @@ def run_vlan(test, params, env): session.append(vm[i].wait_for_login(timeout=login_timeout)) if not session[i] : raise error.TestError("Could not log into guest %s" % vm[i].name) - logging.info("Logged in %s successfull" % vm[i].name) + logging.info("Logged in %s successful" % vm[i].name) ifname.append(utils_net.get_linux_ifname(session[i], vm[i].get_mac_address())) @@ -200,7 +200,7 @@ def run_vlan(test, params, env): flood_ping(0, 1) flood_ping(1, 0) - error.context("Transfering data through nc", logging.info) + error.context("Transferring data through nc", logging.info) nc_transfer(0, 1) nc_transfer(1, 0) diff --git a/tests/watchdog.py b/tests/watchdog.py index 11aaf6036748419394d37631bffddc22b62f9aee..d9ed4cee2659ef7615b40b601dab88a06c50abcd 100644 --- a/tests/watchdog.py +++ b/tests/watchdog.py @@ -68,8 +68,8 @@ def run_watchdog(test, params, env): def _action_check(session, watchdog_action): """ - Check whether or not the watchdog action occured. if the action was - not occured will raise error. + Check whether or not the watchdog action occurred. if the action was + not occurred will raise error. """ #when watchdog action is pause, shutdown, reset, poweroff #the vm session will lost responsive