Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
openeuler
tp-qemu
提交
30d73b93
T
tp-qemu
项目概览
openeuler
/
tp-qemu
通知
1
Star
0
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
T
tp-qemu
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
30d73b93
编写于
3月 22, 2016
作者:
S
suqinhuang
浏览文件
操作
浏览文件
下载
差异文件
Merge pull request #481 from spcui/migration_multi_host_downtime
qemu/tests: Update migration_multi_host_downtime_and_speed.py
上级
c11b2714
e75ce799
变更
2
隐藏空白更改
内联
并排
Showing
2 changed files
with
131 additions
and
90 deletions
+131
-90
qemu/tests/cfg/multi_host.cfg
qemu/tests/cfg/multi_host.cfg
+24
-4
qemu/tests/migration_multi_host_downtime_and_speed.py
qemu/tests/migration_multi_host_downtime_and_speed.py
+107
-86
未找到文件。
qemu/tests/cfg/multi_host.cfg
浏览文件 @
30d73b93
...
...
@@ -127,12 +127,22 @@
control_args = "--cpu 4 --io 4 --vm 2 --vm-bytes 128M --timeout 120s"
check_running_cmd = "pgrep stress"
need_cleanup = no
- downtime:
sub_type = downtime
# downtime in seconds.
max_downtime = 10
- set_downtime:
not_wait_for_migration = yes
type = migration_multi_host_downtime_and_speed
bg_stress_test = autotest_control
test_control_file = stress.control
control_args = "--cpu 4 --io 4 --vm 2 --vm-bytes 64M"
check_running_cmd = "pgrep stress"
kill_bg_stress_cmd = "killall -9 stress"
variants:
- before_migrate:
sub_type = before_migrate
mig_downtime = 5
- after_migrate:
sub_type = after_migrate
# downtime in seconds.
max_downtime = 10
- speed:
sub_type = speed
# speed in Mb
...
...
@@ -145,11 +155,21 @@
count_of_change = 10
not_wait_for_migration = yes
type = migration_multi_host_downtime_and_speed
bg_stress_test = autotest_control
test_control_file = stress.control
control_args = "--cpu 4 --io 4 --vm 2 --vm-bytes 64M"
check_running_cmd = "pgrep stress"
kill_bg_stress_cmd = "killall -9 stress"
- mig_stop_during:
sub_type = stop_during
wait_before_stop = 3
not_wait_for_migration = no
type = migration_multi_host_downtime_and_speed
bg_stress_test = autotest_control
test_control_file = stress.control
control_args = "--cpu 4 --io 4 --vm 2 --vm-bytes 64M"
check_running_cmd = "pgrep stress"
kill_bg_stress_cmd = "killall -9 stress"
- ping-pong-stress:
# amount of memory used for migration stress
# amount of memory shouldn't be too high otherwise
...
...
qemu/tests/migration_multi_host_downtime_and_speed.py
浏览文件 @
30d73b93
import
logging
import
os
import
time
from
autotest.client.shared
import
error
from
autotest.client.shared
import
utils
from
virttest
import
utils_misc
from
virttest
import
utils_test
from
virttest
import
remote
from
virttest
import
virt_vm
from
virttest
import
utils_misc
from
provider
import
cpuflags
@
error
.
context_aware
def
run
(
test
,
params
,
env
):
"""
KVM multi-host migration test:
Migration execution progress is described in documentation
for migrate method in class MultihostMigration.
steps:
1) login vm and load stress
2) set downtime before migrate (optional)
3) do migration
4) set downtime/speed after migrate (optional)
5) check downtime/speed value when migrate finished
:param test: kvm test object.
:param params: Dictionary with test parameters.
...
...
@@ -37,14 +39,15 @@ def run(test, params, env):
def
__init__
(
self
,
test
,
params
,
env
):
super
(
TestMultihostMigration
,
self
).
__init__
(
test
,
params
,
env
)
self
.
install_path
=
params
.
get
(
"cpuflags_install_path"
,
"/tmp"
)
self
.
vm_mem
=
int
(
params
.
get
(
"mem"
,
"512"
))
self
.
srchost
=
self
.
params
.
get
(
"hosts"
)[
0
]
self
.
dsthost
=
self
.
params
.
get
(
"hosts"
)[
1
]
self
.
is_src
=
params
[
"hostid"
]
==
self
.
srchost
self
.
vms
=
params
[
"vms"
].
split
()
self
.
sub_type
=
self
.
params
.
get
(
"sub_type"
,
None
)
self
.
mig_downtime
=
int
(
self
.
params
.
get
(
"mig_downtime"
,
"3"
))
self
.
max_downtime
=
int
(
self
.
params
.
get
(
"max_mig_downtime"
,
"10"
))
self
.
wait_mig_timeout
=
int
(
self
.
params
.
get
(
"wait_mig_timeout"
,
"30"
))
self
.
min_speed
=
self
.
params
.
get
(
"min_migration_speed"
,
"10"
)
self
.
max_speed
=
self
.
params
.
get
(
"max_migration_speed"
,
"1000"
)
self
.
ch_speed
=
int
(
self
.
params
.
get
(
"change_speed_interval"
,
1
))
...
...
@@ -55,7 +58,10 @@ def run(test, params, env):
self
.
speed_step
=
int
((
self
.
max_speed
-
self
.
min_speed
)
/
speed_count
)
if
self
.
sub_type
==
"downtime"
:
if
self
.
sub_type
==
"before_migrate"
:
self
.
before_migration
=
self
.
before_migration_downtime
self
.
post_migration
=
self
.
post_migration_before_downtime
if
self
.
sub_type
==
"after_migrate"
:
self
.
post_migration
=
self
.
post_migration_downtime
elif
self
.
sub_type
==
"speed"
:
self
.
post_migration
=
self
.
post_migration_speed
...
...
@@ -65,67 +71,86 @@ def run(test, params, env):
error
.
TestFail
(
"Wrong subtest type selected %s"
%
(
self
.
sub_type
))
def mig_finished(self, vm):
    """
    Report whether migration of *vm* has completed.

    For a SPICE display with seamless migration enabled, the SPICE
    session must also report ``migrated: true``; otherwise only the
    QEMU monitor's migrate status is consulted.

    :param vm: VM object whose monitor is queried.
    :return: True when migration is no longer active.
    """
    spice_done = True
    seamless_spice = (vm.params["display"] == "spice" and
                      vm.get_spice_var("spice_seamless_migration") == "on")
    if seamless_spice:
        spice_info = vm.monitor.info("spice")
        # Human monitor returns a string; QMP returns a dict.
        if isinstance(spice_info, str):
            spice_done = "migrated: true" in spice_info
        else:
            spice_done = spice_info.get("migrated") == "true"
    mig_info = vm.monitor.info("migrate")
    if isinstance(mig_info, str):
        return spice_done and ("status: active" not in mig_info)
    return spice_done and (mig_info.get("status") != "active")
def clean_up(self, vm):
    """
    Terminate the background stress workload running inside *vm*.

    :param vm: VM object to log into.
    """
    # NOTE: `params` is the test parameter dict from the enclosing
    # run() scope, not an attribute of this class.
    stop_cmd = params.get("kill_bg_stress_cmd", "killall -9 stress")
    logging.info("Kill the background stress test in the guest.")
    guest_session = vm.wait_for_login(timeout=self.login_timeout)
    guest_session.sendline(stop_cmd)
    guest_session.close()
@error.context_aware
def check_mig_downtime(self, vm):
    """
    Verify the real migration downtime did not exceed the configured limit.

    The monitor reports downtime in milliseconds, while
    ``self.mig_downtime`` is in seconds, hence the ``* 1000``.

    :param vm: VM object whose monitor is queried.
    :raise error.TestFail: when the measured downtime is above the limit.
    """
    logging.info("Check downtime after migration.")
    actual_downtime = int(vm.monitor.info("migrate").get("downtime"))
    if actual_downtime > self.mig_downtime * 1000:
        # BUGFIX: the original bound the message to a local named `error`,
        # shadowing the autotest `error` module -- the subsequent
        # `error.TestFail(...)` would then raise AttributeError instead
        # of the intended test failure.  Use a distinct name.
        err_msg = "Migration failed for setting downtime, "
        err_msg += "Expected: '%d', Actual: '%d'" % (self.mig_downtime,
                                                     actual_downtime)
        raise error.TestFail(err_msg)
@error.context_aware
def before_migration_downtime(self, mig_data):
    """
    Apply the configured downtime limit before migration starts.

    Only the source host sets the limit; on the destination this is
    a no-op.

    :param mig_data: migration data object (unused here).
    """
    # Guard clause: nothing to do on the destination side.
    if not self.is_src:
        return
    # `env` and `params` come from the enclosing run() scope.
    source_vm = env.get_vm(params["main_vm"])
    error.context("Set downtime before migration.", logging.info)
    source_vm.monitor.migrate_set_downtime(self.mig_downtime)
@error.context_aware
def post_migration_before_downtime(self, vm, cancel_delay, mig_offline,
                                   dsthost, vm_ports,
                                   not_wait_for_migration, fd, mig_data):
    """
    Post-migration hook for the "downtime set before migrate" variant.

    Waits for the already-running migration of *vm* to finish within
    ``self.mig_timeout`` and converts a timeout into a test failure.

    NOTE(review): the signature appears to mirror the base class'
    post_migration hook -- most parameters are unused here but must be
    accepted; confirm against MultihostMigration.  This view is a diff
    fragment, so later statements of this method may not be visible.

    :raise error.TestFail: when migration does not finish in time.
    """
    try:
        vm.wait_for_migration(self.mig_timeout)
    except virt_vm.VMMigrateTimeoutError:
        raise error.TestFail("Migration failed with setting "
                             " downtime to %ds." % self.mig_downtime)
def wait_for_migration(self, vm, timeout):
    """
    Poll until migration of *vm* is finished or *timeout* elapses.

    Polls mig_finished() every 2 seconds (after an initial 2 second
    delay) via utils_misc.wait_for.

    :param vm: VM object being migrated.
    :param timeout: maximum number of seconds to wait.
    :raise virt_vm.VMMigrateTimeoutError: when the timeout expires.
    """
    finished = lambda: self.mig_finished(vm)
    if utils_misc.wait_for(finished, timeout, 2, 2,
                           "Waiting for migration to complete"):
        return
    raise virt_vm.VMMigrateTimeoutError("Timeout expired while"
                                        " waiting for migration"
                                        " to finish")
logging
.
info
(
"Migration completed with downtime "
"is %s seconds."
,
self
.
mig_downtime
)
self
.
check_mig_downtime
(
vm
)
vm
.
destroy
(
gracefully
=
False
)
@
error
.
context_aware
def
post_migration_downtime
(
self
,
vm
,
cancel_delay
,
mig_offline
,
dsthost
,
vm_ports
,
not_wait_for_migration
,
fd
,
mig_data
):
super
(
TestMultihostMigration
,
self
).
post_migration
(
vm
,
cancel_delay
,
mig_offline
,
dsthost
,
vm_ports
,
not_wait_for_migration
,
fd
,
mig_data
)
logging
.
info
(
"Set downtime after migration."
)
downtime
=
0
for
downtime
in
range
(
1
,
self
.
max_downtime
):
for
downtime
in
x
range
(
1
,
self
.
max_downtime
):
try
:
self
.
wait_for_migration
(
vm
,
10
)
vm
.
wait_for_migration
(
self
.
wait_mig_timeout
)
break
except
virt_vm
.
VMMigrateTimeoutError
:
logging
.
info
(
"Set downtime to %d seconds."
,
downtime
)
vm
.
monitor
.
migrate_set_downtime
(
downtime
)
logging
.
debug
(
"Migration pass with downtime %s"
,
downtime
)
try
:
vm
.
wait_for_migration
(
self
.
mig_timeout
)
except
virt_vm
.
VMMigrateTimeoutError
:
raise
error
.
TestFail
(
"Migration failed with setting "
" downtime to %ds."
%
downtime
)
self
.
mig_downtime
=
downtime
-
1
logging
.
info
(
"Migration completed with downtime "
"is %s seconds."
,
self
.
mig_downtime
)
self
.
check_mig_downtime
(
vm
)
vm
.
destroy
(
gracefully
=
False
)
def
post_migration_speed
(
self
,
vm
,
cancel_delay
,
mig_offline
,
dsthost
,
vm_ports
,
not_wait_for_migration
,
fd
,
mig_data
):
super
(
TestMultihostMigration
,
self
).
post_migration
(
vm
,
cancel_delay
,
mig_offline
,
dsthost
,
vm_ports
,
not_wait_for_migration
,
fd
,
mig_data
)
self
.
min_speed
self
.
max_speed
self
.
ch_speed
mig_speed
=
None
for
mig_speed
in
range
(
self
.
min_speed
,
self
.
max_speed
,
self
.
speed_step
):
try
:
self
.
wait_for_migration
(
vm
,
5
)
vm
.
wait_for_migration
(
self
.
wait_mig_timeout
)
break
except
virt_vm
.
VMMigrateTimeoutError
:
vm
.
monitor
.
migrate_set_speed
(
"%sB"
%
(
mig_speed
))
...
...
@@ -133,61 +158,57 @@ def run(test, params, env):
# Test migration status. If migration is not completed then
# it kill program which creates guest load.
try
:
self
.
wait_for_migration
(
vm
,
5
)
vm
.
wait_for_migration
(
self
.
mig_timeout
)
except
virt_vm
.
VMMigrateTimeoutError
:
try
:
session
=
vm
.
wait_for_login
(
timeout
=
15
)
session
.
sendline
(
"killall -9 cpuflags-test"
)
except
remote
.
LoginTimeoutError
:
try
:
self
.
wait_for_migration
(
vm
,
5
)
except
virt_vm
.
VMMigrateTimeoutError
:
raise
error
.
TestFail
(
"Migration wan't successful"
" and VM is not accessible."
)
self
.
wait_for_migration
(
vm
,
self
.
mig_timeout
)
logging
.
debug
(
"Migration pass with mig_speed %sB"
,
mig_speed
)
raise
error
.
TestFail
(
"Migration failed with setting "
" mig_speed to %sB."
%
mig_speed
)
logging
.
debug
(
"Migration passed with mig_speed %sB"
,
mig_speed
)
vm
.
destroy
(
gracefully
=
False
)
def
post_migration_stop
(
self
,
vm
,
cancel_delay
,
mig_offline
,
dsthost
,
vm_ports
,
not_wait_for_migration
,
fd
,
mig_data
):
super
(
TestMultihostMigration
,
self
).
post_migration
(
vm
,
cancel_delay
,
mig_offline
,
dsthost
,
vm_ports
,
not_wait_for_migration
,
fd
,
mig_data
)
wait_before_mig
=
int
(
vm
.
params
.
get
(
"wait_before_stop"
,
"5"
))
try
:
self
.
wait_for_migration
(
vm
,
wait_before_mig
)
vm
.
wait_for_migration
(
wait_before_mig
)
except
virt_vm
.
VMMigrateTimeoutError
:
vm
.
pause
()
def
migrate_vms_src
(
self
,
mig_data
):
super_cls
=
super
(
TestMultihostMigration
,
self
)
super_cls
.
migrate_vms_src
(
mig_data
)
try
:
vm
.
wait_for_migration
(
self
.
mig_timeout
)
except
virt_vm
.
VMMigrateTimeoutError
:
raise
error
.
TestFail
(
"Migration failed when vm is paused."
)
def
migration_scenario
(
self
,
worker
=
None
):
def
worker_func
(
mig_data
):
vm
=
mig_data
.
vms
[
0
]
@
error
.
context_aware
def
start_worker
(
mig_data
):
error
.
context
(
"Load stress in guest."
,
logging
.
info
)
vm
=
env
.
get_vm
(
params
[
"main_vm"
])
session
=
vm
.
wait_for_login
(
timeout
=
self
.
login_timeout
)
bg_stress_test
=
params
.
get
(
"bg_stress_test"
)
check_running_cmd
=
params
.
get
(
"check_running_cmd"
)
bg
=
utils
.
InterruptedThread
(
utils_test
.
run_virt_sub_test
,
args
=
(
test
,
params
,
env
,),
kwargs
=
{
"sub_type"
:
bg_stress_test
})
bg
.
start
()
cpuflags
.
install_cpuflags_util_on_vm
(
test
,
vm
,
self
.
install_path
,
extra_flags
=
"-msse3 -msse2"
)
def
is_stress_running
():
return
session
.
cmd_status
(
check_running_cmd
)
==
0
cmd
=
(
"nohup %s/cpuflags-test --stressmem %d,%d &"
%
(
os
.
path
.
join
(
self
.
install_path
,
"cpu_flags"
),
self
.
vm_mem
*
100
,
self
.
vm_mem
/
2
))
logging
.
debug
(
"Sending command: %s"
%
(
cmd
))
session
.
sendline
(
cmd
)
time
.
sleep
(
3
)
if
not
utils_misc
.
wait_for
(
is_stress_running
,
timeout
=
360
):
raise
error
.
TestFail
(
"Failed to start %s in guest."
%
bg_stress_test
)
if
worker
is
None
:
worker
=
worker_func
def
check_worker
(
mig_data
):
if
not
self
.
is_src
:
vm
=
env
.
get_vm
(
params
[
"main_vm"
])
self
.
clean_up
(
vm
)
self
.
migrate_wait
(
self
.
vms
,
self
.
srchost
,
self
.
dsthost
,
start_work
=
worker
)
start_work
er
,
check_
worker
)
mig
=
TestMultihostMigration
(
test
,
params
,
env
)
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录