Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
openeuler
tp-qemu
提交
3e4fdb66
T
tp-qemu
项目概览
openeuler
/
tp-qemu
通知
1
Star
0
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
T
tp-qemu
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
未验证
提交
3e4fdb66
编写于
8月 20, 2019
作者:
L
Longxiang Lyu
提交者:
GitHub
8月 20, 2019
浏览文件
操作
浏览文件
下载
差异文件
Merge pull request #1830 from wangxianxian/1716737_numa
Numa_memdev_options: Assign proper host numa node automatically
上级
154051de
cba80adf
变更
3
隐藏空白更改
内联
并排
Showing
3 changed files
with
60 additions
and
31 deletions
+60
-31
qemu/tests/cfg/numa_memdev_options.cfg
qemu/tests/cfg/numa_memdev_options.cfg
+3
-15
qemu/tests/numa_memdev_mlock.py
qemu/tests/numa_memdev_mlock.py
+14
-4
qemu/tests/numa_memdev_options.py
qemu/tests/numa_memdev_options.py
+43
-12
未找到文件。
qemu/tests/cfg/numa_memdev_options.cfg
浏览文件 @
3e4fdb66
...
...
@@ -5,8 +5,7 @@
mem_devs = "mem0 mem1"
backend_mem = memory-backend-ram
use_mem = no
host-nodes_mem0 = 0
host-nodes_mem1 = 1
not_preprocess = yes
size_mem0 = 1024M
size_mem1 = 3072M
guest_numa_nodes = "node0 node1"
...
...
@@ -22,8 +21,6 @@
variants:
- policy_default:
policy_mem = default
del host-nodes_mem0
del host-nodes_mem1
- policy_bind:
policy_mem = bind
- policy_interleave:
...
...
@@ -38,19 +35,10 @@
start_vm = no
- numa_hugepage:
backend_mem = memory-backend-file
setup_hugepages = yes
set_node_hugepage = yes
mem-path = /mnt/kvm_hugepage
target_nodes = 0 1
# Please update following numbers according to actual hugepage size,
# It intends for 2M hugepage by default.
target_num_node0 = 522
target_num_node1 = 1546
- numa_ram_hugepage:
backend_mem_mem0 = memory-backend-file
backend_mem_mem1 = memory-backend-ram
setup_hugepages = yes
set_node_hugepage = yes
mem-path_mem0 = /mnt/kvm_hugepage
target_nodes = 0
# Please update following numbers according to actual hugepage size,
# It intends for 2M hugepage by default.
target_num_node0 = 522
qemu/tests/numa_memdev_mlock.py
浏览文件 @
3e4fdb66
...
...
@@ -4,13 +4,14 @@ from virttest import error_context
from
qemu.tests
import
numa_memdev_options
from
qemu.tests.mlock_basic
import
MlockBasic
from
virttest
import
env_process
@
error_context
.
context_aware
def
run
(
test
,
params
,
env
):
"""
[Memory][Numa] NUMA memdev option test with mlock, this case will:
1) Check host's numa node(s)
amount
.
1) Check host's numa node(s).
2) Get nr_mlock and nr_unevictable in host before VM start.
3) Start the VM.
4) Get nr_mlock and nr_unevictable in host after VM start.
...
...
@@ -22,9 +23,18 @@ def run(test, params, env):
:param params: Dictionary with the test parameters
:param env: Dictionary with test environment.
"""
error_context
.
context
(
"Check host's numa node(s) amount!"
,
logging
.
info
)
numa_memdev_options
.
check_host_numa_node_amount
(
test
)
error_context
.
context
(
"Check host's numa node(s)!"
,
logging
.
info
)
valid_nodes
=
numa_memdev_options
.
get_host_numa_node
()
if
len
(
valid_nodes
)
<
2
:
test
.
cancel
(
"The host numa nodes that whose size is not zero should be "
"at least 2! But there is %d."
%
len
(
valid_nodes
))
if
params
.
get
(
'policy_mem'
)
!=
'default'
:
error_context
.
context
(
"Assign host's numa node(s)!"
,
logging
.
info
)
params
[
'host-nodes_mem0'
]
=
valid_nodes
[
0
]
params
[
'host-nodes_mem1'
]
=
valid_nodes
[
1
]
env_process
.
preprocess_vm
(
test
,
params
,
env
,
params
[
"main_vm"
])
numa_mlock_test
=
MlockBasic
(
test
,
params
,
env
)
numa_mlock_test
.
start
()
...
...
qemu/tests/numa_memdev_options.py
浏览文件 @
3e4fdb66
import
logging
import
re
from
avocado.utils
import
process
from
virttest
import
error_context
from
virttest
import
utils_misc
from
virttest
import
env_process
from
virttest.staging
import
utils_memory
from
virttest.compat_52lts
import
decode_to_text
from
virttest.utils_numeric
import
normalize_data_size
def
check_host_numa_node_amount
(
test
):
def
get_host_numa_node
(
):
"""
Check host NUMA node amount
:param test: QEMU test object
Get host NUMA node whose node size is not zero
"""
host_numa_nodes
=
utils_memory
.
numa_nodes
()
host_numa_nodes
=
len
(
host_numa_nodes
)
if
host_numa_nodes
<
2
:
test
.
cancel
(
"The host numa nodes should be at least 2! But there is %d."
%
host_numa_nodes
)
host_numa
=
utils_memory
.
numa_nodes
()
node_list
=
[]
numa_info
=
process
.
getoutput
(
"numactl -H"
)
for
i
in
host_numa
:
node_size
=
re
.
findall
(
r
"node %d size: \d+ \w"
%
i
,
numa_info
)[
0
].
split
()[
-
2
]
if
node_size
!=
'0'
:
node_list
.
append
(
str
(
i
))
return
node_list
def
check_query_memdev
(
test
,
params
,
vm
):
...
...
@@ -102,7 +106,7 @@ def check_memory_in_procfs(test, params, vm):
def
run
(
test
,
params
,
env
):
"""
[Memory][Numa] NUMA memdev option, this case will:
1) Check host's numa node(s)
amount
.
1) Check host's numa node(s).
2) Start the VM.
3) Check query-memdev.
4) Check the memory in procfs.
...
...
@@ -111,10 +115,36 @@ def run(test, params, env):
:param params: Dictionary with the test parameters
:param env: Dictionary with test environment
"""
error_context
.
context
(
"Check host's numa node(s) amount!"
,
logging
.
info
)
check_host_numa_node_amount
(
test
)
error_context
.
context
(
"Check host's numa node(s)!"
,
logging
.
info
)
valid_nodes
=
get_host_numa_node
()
if
len
(
valid_nodes
)
<
2
:
test
.
cancel
(
"The host numa nodes that whose size is not zero should be "
"at least 2! But there is %d."
%
len
(
valid_nodes
))
node1
=
valid_nodes
[
0
]
node2
=
valid_nodes
[
1
]
if
params
.
get
(
'policy_mem'
)
!=
'default'
:
error_context
.
context
(
"Assign host's numa node(s)!"
,
logging
.
info
)
params
[
'host-nodes_mem0'
]
=
node1
params
[
'host-nodes_mem1'
]
=
node2
if
params
.
get
(
'set_node_hugepage'
)
==
'yes'
:
hugepage_size
=
utils_memory
.
get_huge_page_size
()
normalize_total_hg1
=
int
(
normalize_data_size
(
params
[
'size_mem0'
],
'K'
))
hugepage_num1
=
normalize_total_hg1
//
hugepage_size
if
'numa_hugepage'
in
params
[
'shortname'
]:
params
[
'target_nodes'
]
=
"%s %s"
%
(
node1
,
node2
)
normalize_total_hg2
=
int
(
normalize_data_size
(
params
[
'size_mem1'
],
'K'
))
hugepage_num2
=
normalize_total_hg2
//
hugepage_size
params
[
'target_num_node%s'
%
node2
]
=
hugepage_num2
else
:
params
[
'target_nodes'
]
=
node1
params
[
'target_num_node%s'
%
node1
]
=
hugepage_num1
params
[
'setup_hugepages'
]
=
'yes'
env_process
.
preprocess
(
test
,
params
,
env
)
error_context
.
context
(
"Starting VM!"
,
logging
.
info
)
env_process
.
preprocess_vm
(
test
,
params
,
env
,
params
[
"main_vm"
])
vm
=
env
.
get_vm
(
params
[
"main_vm"
])
vm
.
verify_alive
()
...
...
@@ -123,3 +153,4 @@ def run(test, params, env):
error_context
.
context
(
"Check the memory in procfs!"
,
logging
.
info
)
check_memory_in_procfs
(
test
,
params
,
vm
)
vm
.
verify_dmesg
()
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录