Commit beca5470 authored by Andrew Jones, committed by Paolo Bonzini

KVM: selftests: virt_map should take npages, not size

Also correct the comment and prototype for vm_create_default(),
as it takes a number of pages, not a size.
Signed-off-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent d0aac332
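For orientation, here is a minimal before/after sketch of the caller-side change, wrapped in a hypothetical helper that is not part of this commit (the argument names mirror those in the hunks below, and the sketch assumes the selftests' kvm_util.h harness):

#include "kvm_util.h"	/* selftests harness header, assumed to be on the include path */

/* Hypothetical wrapper for illustration only. */
static void map_test_pages(struct kvm_vm *vm, uint64_t gva, uint64_t gpa,
			   unsigned int npages)
{
	/*
	 * Old interface: callers converted pages to bytes themselves,
	 * e.g. virt_map(vm, gva, gpa, npages * guest_page_size, 0);
	 * New interface: the page count is passed directly.
	 */
	virt_map(vm, gva, gpa, npages, 0);
}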
@@ -410,8 +410,7 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd,
 				    guest_num_pages, 0);
 	/* Do mapping for the demand paging memory slot */
-	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem,
-		 guest_num_pages * guest_page_size, 0);
+	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);
 	ucall_init(vm, NULL);
@@ -334,8 +334,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 				    KVM_MEM_LOG_DIRTY_PAGES);
 	/* Do mapping for the dirty track memory slot */
-	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem,
-		 guest_num_pages * guest_page_size, 0);
+	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);
 	/* Cache the HVA pointer of the region */
 	host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);
@@ -117,7 +117,7 @@ void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid);
 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
 			  uint32_t data_memslot, uint32_t pgd_memslot);
 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
-	      size_t size, uint32_t pgd_memslot);
+	      unsigned int npages, uint32_t pgd_memslot);
 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);

@@ -226,7 +226,7 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
  *
  * Input Args:
  *   vcpuid - The id of the single VCPU to add to the VM.
- *   extra_mem_pages - The size of extra memories to add (this will
+ *   extra_mem_pages - The number of extra pages to add (this will
  *                     decide how much extra space we will need to
  *                     setup the page tables using memslot 0)
  *   guest_code - The vCPU's entry point

@@ -236,7 +236,7 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
  * Return:
  *   Pointer to opaque structure that describes the created VM.
  */
-struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_size,
+struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
 				 void *guest_code);

 /*
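A hedged usage sketch of the corrected prototype (VCPU_ID, EXTRA_MEM_PAGES, and the empty guest_code are placeholders, not taken from this commit): the extra memory is requested as a page count, which the library uses when sizing memslot 0 and its page tables.

#include "kvm_util.h"	/* selftests harness header, assumed */

#define VCPU_ID		0	/* placeholder vCPU id */
#define EXTRA_MEM_PAGES	16	/* pages, not bytes */

static void guest_code(void)
{
	/* A real guest would do work and signal completion via ucall. */
}

static struct kvm_vm *create_test_vm(void)
{
	return vm_create_default(VCPU_ID, EXTRA_MEM_PAGES, guest_code);
}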
@@ -1015,21 +1015,21 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
  *   vm - Virtual Machine
  *   vaddr - Virtuall address to map
  *   paddr - VM Physical Address
- *   size - The size of the range to map
+ *   npages - The number of pages to map
  *   pgd_memslot - Memory region slot for new virtual translation tables
  *
  * Output Args: None
  *
  * Return: None
  *
- * Within the VM given by vm, creates a virtual translation for the
- * page range starting at vaddr to the page range starting at paddr.
+ * Within the VM given by @vm, creates a virtual translation for
+ * @npages starting at @vaddr to the page range starting at @paddr.
  */
 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
-	      size_t size, uint32_t pgd_memslot)
+	      unsigned int npages, uint32_t pgd_memslot)
 {
 	size_t page_size = vm->page_size;
-	size_t npages = size / page_size;
+	size_t size = npages * page_size;

 	TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");
 	TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
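To spell out the unit change in the body above, here is a standalone, compilable sketch (plain assert() stands in for TEST_ASSERT(), and the per-page mapping loop that follows in the real function is collapsed out of the hunk, so it is only noted in a comment):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Standalone illustration of the new npages-based bounds checking. */
static void check_map_args(uint64_t vaddr, uint64_t paddr,
			   unsigned int npages, size_t page_size)
{
	size_t size = (size_t)npages * page_size;	/* was: npages = size / page_size */

	assert(vaddr + size > vaddr);	/* Vaddr overflow */
	assert(paddr + size > paddr);	/* Paddr overflow */

	/*
	 * The real virt_map() then installs a translation for each of the
	 * npages pages; that loop is outside the hunk shown above.
	 */
}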
@@ -87,7 +87,7 @@ static void test_move_memory_region(void)
 	gpa = vm_phy_pages_alloc(vm, 2, MEM_REGION_GPA, MEM_REGION_SLOT);
 	TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc\n");
-	virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 2 * 4096, 0);
+	virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 2, 0);
 	/* Ditto for the host mapping so that both pages can be zeroed. */
 	hva = addr_gpa2hva(vm, MEM_REGION_GPA);
@@ -21,7 +21,7 @@
 /* The memory slot index to track dirty pages */
 #define TEST_MEM_SLOT_INDEX	1
-#define TEST_MEM_SIZE		3
+#define TEST_MEM_PAGES		3

 /* L1 guest test virtual memory offset */
 #define GUEST_TEST_MEM		0xc0000000

@@ -91,15 +91,14 @@ int main(int argc, char *argv[])
 	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
 				    GUEST_TEST_MEM,
 				    TEST_MEM_SLOT_INDEX,
-				    TEST_MEM_SIZE,
+				    TEST_MEM_PAGES,
 				    KVM_MEM_LOG_DIRTY_PAGES);

 	/*
 	 * Add an identity map for GVA range [0xc0000000, 0xc0002000).  This
 	 * affects both L1 and L2.  However...
 	 */
-	virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM,
-		 TEST_MEM_SIZE * 4096, 0);
+	virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM, TEST_MEM_PAGES, 0);

 	/*
 	 * ... pages in the L2 GPA range [0xc0001000, 0xc0003000) will map to

@@ -113,11 +112,11 @@ int main(int argc, char *argv[])
 	nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096, 0);
 	nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096, 0);

-	bmap = bitmap_alloc(TEST_MEM_SIZE);
+	bmap = bitmap_alloc(TEST_MEM_PAGES);
 	host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM);

 	while (!done) {
-		memset(host_test_mem, 0xaa, TEST_MEM_SIZE * 4096);
+		memset(host_test_mem, 0xaa, TEST_MEM_PAGES * 4096);
 		_vcpu_run(vm, VCPU_ID);
 		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 			    "Unexpected exit reason: %u (%s),\n",
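One closing note on units, as a small standalone sketch (PAGE_SIZE is an assumption mirroring the hard-coded 4096 in the test above): after the rename, virt_map() and vm_userspace_mem_region_add() consume TEST_MEM_PAGES directly, while byte-oriented host code such as memset() still multiplies by the page size.

#include <stdint.h>
#include <string.h>

#define TEST_MEM_PAGES	3
#define PAGE_SIZE	4096	/* assumed; the test hard-codes 4096 */

/* Host-side byte count derived from the page count used by the KVM helpers. */
static void fill_host_copy(void *host_test_mem)
{
	memset(host_test_mem, 0xaa, TEST_MEM_PAGES * PAGE_SIZE);
}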