Commit 4307af73 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: selftests: Unconditionally use memslot '0' for page table allocations

Drop the memslot param from virt_pg_map() and virt_map() and shove the
hardcoded '0' down to the vm_phy_page_alloc() calls.

No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210622200529.3650424-13-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent a75a895e
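For illustration only (not part of the original commit): a minimal sketch of how a selftest caller looks after this change, assuming the KVM selftests harness ("kvm_util.h"). The TEST_* constants and the trivial guest_code() are hypothetical; only the virt_map() call reflects the new signature shown in the diff below.

	/*
	 * Hypothetical caller, illustration only.  Assumes the KVM selftests
	 * harness; the TEST_* constants and guest_code() are made up.
	 */
	#include "kvm_util.h"

	#define TEST_GVA	0xc0000000ul
	#define TEST_GPA	0xc0000000ul
	#define TEST_SLOT	10
	#define TEST_PAGES	2

	static void guest_code(void)
	{
		GUEST_DONE();
	}

	int main(void)
	{
		struct kvm_vm *vm = vm_create_default(0, 0, guest_code);

		/* Back the test GPA range with an anonymous memslot. */
		vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
					    TEST_GPA, TEST_SLOT, TEST_PAGES, 0);

		/*
		 * Before this commit the caller also passed a page table
		 * memslot, always '0':
		 *	virt_map(vm, TEST_GVA, TEST_GPA, TEST_PAGES, 0);
		 * Now memslot '0' is hardcoded at the vm_phy_page_alloc() sites.
		 */
		virt_map(vm, TEST_GVA, TEST_GPA, TEST_PAGES);

		kvm_vm_free(vm);
		return 0;
	}

The page table pages backing the mapping are now always allocated from memslot '0' inside the library, so tests no longer thread a pgd_memslot argument through virt_map() and virt_pg_map().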
@@ -760,7 +760,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 				    KVM_MEM_LOG_DIRTY_PAGES);
 	/* Do mapping for the dirty track memory slot */
-	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);
+	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages);
 	/* Cache the HVA pointer of the region */
 	host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);
...
@@ -145,7 +145,7 @@ vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
 vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);
 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
-	      unsigned int npages, uint32_t pgd_memslot);
+	      unsigned int npages);
 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
@@ -256,8 +256,7 @@ void virt_pgd_alloc(struct kvm_vm *vm);
  * Within @vm, creates a virtual translation for the page starting
  * at @vaddr to the page starting at @paddr.
  */
-void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
-		 uint32_t memslot);
+void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);
 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
 			     uint32_t memslot);
...
@@ -303,7 +303,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
 				    TEST_MEM_SLOT_INDEX, guest_num_pages, 0);
 	/* Do mapping(GVA->GPA) for the testing memory slot */
-	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);
+	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages);
 	/* Cache the HVA pointer of the region */
 	host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);
...
@@ -83,8 +83,8 @@ void virt_pgd_alloc(struct kvm_vm *vm)
 	}
 }
-void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
-		  uint32_t pgd_memslot, uint64_t flags)
+static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
+			 uint64_t flags)
 {
 	uint8_t attr_idx = flags & 7;
 	uint64_t *ptep;
@@ -105,7 +105,7 @@ void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
 	if (!*ptep) {
-		*ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
+		*ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
 		*ptep |= 3;
 	}
@@ -113,14 +113,14 @@ void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 	case 4:
 		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
 		if (!*ptep) {
-			*ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
+			*ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
 			*ptep |= 3;
 		}
 		/* fall through */
 	case 3:
 		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
 		if (!*ptep) {
-			*ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
+			*ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
 			*ptep |= 3;
 		}
 		/* fall through */
@@ -135,12 +135,11 @@ void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 	*ptep |= (attr_idx << 2) | (1 << 10) /* Access Flag */;
 }
-void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
-		 uint32_t pgd_memslot)
+void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
 {
 	uint64_t attr_idx = 4; /* NORMAL (See DEFAULT_MAIR_EL1) */
-	_virt_pg_map(vm, vaddr, paddr, pgd_memslot, attr_idx);
+	_virt_pg_map(vm, vaddr, paddr, attr_idx);
 }
 vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
...
@@ -14,7 +14,7 @@ static bool ucall_mmio_init(struct kvm_vm *vm, vm_paddr_t gpa)
 	if (kvm_userspace_memory_region_find(vm, gpa, gpa + 1))
 		return false;
-	virt_pg_map(vm, gpa, gpa, 0);
+	virt_pg_map(vm, gpa, gpa);
 	ucall_exit_mmio_addr = (vm_vaddr_t *)gpa;
 	sync_global_to_guest(vm, ucall_exit_mmio_addr);
...
@@ -1265,7 +1265,7 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
 	for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
 	     pages--, vaddr += vm->page_size, paddr += vm->page_size) {
-		virt_pg_map(vm, vaddr, paddr, 0);
+		virt_pg_map(vm, vaddr, paddr);
 		sparsebit_set(vm->vpages_mapped,
 			      vaddr >> vm->page_shift);
@@ -1330,7 +1330,7 @@ vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm)
  * @npages starting at @vaddr to the page range starting at @paddr.
  */
 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
-	      unsigned int npages, uint32_t pgd_memslot)
+	      unsigned int npages)
 {
 	size_t page_size = vm->page_size;
 	size_t size = npages * page_size;
@@ -1339,7 +1339,7 @@ void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 	TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
 	while (npages--) {
-		virt_pg_map(vm, vaddr, paddr, pgd_memslot);
+		virt_pg_map(vm, vaddr, paddr);
 		vaddr += page_size;
 		paddr += page_size;
 	}
...
@@ -101,7 +101,7 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
 				    guest_num_pages, 0);
 	/* Do mapping for the demand paging memory slot */
-	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);
+	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages);
 	ucall_init(vm, NULL);
...
@@ -36,12 +36,12 @@ void virt_pgd_alloc(struct kvm_vm *vm)
  * a page table (ri == 4). Returns a suitable region/segment table entry
  * which points to the freshly allocated pages.
  */
-static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri, uint32_t memslot)
+static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri)
 {
 	uint64_t taddr;
 	taddr = vm_phy_pages_alloc(vm, ri < 4 ? PAGES_PER_REGION : 1,
-				   KVM_GUEST_PAGE_TABLE_MIN_PADDR, memslot);
+				   KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
 	memset(addr_gpa2hva(vm, taddr), 0xff, PAGES_PER_REGION * vm->page_size);
 	return (taddr & REGION_ENTRY_ORIGIN)
@@ -49,8 +49,7 @@ static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri, uint32_t memslot)
 		| ((ri < 4 ? (PAGES_PER_REGION - 1) : 0) & REGION_ENTRY_LENGTH);
 }
-void virt_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa,
-		 uint32_t memslot)
+void virt_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
 {
 	int ri, idx;
 	uint64_t *entry;
@@ -77,7 +76,7 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa,
 	for (ri = 1; ri <= 4; ri++) {
 		idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
 		if (entry[idx] & REGION_ENTRY_INVALID)
-			entry[idx] = virt_alloc_region(vm, ri, memslot);
+			entry[idx] = virt_alloc_region(vm, ri);
 		entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
 	}
...
@@ -221,8 +221,7 @@ void virt_pgd_alloc(struct kvm_vm *vm)
 	}
 }
-void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
-		 uint32_t pgd_memslot)
+void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
 {
 	uint16_t index[4];
 	struct pageMapL4Entry *pml4e;
@@ -256,7 +255,7 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 	pml4e = addr_gpa2hva(vm, vm->pgd);
 	if (!pml4e[index[3]].present) {
 		pml4e[index[3]].address = vm_phy_page_alloc(vm,
-			KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot)
+			KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0)
 			>> vm->page_shift;
 		pml4e[index[3]].writable = true;
 		pml4e[index[3]].present = true;
@@ -267,7 +266,7 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 	pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
 	if (!pdpe[index[2]].present) {
 		pdpe[index[2]].address = vm_phy_page_alloc(vm,
-			KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot)
+			KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0)
 			>> vm->page_shift;
 		pdpe[index[2]].writable = true;
 		pdpe[index[2]].present = true;
@@ -278,7 +277,7 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 	pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
 	if (!pde[index[1]].present) {
 		pde[index[1]].address = vm_phy_page_alloc(vm,
-			KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot)
+			KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0)
 			>> vm->page_shift;
 		pde[index[1]].writable = true;
 		pde[index[1]].present = true;
...
@@ -306,7 +306,7 @@ static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots,
 		guest_addr += npages * 4096;
 	}
-	virt_map(data->vm, MEM_GPA, MEM_GPA, mempages, 0);
+	virt_map(data->vm, MEM_GPA, MEM_GPA, mempages);
 	sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL);
 	atomic_init(&sync->start_flag, false);
...
@@ -132,7 +132,7 @@ static struct kvm_vm *spawn_vm(pthread_t *vcpu_thread, void *guest_code)
 	gpa = vm_phy_pages_alloc(vm, 2, MEM_REGION_GPA, MEM_REGION_SLOT);
 	TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc\n");
-	virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 2, 0);
+	virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 2);
 	/* Ditto for the host mapping so that both pages can be zeroed. */
 	hva = addr_gpa2hva(vm, MEM_REGION_GPA);
...
@@ -293,7 +293,7 @@ int main(int ac, char **av)
 	vm = vm_create_default(0, 0, guest_code);
 	gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE * NR_VCPUS);
 	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0);
-	virt_map(vm, ST_GPA_BASE, ST_GPA_BASE, gpages, 0);
+	virt_map(vm, ST_GPA_BASE, ST_GPA_BASE, gpages);
 	ucall_init(vm, NULL);
 	/* Add the rest of the VCPUs */
...
@@ -97,7 +97,7 @@ int main(int argc, char *argv[])
 	 * Add an identity map for GVA range [0xc0000000, 0xc0002000). This
	 * affects both L1 and L2. However...
 	 */
-	virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM, TEST_MEM_PAGES, 0);
+	virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM, TEST_MEM_PAGES);
 	/*
 	 * ... pages in the L2 GPA range [0xc0001000, 0xc0003000) will map to
...
@@ -423,7 +423,7 @@ int main(int argc, char *argv[])
 	vcpu_init_descriptor_tables(vm, HALTER_VCPU_ID);
 	vm_handle_exception(vm, IPI_VECTOR, guest_ipi_handler);
-	virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA, 0);
+	virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
 	vm_vcpu_add_default(vm, SENDER_VCPU_ID, sender_guest_code);
...
@@ -146,7 +146,7 @@ int main(int argc, char *argv[])
 	/* Map a region for the shared_info page */
 	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
 				    SHINFO_REGION_GPA, SHINFO_REGION_SLOT, 2, 0);
-	virt_map(vm, SHINFO_REGION_GVA, SHINFO_REGION_GPA, 2, 0);
+	virt_map(vm, SHINFO_REGION_GVA, SHINFO_REGION_GPA, 2);
 	struct kvm_xen_hvm_config hvmc = {
 		.flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,
...
@@ -103,7 +103,7 @@ int main(int argc, char *argv[])
 	/* Map a region for the hypercall pages */
 	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
 				    HCALL_REGION_GPA, HCALL_REGION_SLOT, 2, 0);
-	virt_map(vm, HCALL_REGION_GPA, HCALL_REGION_GPA, 2, 0);
+	virt_map(vm, HCALL_REGION_GPA, HCALL_REGION_GPA, 2);
 	for (;;) {
 		volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
...