提交 7276030a 编写于 作者: M Mario Smarduch 提交者: Christoffer Dall

KVM: arm/arm64: Enable Dirty Page logging for ARMv8

This patch enables ARMv8 dirty page logging support. Plugs ARMv8 into generic
layer through Kconfig symbol, and drops earlier ARM64 constraints to enable
logging at architecture layer.
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Mario Smarduch <m.smarduch@samsung.com>
上级 9836c6b9
...@@ -221,18 +221,6 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, ...@@ -221,18 +221,6 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr); kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
} }
/**
* kvm_flush_remote_tlbs() - flush all VM TLB entries
* @kvm: pointer to kvm structure.
*
* Interface to HYP function to flush all VM TLB entries without address
* parameter.
*/
static inline void kvm_flush_remote_tlbs(struct kvm *kvm)
{
kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
}
static inline int kvm_arch_dev_ioctl_check_extension(long ext) static inline int kvm_arch_dev_ioctl_check_extension(long ext)
{ {
return 0; return 0;
......
...@@ -808,7 +808,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp, ...@@ -808,7 +808,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
*/ */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{ {
#ifdef CONFIG_ARM
bool is_dirty = false; bool is_dirty = false;
int r; int r;
...@@ -821,9 +820,6 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) ...@@ -821,9 +820,6 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
mutex_unlock(&kvm->slots_lock); mutex_unlock(&kvm->slots_lock);
return r; return r;
#else /* arm64 */
return -EINVAL;
#endif
} }
static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm, static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
......
...@@ -52,11 +52,18 @@ static phys_addr_t hyp_idmap_vector; ...@@ -52,11 +52,18 @@ static phys_addr_t hyp_idmap_vector;
static bool memslot_is_logging(struct kvm_memory_slot *memslot) static bool memslot_is_logging(struct kvm_memory_slot *memslot)
{ {
#ifdef CONFIG_ARM
return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY); return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
#else }
return false;
#endif /**
* kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
* @kvm: pointer to kvm structure.
*
* Interface to HYP function to flush all VM TLB entries
*/
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
} }
static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
...@@ -950,7 +957,6 @@ static bool kvm_is_device_pfn(unsigned long pfn) ...@@ -950,7 +957,6 @@ static bool kvm_is_device_pfn(unsigned long pfn)
return !pfn_valid(pfn); return !pfn_valid(pfn);
} }
#ifdef CONFIG_ARM
/** /**
* stage2_wp_ptes - write protect PMD range * stage2_wp_ptes - write protect PMD range
* @pmd: pointer to pmd entry * @pmd: pointer to pmd entry
...@@ -1095,7 +1101,6 @@ void kvm_arch_mmu_write_protect_pt_masked(struct kvm *kvm, ...@@ -1095,7 +1101,6 @@ void kvm_arch_mmu_write_protect_pt_masked(struct kvm *kvm,
stage2_wp_range(kvm, start, end); stage2_wp_range(kvm, start, end);
} }
#endif
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_memory_slot *memslot, unsigned long hva, struct kvm_memory_slot *memslot, unsigned long hva,
...@@ -1511,7 +1516,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, ...@@ -1511,7 +1516,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
const struct kvm_memory_slot *old, const struct kvm_memory_slot *old,
enum kvm_mr_change change) enum kvm_mr_change change)
{ {
#ifdef CONFIG_ARM
/* /*
* At this point memslot has been committed and there is an * At this point memslot has been committed and there is an
* allocated dirty_bitmap[], dirty pages will be be tracked while the * allocated dirty_bitmap[], dirty pages will be be tracked while the
...@@ -1519,7 +1523,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, ...@@ -1519,7 +1523,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
*/ */
if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
kvm_mmu_wp_memory_region(kvm, mem->slot); kvm_mmu_wp_memory_region(kvm, mem->slot);
#endif
} }
int kvm_arch_prepare_memory_region(struct kvm *kvm, int kvm_arch_prepare_memory_region(struct kvm *kvm,
......
...@@ -22,10 +22,12 @@ config KVM ...@@ -22,10 +22,12 @@ config KVM
select PREEMPT_NOTIFIERS select PREEMPT_NOTIFIERS
select ANON_INODES select ANON_INODES
select HAVE_KVM_CPU_RELAX_INTERCEPT select HAVE_KVM_CPU_RELAX_INTERCEPT
select HAVE_KVM_ARCH_TLB_FLUSH_ALL
select KVM_MMIO select KVM_MMIO
select KVM_ARM_HOST select KVM_ARM_HOST
select KVM_ARM_VGIC select KVM_ARM_VGIC
select KVM_ARM_TIMER select KVM_ARM_TIMER
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
---help--- ---help---
Support hosting virtualized guest machines. Support hosting virtualized guest machines.
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册