Commit 574c0cfb authored by Paolo Bonzini

Merge tag 'kvm-ppc-next-4.20-2' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc into HEAD

Second PPC KVM update for 4.20.

Two commits; one is an optimization for PCI pass-through, and the
other disables nested HV-KVM on early POWER9 chips that need a
particular hardware bug workaround.
@@ -126,7 +126,7 @@ struct iommu_table {
 	int it_nid;
 };
 
-#define IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry) \
+#define IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry) \
 		((tbl)->it_ops->useraddrptr((tbl), (entry), false))
 #define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \
 		((tbl)->it_ops->useraddrptr((tbl), (entry), true))
...
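The rename only changes which boolean reaches the table's useraddrptr() callback: the _RO form asks for a read-only lookup of the cached userspace address (false), while IOMMU_TABLE_USERSPACE_ENTRY (true) may allocate missing indirect levels on the way. A standalone toy sketch of that distinction follows; the names, LEVEL_SIZE value and the two-level array are made up for illustration and are not the real platform implementation.

#include <stdio.h>
#include <stdlib.h>

#define LEVEL_SIZE 512			/* entries per indirect level (power of two), made up */

/* Toy two-level cache: a directory of pointers to second-level pages. */
static unsigned long *level1[LEVEL_SIZE];

/*
 * Illustration (not kernel code) of the boolean that
 * IOMMU_TABLE_USERSPACE_ENTRY (true) and IOMMU_TABLE_USERSPACE_ENTRY_RO
 * (false) pass to ->useraddrptr(): with alloc == 0 a missing indirect
 * level is reported as NULL instead of being allocated on demand.
 */
static unsigned long *useraddrptr(unsigned long entry, int alloc)
{
	unsigned long idx1 = entry / LEVEL_SIZE;
	unsigned long idx2 = entry % LEVEL_SIZE;

	if (!level1[idx1]) {
		if (!alloc)
			return NULL;	/* read-only lookup: do not grow the cache */
		level1[idx1] = calloc(LEVEL_SIZE, sizeof(unsigned long));
		if (!level1[idx1])
			return NULL;
	}
	return &level1[idx1][idx2];
}

int main(void)
{
	printf("RO lookup of entry 1027:  %p\n", (void *)useraddrptr(1027, 0));
	printf("allocating lookup:        %p\n", (void *)useraddrptr(1027, 1));
	return 0;
}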
@@ -410,11 +410,10 @@ static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
 {
 	struct mm_iommu_table_group_mem_t *mem = NULL;
 	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
-	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
 
 	if (!pua)
-		/* it_userspace allocation might be delayed */
-		return H_TOO_HARD;
+		return H_SUCCESS;
 
 	mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
 	if (!mem)
...
@@ -214,7 +214,7 @@ static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl,
 
 	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
 			(*direction == DMA_BIDIRECTIONAL))) {
-		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
+		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
 		/*
 		 * kvmppc_rm_tce_iommu_do_map() updates the UA cache after
 		 * calling this so we still get here a valid UA.
@@ -240,7 +240,7 @@ static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
 {
 	struct mm_iommu_table_group_mem_t *mem = NULL;
 	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
-	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
+	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
 
 	if (!pua)
 		/* it_userspace allocation might be delayed */
@@ -304,7 +304,7 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
 {
 	long ret;
 	unsigned long hpa = 0;
-	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
+	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
 	struct mm_iommu_table_group_mem_t *mem;
 
 	if (!pua)
...
@@ -4174,7 +4174,16 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
 
 	do {
-		if (kvm->arch.threads_indep && kvm_is_radix(kvm))
+		/*
+		 * The early POWER9 chips that can't mix radix and HPT threads
+		 * on the same core also need the workaround for the problem
+		 * where the TLB would prefetch entries in the guest exit path
+		 * for radix guests using the guest PIDR value and LPID 0.
+		 * The workaround is in the old path (kvmppc_run_vcpu())
+		 * but not the new path (kvmhv_run_single_vcpu()).
+		 */
+		if (kvm->arch.threads_indep && kvm_is_radix(kvm) &&
+		    !no_mixing_hpt_and_radix)
 			r = kvmhv_run_single_vcpu(run, vcpu, ~(u64)0,
 						  vcpu->arch.vcore->lpcr);
 		else
@@ -5196,7 +5205,7 @@ static int kvmhv_enable_nested(struct kvm *kvm)
 {
 	if (!nested)
 		return -EPERM;
-	if (!cpu_has_feature(CPU_FTR_ARCH_300))
+	if (!cpu_has_feature(CPU_FTR_ARCH_300) || no_mixing_hpt_and_radix)
 		return -ENODEV;
 
 	/* kvm == NULL means the caller is testing if the capability exists */
...
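To userspace, the kvmhv_enable_nested() change shows up as the nested-HV capability being absent on the affected chips. A hedged sketch of how a VMM might probe and enable it, assuming the standard KVM ioctls and the KVM_CAP_PPC_NESTED_HV capability from this release cycle (error handling trimmed; not taken from any particular VMM):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Sketch: probe and enable nested HV on a KVM VM file descriptor.
 * On chips where no_mixing_hpt_and_radix is set, KVM_CHECK_EXTENSION
 * reports 0 and KVM_ENABLE_CAP fails with errno == ENODEV, so the VMM
 * simply runs the guest without nested virtualization.
 */
static int try_enable_nested_hv(int vm_fd)
{
	struct kvm_enable_cap cap = { .cap = KVM_CAP_PPC_NESTED_HV };

	if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_NESTED_HV) <= 0)
		return -1;	/* not supported on this chip or kernel */

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}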
@@ -444,7 +444,7 @@ static void tce_iommu_unuse_page_v2(struct tce_container *container,
 	struct mm_iommu_table_group_mem_t *mem = NULL;
 	int ret;
 	unsigned long hpa = 0;
-	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
 
 	if (!pua)
 		return;
@@ -467,8 +467,27 @@ static int tce_iommu_clear(struct tce_container *container,
 	unsigned long oldhpa;
 	long ret;
 	enum dma_data_direction direction;
+	unsigned long lastentry = entry + pages;
 
-	for ( ; pages; --pages, ++entry) {
+	for ( ; entry < lastentry; ++entry) {
+		if (tbl->it_indirect_levels && tbl->it_userspace) {
+			/*
+			 * For multilevel tables, we can take a shortcut here
+			 * and skip some TCEs as we know that the userspace
+			 * addresses cache is a mirror of the real TCE table
+			 * and if it is missing some indirect levels, then
+			 * the hardware table does not have them allocated
+			 * either and therefore does not require updating.
+			 */
+			__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl,
+					entry);
+
+			if (!pua) {
+				/* align to level_size which is power of two */
+				entry |= tbl->it_level_size - 1;
+				continue;
+			}
+		}
+
 		cond_resched();
 
 		direction = DMA_NONE;
...
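The skip in tce_iommu_clear() relies on it_level_size being a power of two: ORing in it_level_size - 1 moves entry to the last TCE covered by the missing indirect level, and the loop's ++entry then lands on the first TCE of the next level. A tiny standalone illustration of the arithmetic, with made-up numbers rather than real hardware values:

#include <stdio.h>

int main(void)
{
	unsigned long it_level_size = 512;	/* TCEs per indirect level (power of two), made up */
	unsigned long entry = 1027;		/* somewhere inside an unallocated level */

	entry |= it_level_size - 1;		/* -> 1535, last TCE of that level */
	entry++;				/* -> 1536, first TCE of the next level */

	printf("resume clearing at entry %lu\n", entry);
	return 0;
}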