Commit 33cd6e64, authored by Lu Baolu, committed by Joerg Roedel

iommu/vt-d: Flush PASID-based iotlb for iova over first level

When software has changed first-level translation tables, it should
invalidate the affected IOTLB entries and paging-structure caches using
the PASID-based-IOTLB Invalidate Descriptor defined in the VT-d spec,
section 6.5.2.4.
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Parent: ddf09b6d
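Before the diff, a minimal caller-side sketch of the two invalidation granularities the new qi_flush_piotlb() helper supports. This is not part of the commit: the function name example_first_level_flush and the domain ID, PASID, address and page-count values are made-up assumptions for illustration (assuming <linux/intel-iommu.h> is included).

/* Hypothetical example, not kernel code from this commit. */
static void example_first_level_flush(struct intel_iommu *iommu,
                                      u16 did, u32 pasid)
{
        u64 iova = 0x100000;    /* assumed base address, 16-page aligned */

        /* Page-selective-within-PASID: npages must be a positive count. */
        qi_flush_piotlb(iommu, did, pasid, iova, 16, false);

        /*
         * PASID-selective: npages == -1 drops all cached first-level
         * translations tagged with this PASID.
         */
        qi_flush_piotlb(iommu, did, pasid, 0, (unsigned long)-1, false);
}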
@@ -1371,6 +1371,47 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
         qi_submit_sync(&desc, iommu);
 }
 
+/* PASID-based IOTLB invalidation */
+void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
+                     unsigned long npages, bool ih)
+{
+        struct qi_desc desc = {.qw2 = 0, .qw3 = 0};
+
+        /*
+         * npages == -1 means a PASID-selective invalidation, otherwise,
+         * a positive value for Page-selective-within-PASID invalidation.
+         * 0 is not a valid input.
+         */
+        if (WARN_ON(!npages)) {
+                pr_err("Invalid input npages = %ld\n", npages);
+                return;
+        }
+
+        if (npages == -1) {
+                desc.qw0 = QI_EIOTLB_PASID(pasid) |
+                                QI_EIOTLB_DID(did) |
+                                QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
+                                QI_EIOTLB_TYPE;
+                desc.qw1 = 0;
+        } else {
+                int mask = ilog2(__roundup_pow_of_two(npages));
+                unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
+
+                if (WARN_ON_ONCE(!ALIGN(addr, align)))
+                        addr &= ~(align - 1);
+
+                desc.qw0 = QI_EIOTLB_PASID(pasid) |
+                                QI_EIOTLB_DID(did) |
+                                QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
+                                QI_EIOTLB_TYPE;
+                desc.qw1 = QI_EIOTLB_ADDR(addr) |
+                                QI_EIOTLB_IH(ih) |
+                                QI_EIOTLB_AM(mask);
+        }
+
+        qi_submit_sync(&desc, iommu);
+}
+
 /*
  * Disable Queued Invalidation interface.
  */
......
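The page-selective path of qi_flush_piotlb() above rounds npages up to a power of two and aligns the base address down, because the descriptor's address-mask (AM) field can only describe a naturally aligned block of 2^AM pages. The following standalone user-space sketch of that math is not kernel code: it assumes 4 KiB pages (VTD_PAGE_SHIFT == 12), and the helper name npages_to_addr_mask is illustrative, not a kernel API.

/*
 * Standalone illustration (not kernel code) of the address-mask math used
 * by the page-selective path of qi_flush_piotlb().  Assumes 4 KiB pages.
 */
#include <stdio.h>
#include <stdint.h>

/* Smallest mask such that 2^mask pages cover npages pages. */
static unsigned int npages_to_addr_mask(unsigned long npages)
{
        unsigned int mask = 0;

        while ((1UL << mask) < npages)
                mask++;
        return mask;
}

int main(void)
{
        const unsigned int vtd_page_shift = 12;        /* 4 KiB pages */
        unsigned long npages = 9;                      /* not a power of two */
        uint64_t addr = 0x12345000;                    /* not naturally aligned */

        unsigned int mask = npages_to_addr_mask(npages);   /* 4 -> 16 pages */
        uint64_t align = 1ULL << (vtd_page_shift + mask);  /* 64 KiB block */

        /*
         * The descriptor can only express a naturally aligned 2^mask-page
         * region, so the base is aligned down to that boundary, mirroring
         * what the kernel helper does when it sees an unaligned address.
         */
        addr &= ~(align - 1);

        printf("AM = %u, flushing %llu bytes from %#llx\n",
               mask, (unsigned long long)align, (unsigned long long)addr);
        return 0;
}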
@@ -1509,6 +1509,20 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
         spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
+static void domain_flush_piotlb(struct intel_iommu *iommu,
+                                struct dmar_domain *domain,
+                                u64 addr, unsigned long npages, bool ih)
+{
+        u16 did = domain->iommu_did[iommu->seq_id];
+
+        if (domain->default_pasid)
+                qi_flush_piotlb(iommu, did, domain->default_pasid,
+                                addr, npages, ih);
+
+        if (!list_empty(&domain->devices))
+                qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, npages, ih);
+}
+
 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
                                   struct dmar_domain *domain,
                                   unsigned long pfn, unsigned int pages,
@@ -1522,18 +1536,23 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
         if (ih)
                 ih = 1 << 6;
-        /*
-         * Fallback to domain selective flush if no PSI support or the size is
-         * too big.
-         * PSI requires page size to be 2 ^ x, and the base address is naturally
-         * aligned to the size
-         */
-        if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
-                iommu->flush.flush_iotlb(iommu, did, 0, 0,
-                                                DMA_TLB_DSI_FLUSH);
-        else
-                iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
-                                                DMA_TLB_PSI_FLUSH);
+
+        if (domain_use_first_level(domain)) {
+                domain_flush_piotlb(iommu, domain, addr, pages, ih);
+        } else {
+                /*
+                 * Fallback to domain selective flush if no PSI support or
+                 * the size is too big. PSI requires page size to be 2 ^ x,
+                 * and the base address is naturally aligned to the size.
+                 */
+                if (!cap_pgsel_inv(iommu->cap) ||
+                    mask > cap_max_amask_val(iommu->cap))
+                        iommu->flush.flush_iotlb(iommu, did, 0, 0,
+                                                        DMA_TLB_DSI_FLUSH);
+                else
+                        iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
+                                                        DMA_TLB_PSI_FLUSH);
+        }
 
         /*
          * In caching mode, changes of pages from non-present to present require
@@ -1548,8 +1567,11 @@ static inline void __mapping_notify_one(struct intel_iommu *iommu,
                                         struct dmar_domain *domain,
                                         unsigned long pfn, unsigned int pages)
 {
-        /* It's a non-present to present mapping. Only flush if caching mode */
-        if (cap_caching_mode(iommu->cap))
+        /*
+         * It's a non-present to present mapping. Only flush if caching mode
+         * and second level.
+         */
+        if (cap_caching_mode(iommu->cap) && !domain_use_first_level(domain))
                 iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
         else
                 iommu_flush_write_buffer(iommu);
@@ -1566,7 +1588,11 @@ static void iommu_flush_iova(struct iova_domain *iovad)
                 struct intel_iommu *iommu = g_iommus[idx];
                 u16 did = domain->iommu_did[iommu->seq_id];
 
-                iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
+                if (domain_use_first_level(domain))
+                        domain_flush_piotlb(iommu, domain, 0, -1, 0);
+                else
+                        iommu->flush.flush_iotlb(iommu, did, 0, 0,
+                                                 DMA_TLB_DSI_FLUSH);
 
                 if (!cap_caching_mode(iommu->cap))
                         iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
......
@@ -650,6 +650,8 @@ extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
                           unsigned int size_order, u64 type);
 extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
                        u16 qdep, u64 addr, unsigned mask);
+void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
+                     unsigned long npages, bool ih);
 extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
 extern int dmar_ir_support(void);
......