Commit 4b205766 authored by Linus Torvalds

Merge tag 'iommu-fixes-v5.6-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu fixes from Joerg Roedel:

 - Compile warning fix for the Intel IOMMU driver

 - Fix kdump boot with Intel IOMMU enabled and in passthrough mode

 - Disable AMD IOMMU on a Laptop/Embedded platform because the delay it
   introduces in DMA transactions causes screen flickering there with 4k
   monitors

 - Make domain_free function in QCOM IOMMU driver robust and not leak
   memory/dereference NULL pointers

 - Fix ARM-SMMU module parameter prefix names

* tag 'iommu-fixes-v5.6-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/arm-smmu: Restore naming of driver parameter prefix
  iommu/qcom: Fix bogus detach logic
  iommu/amd: Disable IOMMU on Stoney Ridge systems
  iommu/vt-d: Simplify check in identity_mapping()
  iommu/vt-d: Remove deferred_attach_domain()
  iommu/vt-d: Do deferred attachment in iommu_need_mapping()
  iommu/vt-d: Move deferred device attachment into helper function
  iommu/vt-d: Add attach_deferred() helper
  iommu/vt-d: Fix compile warning from intel-svm.h
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o
obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
# Composite object is named arm_smmu.o (not arm-smmu-mod.o) so that the
# module's parameter prefix is restored to "arm_smmu." (see merge message:
# "iommu/arm-smmu: Restore naming of driver parameter prefix").
obj-$(CONFIG_ARM_SMMU) += arm_smmu.o
arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o
obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o
......
...@@ -2523,6 +2523,7 @@ static int __init early_amd_iommu_init(void) ...@@ -2523,6 +2523,7 @@ static int __init early_amd_iommu_init(void)
struct acpi_table_header *ivrs_base; struct acpi_table_header *ivrs_base;
acpi_status status; acpi_status status;
int i, remap_cache_sz, ret = 0; int i, remap_cache_sz, ret = 0;
u32 pci_id;
if (!amd_iommu_detected) if (!amd_iommu_detected)
return -ENODEV; return -ENODEV;
...@@ -2610,6 +2611,16 @@ static int __init early_amd_iommu_init(void) ...@@ -2610,6 +2611,16 @@ static int __init early_amd_iommu_init(void)
if (ret) if (ret)
goto out; goto out;
/* Disable IOMMU if there's Stoney Ridge graphics */
for (i = 0; i < 32; i++) {
pci_id = read_pci_config(0, i, 0, 0);
if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
pr_info("Disable IOMMU on Stoney Ridge\n");
amd_iommu_disabled = true;
break;
}
}
/* Disable any previously enabled IOMMUs */ /* Disable any previously enabled IOMMUs */
if (!is_kdump_kernel() || amd_iommu_disabled) if (!is_kdump_kernel() || amd_iommu_disabled)
disable_iommus(); disable_iommus();
...@@ -2718,7 +2729,7 @@ static int __init state_next(void) ...@@ -2718,7 +2729,7 @@ static int __init state_next(void)
ret = early_amd_iommu_init(); ret = early_amd_iommu_init();
init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED; init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) { if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
pr_info("AMD IOMMU disabled on kernel command-line\n"); pr_info("AMD IOMMU disabled\n");
init_state = IOMMU_CMDLINE_DISABLED; init_state = IOMMU_CMDLINE_DISABLED;
ret = -EINVAL; ret = -EINVAL;
} }
......
...@@ -762,6 +762,11 @@ static int iommu_dummy(struct device *dev) ...@@ -762,6 +762,11 @@ static int iommu_dummy(struct device *dev)
return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
} }
/*
 * attach_deferred - has domain attachment for @dev been deferred?
 *
 * True when dev->archdata.iommu still holds the DEFER_DEVICE_DOMAIN_INFO
 * marker, i.e. the device has not yet been attached to its domain.
 */
static bool attach_deferred(struct device *dev)
{
return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO;
}
/** /**
* is_downstream_to_pci_bridge - test if a device belongs to the PCI * is_downstream_to_pci_bridge - test if a device belongs to the PCI
* sub-hierarchy of a candidate PCI-PCI bridge * sub-hierarchy of a candidate PCI-PCI bridge
...@@ -2510,8 +2515,7 @@ struct dmar_domain *find_domain(struct device *dev) ...@@ -2510,8 +2515,7 @@ struct dmar_domain *find_domain(struct device *dev)
{ {
struct device_domain_info *info; struct device_domain_info *info;
if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO || if (unlikely(attach_deferred(dev) || iommu_dummy(dev)))
dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO))
return NULL; return NULL;
if (dev_is_pci(dev)) if (dev_is_pci(dev))
...@@ -2525,18 +2529,14 @@ struct dmar_domain *find_domain(struct device *dev) ...@@ -2525,18 +2529,14 @@ struct dmar_domain *find_domain(struct device *dev)
return NULL; return NULL;
} }
static struct dmar_domain *deferred_attach_domain(struct device *dev) static void do_deferred_attach(struct device *dev)
{ {
if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO)) { struct iommu_domain *domain;
struct iommu_domain *domain;
dev->archdata.iommu = NULL;
domain = iommu_get_domain_for_dev(dev);
if (domain)
intel_iommu_attach_device(domain, dev);
}
return find_domain(dev); dev->archdata.iommu = NULL;
domain = iommu_get_domain_for_dev(dev);
if (domain)
intel_iommu_attach_device(domain, dev);
} }
static inline struct device_domain_info * static inline struct device_domain_info *
...@@ -2916,7 +2916,7 @@ static int identity_mapping(struct device *dev) ...@@ -2916,7 +2916,7 @@ static int identity_mapping(struct device *dev)
struct device_domain_info *info; struct device_domain_info *info;
info = dev->archdata.iommu; info = dev->archdata.iommu;
if (info && info != DUMMY_DEVICE_DOMAIN_INFO && info != DEFER_DEVICE_DOMAIN_INFO) if (info)
return (info->domain == si_domain); return (info->domain == si_domain);
return 0; return 0;
...@@ -3587,6 +3587,9 @@ static bool iommu_need_mapping(struct device *dev) ...@@ -3587,6 +3587,9 @@ static bool iommu_need_mapping(struct device *dev)
if (iommu_dummy(dev)) if (iommu_dummy(dev))
return false; return false;
if (unlikely(attach_deferred(dev)))
do_deferred_attach(dev);
ret = identity_mapping(dev); ret = identity_mapping(dev);
if (ret) { if (ret) {
u64 dma_mask = *dev->dma_mask; u64 dma_mask = *dev->dma_mask;
...@@ -3635,7 +3638,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr, ...@@ -3635,7 +3638,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
BUG_ON(dir == DMA_NONE); BUG_ON(dir == DMA_NONE);
domain = deferred_attach_domain(dev); domain = find_domain(dev);
if (!domain) if (!domain)
return DMA_MAPPING_ERROR; return DMA_MAPPING_ERROR;
...@@ -3855,7 +3858,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele ...@@ -3855,7 +3858,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
if (!iommu_need_mapping(dev)) if (!iommu_need_mapping(dev))
return dma_direct_map_sg(dev, sglist, nelems, dir, attrs); return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
domain = deferred_attach_domain(dev); domain = find_domain(dev);
if (!domain) if (!domain)
return 0; return 0;
...@@ -3950,7 +3953,11 @@ bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size, ...@@ -3950,7 +3953,11 @@ bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
int prot = 0; int prot = 0;
int ret; int ret;
domain = deferred_attach_domain(dev); if (unlikely(attach_deferred(dev)))
do_deferred_attach(dev);
domain = find_domain(dev);
if (WARN_ON(dir == DMA_NONE || !domain)) if (WARN_ON(dir == DMA_NONE || !domain))
return DMA_MAPPING_ERROR; return DMA_MAPPING_ERROR;
...@@ -6133,7 +6140,7 @@ intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev) ...@@ -6133,7 +6140,7 @@ intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
/*
 * intel_iommu_is_attach_deferred - iommu_ops callback; reports whether
 * attachment of @dev to @domain is deferred (e.g. during kdump boot).
 * Delegates to the attach_deferred() helper so the archdata.iommu check
 * lives in exactly one place.
 */
static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain,
					   struct device *dev)
{
	return attach_deferred(dev);
}
static int static int
......
...@@ -344,21 +344,19 @@ static void qcom_iommu_domain_free(struct iommu_domain *domain) ...@@ -344,21 +344,19 @@ static void qcom_iommu_domain_free(struct iommu_domain *domain)
{ {
struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
if (WARN_ON(qcom_domain->iommu)) /* forgot to detach? */
return;
iommu_put_dma_cookie(domain); iommu_put_dma_cookie(domain);
/* NOTE: unmap can be called after client device is powered off, if (qcom_domain->iommu) {
* for example, with GPUs or anything involving dma-buf. So we /*
* cannot rely on the device_link. Make sure the IOMMU is on to * NOTE: unmap can be called after client device is powered
* avoid unclocked accesses in the TLB inv path: * off, for example, with GPUs or anything involving dma-buf.
*/ * So we cannot rely on the device_link. Make sure the IOMMU
pm_runtime_get_sync(qcom_domain->iommu->dev); * is on to avoid unclocked accesses in the TLB inv path:
*/
free_io_pgtable_ops(qcom_domain->pgtbl_ops); pm_runtime_get_sync(qcom_domain->iommu->dev);
free_io_pgtable_ops(qcom_domain->pgtbl_ops);
pm_runtime_put_sync(qcom_domain->iommu->dev); pm_runtime_put_sync(qcom_domain->iommu->dev);
}
kfree(qcom_domain); kfree(qcom_domain);
} }
...@@ -404,7 +402,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de ...@@ -404,7 +402,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
unsigned i; unsigned i;
if (!qcom_domain->iommu) if (WARN_ON(!qcom_domain->iommu))
return; return;
pm_runtime_get_sync(qcom_iommu->dev); pm_runtime_get_sync(qcom_iommu->dev);
...@@ -417,8 +415,6 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de ...@@ -417,8 +415,6 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
ctx->domain = NULL; ctx->domain = NULL;
} }
pm_runtime_put_sync(qcom_iommu->dev); pm_runtime_put_sync(qcom_iommu->dev);
qcom_domain->iommu = NULL;
} }
static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova, static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
......
...@@ -122,7 +122,7 @@ static inline int intel_svm_unbind_mm(struct device *dev, int pasid) ...@@ -122,7 +122,7 @@ static inline int intel_svm_unbind_mm(struct device *dev, int pasid)
BUG(); BUG();
} }
static int intel_svm_is_pasid_valid(struct device *dev, int pasid) static inline int intel_svm_is_pasid_valid(struct device *dev, int pasid)
{ {
return -EINVAL; return -EINVAL;
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register.