Commit 7c5047a1 authored by Linus Torvalds

Merge tag 'iommu-fixes-v4.6-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU fixes from Joerg Roedel:
 "The fixes include:

   - Two patches to revert the use of default domains in the ARM SMMU
     driver.  Enabling this caused regressions which need more thorough
     fixing.  So the regressions are fixed for now by disabling the use
     of default domains.

   - A fix for a v4.4 regression in the AMD IOMMU driver which broke
     devices behind invisible PCIe-to-PCI bridges with the IOMMU enabled
     (the alias-resolution order is sketched below the shortlog)"

* tag 'iommu-fixes-v4.6-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/arm-smmu: Don't allocate resources for bypass domains
  iommu/arm-smmu: Fix stream-match conflict with IOMMU_DOMAIN_DMA
  iommu/amd: Fix checking of pci dma aliases
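
For orientation on the AMD change below: the new get_alias() has to reconcile the alias reported by the IVRS ACPI table with the alias the PCI core walks to via pci_for_each_dma_alias(). The following is a minimal, standalone model of that precedence order only; the table lookup and the PCI alias walk are reduced to plain values, and the IDs in main() are hypothetical. It is a sketch for illustration, not kernel code.

/*
 * Sketch of the decision order used by get_alias() in the patch below.
 * The real code reads amd_iommu_alias_table[] for the IVRS alias and
 * uses pci_for_each_dma_alias() for the PCI-reported alias; here both
 * are passed in directly.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint16_t u16;

static u16 resolve_alias(u16 devid, u16 ivrs_alias, u16 pci_alias)
{
	/* Both sources agree (possibly "no alias", i.e. the devid itself). */
	if (ivrs_alias == pci_alias)
		return ivrs_alias;

	/* IVRS reports no alias: trust the PCI-reported one. */
	if (ivrs_alias == devid)
		return pci_alias;

	/*
	 * The two disagree: prefer the IVRS alias.  (The real patch also
	 * registers a PCI DMA alias quirk when the PCI side reports no
	 * alias and the IVRS alias sits on the same bus.)
	 */
	return ivrs_alias;
}

int main(void)
{
	/* Hypothetical IDs: device 02:00.0 aliased to 01:00.0 by IVRS only. */
	u16 devid = 0x0200, ivrs = 0x0100, pci = 0x0200;

	printf("resolved alias: %04x\n", resolve_alias(devid, ivrs, pci));
	return 0;
}
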
@@ -92,6 +92,7 @@ struct iommu_dev_data {
 	struct list_head dev_data_list;   /* For global dev_data_list */
 	struct protection_domain *domain; /* Domain the device is bound to */
 	u16 devid;                        /* PCI Device ID */
+	u16 alias;                        /* Alias Device ID */
 	bool iommu_v2;                    /* Device can make use of IOMMUv2 */
 	bool passthrough;                 /* Device is identity mapped */
 	struct {
@@ -166,6 +167,13 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
 	return container_of(dom, struct protection_domain, domain);
 }
 
+static inline u16 get_device_id(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	return PCI_DEVID(pdev->bus->number, pdev->devfn);
+}
+
 static struct iommu_dev_data *alloc_dev_data(u16 devid)
 {
 	struct iommu_dev_data *dev_data;
@@ -203,6 +211,68 @@ static struct iommu_dev_data *search_dev_data(u16 devid)
 	return dev_data;
 }
 
+static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+	*(u16 *)data = alias;
+	return 0;
+}
+
+static u16 get_alias(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	u16 devid, ivrs_alias, pci_alias;
+
+	devid = get_device_id(dev);
+	ivrs_alias = amd_iommu_alias_table[devid];
+	pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
+
+	if (ivrs_alias == pci_alias)
+		return ivrs_alias;
+
+	/*
+	 * DMA alias showdown
+	 *
+	 * The IVRS is fairly reliable in telling us about aliases, but it
+	 * can't know about every screwy device. If we don't have an IVRS
+	 * reported alias, use the PCI reported alias. In that case we may
+	 * still need to initialize the rlookup and dev_table entries if the
+	 * alias is to a non-existent device.
+	 */
+	if (ivrs_alias == devid) {
+		if (!amd_iommu_rlookup_table[pci_alias]) {
+			amd_iommu_rlookup_table[pci_alias] =
+				amd_iommu_rlookup_table[devid];
+			memcpy(amd_iommu_dev_table[pci_alias].data,
+			       amd_iommu_dev_table[devid].data,
+			       sizeof(amd_iommu_dev_table[pci_alias].data));
+		}
+
+		return pci_alias;
+	}
+
+	pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
+		"for device %s[%04x:%04x], kernel reported alias "
+		"%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
+		PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
+		PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
+		PCI_FUNC(pci_alias));
+
+	/*
+	 * If we don't have a PCI DMA alias and the IVRS alias is on the same
+	 * bus, then the IVRS table may know about a quirk that we don't.
+	 */
+	if (pci_alias == devid &&
+	    PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
+		pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
+		pdev->dma_alias_devfn = ivrs_alias & 0xff;
+		pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
+			PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
+			dev_name(dev));
+	}
+
+	return ivrs_alias;
+}
+
 static struct iommu_dev_data *find_dev_data(u16 devid)
 {
 	struct iommu_dev_data *dev_data;
@@ -215,13 +285,6 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
 	return dev_data;
 }
 
-static inline u16 get_device_id(struct device *dev)
-{
-	struct pci_dev *pdev = to_pci_dev(dev);
-
-	return PCI_DEVID(pdev->bus->number, pdev->devfn);
-}
-
 static struct iommu_dev_data *get_dev_data(struct device *dev)
 {
 	return dev->archdata.iommu;
@@ -349,6 +412,8 @@ static int iommu_init_device(struct device *dev)
 	if (!dev_data)
 		return -ENOMEM;
 
+	dev_data->alias = get_alias(dev);
+
 	if (pci_iommuv2_capable(pdev)) {
 		struct amd_iommu *iommu;
 
@@ -369,7 +434,7 @@ static void iommu_ignore_device(struct device *dev)
 	u16 devid, alias;
 
 	devid = get_device_id(dev);
-	alias = amd_iommu_alias_table[devid];
+	alias = get_alias(dev);
 
 	memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
 	memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
@@ -1061,7 +1126,7 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
 	int ret;
 
 	iommu = amd_iommu_rlookup_table[dev_data->devid];
-	alias = amd_iommu_alias_table[dev_data->devid];
+	alias = dev_data->alias;
 
 	ret = iommu_flush_dte(iommu, dev_data->devid);
 	if (!ret && alias != dev_data->devid)
@@ -2039,7 +2104,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
 	bool ats;
 
 	iommu = amd_iommu_rlookup_table[dev_data->devid];
-	alias = amd_iommu_alias_table[dev_data->devid];
+	alias = dev_data->alias;
 	ats   = dev_data->ats.enabled;
 
 	/* Update data structures */
@@ -2073,7 +2138,7 @@ static void do_detach(struct iommu_dev_data *dev_data)
 		return;
 
 	iommu = amd_iommu_rlookup_table[dev_data->devid];
-	alias = amd_iommu_alias_table[dev_data->devid];
+	alias = dev_data->alias;
 
 	/* decrease reference counters */
 	dev_data->domain->dev_iommu[iommu->index] -= 1;
......
@@ -826,6 +826,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	if (smmu_domain->smmu)
 		goto out_unlock;
 
+	/* We're bypassing these SIDs, so don't allocate an actual context */
+	if (domain->type == IOMMU_DOMAIN_DMA) {
+		smmu_domain->smmu = smmu;
+		goto out_unlock;
+	}
+
 	/*
 	 * Mapping the requested stage onto what we support is surprisingly
 	 * complicated, mainly because the spec allows S1+S2 SMMUs without
@@ -948,7 +954,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
 	void __iomem *cb_base;
 	int irq;
 
-	if (!smmu)
+	if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
 		return;
 
 	/*
@@ -1089,18 +1095,20 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
 
-	/* Devices in an IOMMU group may already be configured */
-	ret = arm_smmu_master_configure_smrs(smmu, cfg);
-	if (ret)
-		return ret == -EEXIST ? 0 : ret;
-
 	/*
 	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
-	 * for all devices behind the SMMU.
+	 * for all devices behind the SMMU. Note that we need to take
+	 * care configuring SMRs for devices that are both a platform_device
+	 * and a PCI device (i.e. a PCI host controller)
 	 */
 	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
 		return 0;
 
+	/* Devices in an IOMMU group may already be configured */
+	ret = arm_smmu_master_configure_smrs(smmu, cfg);
+	if (ret)
+		return ret == -EEXIST ? 0 : ret;
+
 	for (i = 0; i < cfg->num_streamids; ++i) {
 		u32 idx, s2cr;
 
......