Commit 19943b0e authored by David Woodhouse

intel-iommu: Unify hardware and software passthrough support

This makes the hardware passthrough mode work a lot more like the
software version, so that the behaviour of a kernel with 'iommu=pt'
is the same whether the hardware supports passthrough or not.

In particular:
 - We use a single si_domain for the pass-through devices.
 - 32-bit devices can be taken out of the pass-through domain so that
   they don't have to use swiotlb.
 - Devices will work again after being removed from a KVM guest.
 - A potential oops on OOM (in init_context_pass_through()) is fixed.
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Parent 0815565a
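For readers skimming the diff below, the essence of the unification is a single decision point: with iommu=pt, every identity-mapped device is attached to the one si_domain, and the only things that depend on hardware support (ecap_pass_through()) are the context-entry translation type and whether si_domain's page tables are actually populated. The following standalone C sketch models just that decision under those stated assumptions; it is not kernel code, and the identifiers hw_pass_through, CONTEXT_TT_PASS_THROUGH and CONTEXT_TT_MULTI_LEVEL are merely mirrored from the patch for illustration.

/*
 * Standalone model of the decision this patch introduces.  NOT kernel
 * code: the names below simply mirror identifiers from the diff.
 */
#include <stdio.h>
#include <stdbool.h>

enum context_tt {
        CONTEXT_TT_MULTI_LEVEL,         /* software identity map: si_domain page tables are walked */
        CONTEXT_TT_PASS_THROUGH,        /* hardware pass-through: DMA bypasses the page tables */
};

/* Mirrors the choice made in iommu_prepare_static_identity_mapping()
 * and iommu_no_mapping() after the patch: the domain is always
 * si_domain, only the context translation type depends on hardware. */
static enum context_tt choose_translation(bool hw_pass_through)
{
        return hw_pass_through ? CONTEXT_TT_PASS_THROUGH : CONTEXT_TT_MULTI_LEVEL;
}

/* Mirrors si_domain_init(hw): with hardware pass-through there is no
 * need to fill si_domain's page tables with the system memory map. */
static bool need_si_domain_page_tables(bool hw_pass_through)
{
        return !hw_pass_through;
}

int main(void)
{
        /* hw_pass_through starts as 1 and is cleared in init_dmars()
         * if any IOMMU lacks ecap pass-through support. */
        for (int hw = 0; hw <= 1; hw++)
                printf("hw_pass_through=%d: %s identity mapping, %s, %s\n",
                       hw, hw ? "hardware" : "software",
                       choose_translation(hw) == CONTEXT_TT_PASS_THROUGH ?
                                "CONTEXT_TT_PASS_THROUGH" : "CONTEXT_TT_MULTI_LEVEL",
                       need_si_domain_page_tables(hw) ?
                                "si_domain page tables populated" :
                                "si_domain page tables skipped");
        return 0;
}

Either way the device sits in si_domain, which is why the commit message above can promise that 32-bit devices may later be dropped back to normal DMA remapping instead of being forced through swiotlb.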
@@ -71,8 +71,7 @@ void __init pci_swiotlb_init(void)
 {
         /* don't initialize swiotlb if iommu=off (no_iommu=1) */
 #ifdef CONFIG_X86_64
-        if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN) ||
-            iommu_pass_through)
+        if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN))
                 swiotlb = 1;
 #endif
         if (swiotlb_force)
@@ -251,7 +251,8 @@ static inline int first_pte_in_page(struct dma_pte *pte)
  * 2. It maps to each iommu if successful.
  * 3. Each iommu mapps to this domain if successful.
  */
-struct dmar_domain *si_domain;
+static struct dmar_domain *si_domain;
+static int hw_pass_through = 1;

 /* devices under the same p2p bridge are owned in one domain */
 #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
@@ -1948,14 +1949,24 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
         struct dmar_domain *domain;
         int ret;

-        printk(KERN_INFO
-               "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
-               pci_name(pdev), start, end);
-
         domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
         if (!domain)
                 return -ENOMEM;

+        /* For _hardware_ passthrough, don't bother. But for software
+           passthrough, we do it anyway -- it may indicate a memory
+           range which is reserved in E820, so which didn't get set
+           up to start with in si_domain */
+        if (domain == si_domain && hw_pass_through) {
+                printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
+                       pci_name(pdev), start, end);
+                return 0;
+        }
+
+        printk(KERN_INFO
+               "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
+               pci_name(pdev), start, end);
+
         ret = iommu_domain_identity_map(domain, start, end);
         if (ret)
                 goto error;
@@ -2006,23 +2017,6 @@ static inline void iommu_prepare_isa(void)
 }
 #endif /* !CONFIG_DMAR_FLPY_WA */

-/* Initialize each context entry as pass through.*/
-static int __init init_context_pass_through(void)
-{
-        struct pci_dev *pdev = NULL;
-        struct dmar_domain *domain;
-        int ret;
-
-        for_each_pci_dev(pdev) {
-                domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
-                ret = domain_context_mapping(domain, pdev,
-                                             CONTEXT_TT_PASS_THROUGH);
-                if (ret)
-                        return ret;
-        }
-        return 0;
-}
-
 static int md_domain_init(struct dmar_domain *domain, int guest_width);

 static int __init si_domain_work_fn(unsigned long start_pfn,
@@ -2037,7 +2031,7 @@ static int __init si_domain_work_fn(unsigned long start_pfn,
 }

-static int si_domain_init(void)
+static int si_domain_init(int hw)
 {
         struct dmar_drhd_unit *drhd;
         struct intel_iommu *iommu;
@@ -2064,6 +2058,9 @@ static int si_domain_init(void)
         si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;

+        if (hw)
+                return 0;
+
         for_each_online_node(nid) {
                 work_with_active_regions(nid, si_domain_work_fn, &ret);
                 if (ret)
@@ -2155,24 +2152,26 @@ static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
         return 1;
 }

-static int iommu_prepare_static_identity_mapping(void)
+static int iommu_prepare_static_identity_mapping(int hw)
 {
         struct pci_dev *pdev = NULL;
         int ret;

-        ret = si_domain_init();
+        ret = si_domain_init(hw);
         if (ret)
                 return -EFAULT;

         for_each_pci_dev(pdev) {
                 if (iommu_should_identity_map(pdev, 1)) {
-                        printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
-                               pci_name(pdev));
+                        printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
+                               hw ? "hardware" : "software", pci_name(pdev));
+
                         ret = domain_context_mapping(si_domain, pdev,
+                                                     hw ? CONTEXT_TT_PASS_THROUGH :
                                                      CONTEXT_TT_MULTI_LEVEL);
                         if (ret)
                                 return ret;
                         ret = domain_add_dev_info(si_domain, pdev);
                         if (ret)
                                 return ret;
@@ -2189,14 +2188,6 @@ int __init init_dmars(void)
         struct pci_dev *pdev;
         struct intel_iommu *iommu;
         int i, ret;
-        int pass_through = 1;
-
-        /*
-         * In case pass through can not be enabled, iommu tries to use identity
-         * mapping.
-         */
-        if (iommu_pass_through)
-                iommu_identity_mapping = 1;

         /*
          * for each drhd
@@ -2250,13 +2241,7 @@ int __init init_dmars(void)
                         goto error;
                 }
                 if (!ecap_pass_through(iommu->ecap))
-                        pass_through = 0;
+                        hw_pass_through = 0;
         }
-        if (iommu_pass_through)
-                if (!pass_through) {
-                        printk(KERN_INFO
-                               "Pass Through is not supported by hardware.\n");
-                        iommu_pass_through = 0;
-                }

         /*
@@ -2312,30 +2297,24 @@ int __init init_dmars(void)
                 }
         }

-        /*
-         * If pass through is set and enabled, context entries of all pci
-         * devices are intialized by pass through translation type.
-         */
-        if (iommu_pass_through) {
-                ret = init_context_pass_through();
-                if (ret) {
-                        printk(KERN_ERR "IOMMU: Pass through init failed.\n");
-                        iommu_pass_through = 0;
-                }
-        }
-
+        if (iommu_pass_through)
+                iommu_identity_mapping = 1;
+#ifdef CONFIG_DMAR_BROKEN_GFX_WA
+        else
+                iommu_identity_mapping = 2;
+#endif
         /*
          * If pass through is not set or not enabled, setup context entries for
          * identity mappings for rmrr, gfx, and isa and may fall back to static
          * identity mapping if iommu_identity_mapping is set.
          */
-        if (!iommu_pass_through) {
-#ifdef CONFIG_DMAR_BROKEN_GFX_WA
-                if (!iommu_identity_mapping)
-                        iommu_identity_mapping = 2;
-#endif
-                if (iommu_identity_mapping)
-                        iommu_prepare_static_identity_mapping();
+        if (iommu_identity_mapping) {
+                ret = iommu_prepare_static_identity_mapping(hw_pass_through);
+                if (ret) {
+                        printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
+                        goto error;
+                }
+        }
         /*
          * For each rmrr
          *   for each dev attached to rmrr
@@ -2368,7 +2347,6 @@ int __init init_dmars(void)
                 }

                 iommu_prepare_isa();
-        }

         /*
          * for each drhd
@@ -2536,7 +2514,10 @@ static int iommu_no_mapping(struct device *dev)
                 ret = domain_add_dev_info(si_domain, pdev);
                 if (ret)
                         return 0;
-                ret = domain_context_mapping(si_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
+                ret = domain_context_mapping(si_domain, pdev,
+                                             hw_pass_through ?
+                                             CONTEXT_TT_PASS_THROUGH :
+                                             CONTEXT_TT_MULTI_LEVEL);
                 if (!ret) {
                         printk(KERN_INFO "64bit %s uses identity mapping\n",
                                pci_name(pdev));
@@ -3202,7 +3183,7 @@ int __init intel_iommu_init(void)
          * Check the need for DMA-remapping initialization now.
          * Above initialization will also be used by Interrupt-remapping.
          */
-        if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
+        if (no_iommu || swiotlb || dmar_disabled)
                 return -ENODEV;

         iommu_init_mempool();
@@ -3222,14 +3203,7 @@ int __init intel_iommu_init(void)
         init_timer(&unmap_timer);

         force_iommu = 1;
-
-        if (!iommu_pass_through) {
-                printk(KERN_INFO
-                       "Multi-level page-table translation for DMAR.\n");
-                dma_ops = &intel_dma_ops;
-        } else
-                printk(KERN_INFO
-                       "DMAR: Pass through translation for DMAR.\n");
+        dma_ops = &intel_dma_ops;

         init_iommu_sysfs();