提交 4c923d47 编写于 作者: S Suresh Siddha 提交者: David Woodhouse

iommu: Allocate dma-remapping structures using numa locality info

Allocate dma-remapping structures using numa locality info. On platforms
having remapping hardware units span different nodes, this enables
optimized dma-remapping translation structures access by remapping hardware.
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
上级 824cd75b
...@@ -277,6 +277,7 @@ static int hw_pass_through = 1; ...@@ -277,6 +277,7 @@ static int hw_pass_through = 1;
struct dmar_domain { struct dmar_domain {
int id; /* domain id */ int id; /* domain id */
int nid; /* node id */
unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/ unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/
struct list_head devices; /* all devices' list */ struct list_head devices; /* all devices' list */
...@@ -400,15 +401,18 @@ static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep) ...@@ -400,15 +401,18 @@ static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
} }
static inline void *alloc_pgtable_page(void) static inline void *alloc_pgtable_page(int node)
{ {
unsigned int flags; unsigned int flags;
void *vaddr; struct page *page;
void *vaddr = NULL;
/* trying to avoid low memory issues */ /* trying to avoid low memory issues */
flags = current->flags & PF_MEMALLOC; flags = current->flags & PF_MEMALLOC;
current->flags |= PF_MEMALLOC; current->flags |= PF_MEMALLOC;
vaddr = (void *)get_zeroed_page(GFP_ATOMIC); page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
if (page)
vaddr = page_address(page);
current->flags &= (~PF_MEMALLOC | flags); current->flags &= (~PF_MEMALLOC | flags);
return vaddr; return vaddr;
} }
...@@ -589,7 +593,8 @@ static struct context_entry * device_to_context_entry(struct intel_iommu *iommu, ...@@ -589,7 +593,8 @@ static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
root = &iommu->root_entry[bus]; root = &iommu->root_entry[bus];
context = get_context_addr_from_root(root); context = get_context_addr_from_root(root);
if (!context) { if (!context) {
context = (struct context_entry *)alloc_pgtable_page(); context = (struct context_entry *)
alloc_pgtable_page(iommu->node);
if (!context) { if (!context) {
spin_unlock_irqrestore(&iommu->lock, flags); spin_unlock_irqrestore(&iommu->lock, flags);
return NULL; return NULL;
...@@ -732,7 +737,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, ...@@ -732,7 +737,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
if (!dma_pte_present(pte)) { if (!dma_pte_present(pte)) {
uint64_t pteval; uint64_t pteval;
tmp_page = alloc_pgtable_page(); tmp_page = alloc_pgtable_page(domain->nid);
if (!tmp_page) if (!tmp_page)
return NULL; return NULL;
...@@ -868,7 +873,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu) ...@@ -868,7 +873,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
struct root_entry *root; struct root_entry *root;
unsigned long flags; unsigned long flags;
root = (struct root_entry *)alloc_pgtable_page(); root = (struct root_entry *)alloc_pgtable_page(iommu->node);
if (!root) if (!root)
return -ENOMEM; return -ENOMEM;
...@@ -1263,6 +1268,7 @@ static struct dmar_domain *alloc_domain(void) ...@@ -1263,6 +1268,7 @@ static struct dmar_domain *alloc_domain(void)
if (!domain) if (!domain)
return NULL; return NULL;
domain->nid = -1;
memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
domain->flags = 0; domain->flags = 0;
...@@ -1420,9 +1426,10 @@ static int domain_init(struct dmar_domain *domain, int guest_width) ...@@ -1420,9 +1426,10 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
domain->iommu_snooping = 0; domain->iommu_snooping = 0;
domain->iommu_count = 1; domain->iommu_count = 1;
domain->nid = iommu->node;
/* always allocate the top pgd */ /* always allocate the top pgd */
domain->pgd = (struct dma_pte *)alloc_pgtable_page(); domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
if (!domain->pgd) if (!domain->pgd)
return -ENOMEM; return -ENOMEM;
__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE); __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
...@@ -1577,6 +1584,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment, ...@@ -1577,6 +1584,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
spin_lock_irqsave(&domain->iommu_lock, flags); spin_lock_irqsave(&domain->iommu_lock, flags);
if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) { if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
domain->iommu_count++; domain->iommu_count++;
if (domain->iommu_count == 1)
domain->nid = iommu->node;
domain_update_iommu_cap(domain); domain_update_iommu_cap(domain);
} }
spin_unlock_irqrestore(&domain->iommu_lock, flags); spin_unlock_irqrestore(&domain->iommu_lock, flags);
...@@ -3416,6 +3425,7 @@ static struct dmar_domain *iommu_alloc_vm_domain(void) ...@@ -3416,6 +3425,7 @@ static struct dmar_domain *iommu_alloc_vm_domain(void)
return NULL; return NULL;
domain->id = vm_domid++; domain->id = vm_domid++;
domain->nid = -1;
memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE; domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
...@@ -3442,9 +3452,10 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width) ...@@ -3442,9 +3452,10 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
domain->iommu_coherency = 0; domain->iommu_coherency = 0;
domain->iommu_snooping = 0; domain->iommu_snooping = 0;
domain->max_addr = 0; domain->max_addr = 0;
domain->nid = -1;
/* always allocate the top pgd */ /* always allocate the top pgd */
domain->pgd = (struct dma_pte *)alloc_pgtable_page(); domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
if (!domain->pgd) if (!domain->pgd)
return -ENOMEM; return -ENOMEM;
domain_flush_cache(domain, domain->pgd, PAGE_SIZE); domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册