Commit 8bf47816 authored by Joerg Roedel

iommu/vt-d: Split up iommu->domains array

This array is indexed by the domain-id and contains the
pointers to the domains attached to this iommu. Modern
systems support 65536 domain ids, so that this array has a
size of 512kb, per iommu.

This is a huge waste of space, as the array is usually
sparsely populated. This patch makes the array
two-dimensional and allocates the memory for the domain
pointers on-demand.
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Parent 9452d5bf
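
For orientation, the two-level scheme the patch introduces can be modelled in a few lines of userspace C: the 16-bit domain-id is split into a chunk index (did >> 8) and an offset within the chunk (did & 0xff), and each 256-entry chunk is allocated only when a domain is first stored in it. The sketch below is illustrative only, not the kernel code; the helper names get_domain()/set_domain(), the statically sized top-level array, and the use of calloc() in place of kzalloc() are assumptions made for a standalone demo (in the patch the top level is itself allocated in iommu_init_domains() based on cap_ndoms(iommu->cap)).

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define NDOMAINS   65536
#define CHUNK_SIZE 256
#define NCHUNKS    (NDOMAINS / CHUNK_SIZE)

struct dmar_domain { int id; };            /* stand-in for the real struct */

/* Top level: one pointer per 256-entry chunk. Statically sized here for
 * the demo; the patch sizes it from cap_ndoms() and kzalloc()s it. */
static struct dmar_domain **domains[NCHUNKS];

static struct dmar_domain *get_domain(uint16_t did)
{
	struct dmar_domain **chunk = domains[did >> 8];

	return chunk ? chunk[did & 0xff] : NULL;
}

static int set_domain(uint16_t did, struct dmar_domain *domain)
{
	int idx = did >> 8;

	if (!domains[idx]) {
		/* second level allocated on demand, analogous to the
		 * GFP_ATOMIC kzalloc in set_iommu_domain() below */
		domains[idx] = calloc(CHUNK_SIZE, sizeof(*domains[idx]));
		if (!domains[idx])
			return -1;
	}
	domains[idx][did & 0xff] = domain;
	return 0;
}

int main(void)
{
	struct dmar_domain d = { .id = 42 };

	set_domain(0x1234, &d);
	printf("did 0x1234 -> %p\n", (void *)get_domain(0x1234));
	printf("did 0x4321 -> %p\n", (void *)get_domain(0x4321)); /* NULL: chunk never allocated */

	/* Flat array: 65536 * sizeof(void *) = 512 KiB per IOMMU.
	 * Two-level:  256 top-level pointers (2 KiB) plus 2 KiB per chunk
	 * that actually holds a domain. */
	return 0;
}
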
@@ -571,13 +571,32 @@ static struct kmem_cache *iommu_devinfo_cache;
 
 static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
 {
-	return iommu->domains[did];
+	struct dmar_domain **domains;
+	int idx = did >> 8;
+
+	domains = iommu->domains[idx];
+	if (!domains)
+		return NULL;
+
+	return domains[did & 0xff];
 }
 
 static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
 			     struct dmar_domain *domain)
 {
-	iommu->domains[did] = domain;
+	struct dmar_domain **domains;
+	int idx = did >> 8;
+
+	if (!iommu->domains[idx]) {
+		size_t size = 256 * sizeof(struct dmar_domain *);
+		iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
+	}
+
+	domains = iommu->domains[idx];
+	if (WARN_ON(!domains))
+		return;
+	else
+		domains[did & 0xff] = domain;
 }
 
 static inline void *alloc_pgtable_page(int node)
@@ -1530,35 +1549,43 @@ static void iommu_disable_translation(struct intel_iommu *iommu)
 
 static int iommu_init_domains(struct intel_iommu *iommu)
 {
-	unsigned long ndomains;
-	unsigned long nlongs;
+	u32 ndomains, nlongs;
+	size_t size;
 
 	ndomains = cap_ndoms(iommu->cap);
-	pr_debug("%s: Number of Domains supported <%ld>\n",
+	pr_debug("%s: Number of Domains supported <%d>\n",
 		 iommu->name, ndomains);
 	nlongs = BITS_TO_LONGS(ndomains);
 
 	spin_lock_init(&iommu->lock);
 
-	/* TBD: there might be 64K domains,
-	 * consider other allocation for future chip
-	 */
 	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
 	if (!iommu->domain_ids) {
 		pr_err("%s: Allocating domain id array failed\n",
 		       iommu->name);
 		return -ENOMEM;
 	}
-	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
-			GFP_KERNEL);
-	if (!iommu->domains) {
+
+	size = ((ndomains >> 8) + 1) * sizeof(struct dmar_domain **);
+	iommu->domains = kzalloc(size, GFP_KERNEL);
+
+	if (iommu->domains) {
+		size = 256 * sizeof(struct dmar_domain *);
+		iommu->domains[0] = kzalloc(size, GFP_KERNEL);
+	}
+
+	if (!iommu->domains || !iommu->domains[0]) {
 		pr_err("%s: Allocating domain array failed\n",
 		       iommu->name);
 		kfree(iommu->domain_ids);
+		kfree(iommu->domains);
 		iommu->domain_ids = NULL;
+		iommu->domains    = NULL;
 		return -ENOMEM;
 	}
 
+
+
 	/*
 	 * If Caching mode is set, then invalid translations are tagged
 	 * with domain-id 0, hence we need to pre-allocate it. We also
@@ -1600,6 +1627,11 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
 static void free_dmar_iommu(struct intel_iommu *iommu)
 {
 	if ((iommu->domains) && (iommu->domain_ids)) {
+		int elems = (cap_ndoms(iommu->cap) >> 8) + 1;
+		int i;
+
+		for (i = 0; i < elems; i++)
+			kfree(iommu->domains[i]);
 		kfree(iommu->domains);
 		kfree(iommu->domain_ids);
 		iommu->domains = NULL;
@@ -344,7 +344,7 @@ struct intel_iommu {
 
 #ifdef CONFIG_INTEL_IOMMU
 	unsigned long	*domain_ids; /* bitmap of domains */
-	struct dmar_domain **domains; /* ptr to domains */
+	struct dmar_domain ***domains; /* ptr to domains */
 	spinlock_t	lock; /* protect context, domain ids */
 	struct root_entry *root_entry; /* virtual address */
 