Commit 134fdc1a authored by Ben Skeggs

drm/nouveau/core/mm: replace region list with next pointer

We never have any need for a doubly-linked list here, and as there are
generally a large number of these objects, replace it with a singly-
linked list in order to save some memory.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Parent 04b88677
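For orientation, here is a standalone sketch of why the change saves memory; the struct names below are hypothetical stand-ins, not the real nvkm definitions. A `struct list_head` link costs two pointers per node, while a bare `next` pointer costs one, so on a 64-bit kernel each region node shrinks by 8 bytes.

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };  /* as in <linux/list.h> */

/* Hypothetical stand-ins for the old and new node layouts. */
struct node_old { struct list_head rl_entry; /* prev + next: two pointers */ };
struct node_new { struct node_new *next;     /* next only: one pointer    */ };

int main(void)
{
        printf("link cost, old: %zu bytes; new: %zu bytes\n",
               sizeof(struct list_head), sizeof(struct node_new *));
        return 0;
}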
@@ -5,7 +5,7 @@
 struct nvkm_mm_node {
         struct list_head nl_entry;
         struct list_head fl_entry;
-        struct list_head rl_entry;
+        struct nvkm_mm_node *next;
 
 #define NVKM_MM_HEAP_ANY 0x00
         u8 heap;
@@ -38,4 +38,10 @@ int nvkm_mm_tail(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
                  u32 size_min, u32 align, struct nvkm_mm_node **);
 void nvkm_mm_free(struct nvkm_mm *, struct nvkm_mm_node **);
 void nvkm_mm_dump(struct nvkm_mm *, const char *);
+
+static inline bool
+nvkm_mm_contiguous(struct nvkm_mm_node *node)
+{
+        return !node->next;
+}
 #endif
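The new `nvkm_mm_contiguous()` helper encodes the invariant that an allocation is contiguous exactly when its region chain consists of a single node. A minimal userspace sketch of that semantics, with hypothetical stand-in types:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct mm_node { struct mm_node *next; };  /* stand-in for nvkm_mm_node */

static inline bool mm_contiguous(const struct mm_node *node)
{
        return !node->next;  /* one region <=> no successor */
}

int main(void)
{
        struct mm_node b = { .next = NULL };
        struct mm_node a = { .next = &b };

        assert(!mm_contiguous(&a));  /* two regions: not contiguous */
        assert(mm_contiguous(&b));   /* single region: contiguous   */
        return 0;
}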
@@ -29,7 +29,7 @@ struct nvkm_mem {
         u8 page_shift;
 
         struct nvkm_mm_node *tag;
-        struct list_head regions;
+        struct nvkm_mm_node *mem;
         dma_addr_t *pages;
         u32 memtype;
         u64 offset;
@@ -321,7 +321,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
         if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
                 if (bo->mem.mem_type == TTM_PL_VRAM) {
                         struct nvkm_mem *mem = bo->mem.mm_node;
-                        if (!list_is_singular(&mem->regions))
+                        if (!nvkm_mm_contiguous(mem->mem))
                                 evict = true;
                 }
                 nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG;
@@ -147,6 +147,7 @@ nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
                 if (!this)
                         return -ENOMEM;
 
+                this->next = NULL;
                 this->type = type;
                 list_del(&this->fl_entry);
                 *pnode = this;
@@ -225,6 +226,7 @@ nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
                 if (!this)
                         return -ENOMEM;
 
+                this->next = NULL;
                 this->type = type;
                 list_del(&this->fl_entry);
                 *pnode = this;
@@ -445,7 +445,7 @@ gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
 {
         struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc;
         struct nvkm_mm *mm = &ram->vram;
-        struct nvkm_mm_node *r;
+        struct nvkm_mm_node **node, *r;
         struct nvkm_mem *mem;
         int type = (memtype & 0x0ff);
         int back = (memtype & 0x800);
@@ -462,7 +462,6 @@ gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
         if (!mem)
                 return -ENOMEM;
 
-        INIT_LIST_HEAD(&mem->regions);
         mem->size = size;
 
         mutex_lock(&ram->fb->subdev.mutex);
@@ -478,6 +477,7 @@ gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
         }
         mem->memtype = type;
 
+        node = &mem->mem;
 do {
                 if (back)
                         ret = nvkm_mm_tail(mm, 0, 1, size, ncmin, align, &r);
@@ -489,13 +489,13 @@ gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
                         return ret;
                 }
 
-                list_add_tail(&r->rl_entry, &mem->regions);
+                *node = r;
+                node = &r->next;
                 size -= r->length;
         } while (size);
 
         mutex_unlock(&ram->fb->subdev.mutex);
 
-        r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
-        mem->offset = (u64)r->offset << NVKM_RAM_MM_SHIFT;
+        mem->offset = (u64)mem->mem->offset << NVKM_RAM_MM_SHIFT;
         *pmem = mem;
         return 0;
 }
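The allocation loop above builds the region chain with a pointer-to-pointer cursor (`node = &mem->mem`, then `*node = r; node = &r->next;`), which appends in order without a tail pointer or a first-iteration special case; it also leaves `mem->mem` pointing at the first region, which is why `mem->offset` can be taken from `mem->mem->offset` directly. A standalone sketch of the idiom, with hypothetical types:

#include <stdio.h>
#include <stdlib.h>

struct mm_node { int offset; struct mm_node *next; };

int main(void)
{
        struct mm_node *head = NULL;
        struct mm_node **link = &head;  /* plays the role of 'node' above */

        for (int i = 0; i < 3; i++) {
                struct mm_node *r = calloc(1, sizeof(*r));
                if (!r)
                        return 1;
                r->offset = i * 0x100;
                *link = r;          /* *node = r;       */
                link = &r->next;    /* node = &r->next; */
        }

        /* Walk in insertion order, then release each node. */
        while (head) {
                struct mm_node *r = head;
                head = head->next;
                printf("region at 0x%x\n", r->offset);
                free(r);
        }
        return 0;
}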
@@ -496,15 +496,12 @@ nv50_ram_tidy(struct nvkm_ram *base)
 void
 __nv50_ram_put(struct nvkm_ram *ram, struct nvkm_mem *mem)
 {
-        struct nvkm_mm_node *this;
-
-        while (!list_empty(&mem->regions)) {
-                this = list_first_entry(&mem->regions, typeof(*this), rl_entry);
-
-                list_del(&this->rl_entry);
-                nvkm_mm_free(&ram->vram, &this);
-        }
+        struct nvkm_mm_node *next = mem->mem;
+        struct nvkm_mm_node *node;
+        while ((node = next)) {
+                next = node->next;
+                nvkm_mm_free(&ram->vram, &node);
+        }
 
         nvkm_mm_free(&ram->tags, &mem->tag);
 }
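The teardown loop reads `node->next` before handing the node to `nvkm_mm_free()`, since the node must not be touched after it has been released (and `nvkm_mm_free()` also NULLs the caller's pointer). A standalone sketch with hypothetical types, using `free()` as a stand-in for returning the node to the allocator:

#include <stdlib.h>

struct mm_node { struct mm_node *next; };

/* Stand-in for nvkm_mm_free(): releases *pnode and NULLs the pointer. */
static void mm_free(struct mm_node **pnode)
{
        free(*pnode);
        *pnode = NULL;
}

static void put_all(struct mm_node *head)
{
        struct mm_node *next = head;
        struct mm_node *node;

        while ((node = next)) {
                next = node->next;  /* saved before the node is released */
                mm_free(&node);
        }
}

int main(void)
{
        struct mm_node *b = calloc(1, sizeof(*b));
        struct mm_node *a = calloc(1, sizeof(*a));

        if (!a || !b)
                return 1;
        a->next = b;
        put_all(a);
        return 0;
}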
@@ -530,7 +527,7 @@ nv50_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
 {
         struct nvkm_mm *heap = &ram->vram;
         struct nvkm_mm *tags = &ram->tags;
-        struct nvkm_mm_node *r;
+        struct nvkm_mm_node **node, *r;
         struct nvkm_mem *mem;
         int comp = (memtype & 0x300) >> 8;
         int type = (memtype & 0x07f);
@@ -559,11 +556,11 @@ nv50_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
                         comp = 0;
         }
 
-        INIT_LIST_HEAD(&mem->regions);
         mem->memtype = (comp << 7) | type;
         mem->size = max;
 
         type = nv50_fb_memtype[type];
+        node = &mem->mem;
         do {
                 if (back)
                         ret = nvkm_mm_tail(heap, 0, type, max, min, align, &r);
@@ -575,13 +572,13 @@ nv50_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
                         return ret;
                 }
 
-                list_add_tail(&r->rl_entry, &mem->regions);
+                *node = r;
+                node = &r->next;
                 max -= r->length;
         } while (max);
 
         mutex_unlock(&ram->fb->subdev.mutex);
 
-        r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
-        mem->offset = (u64)r->offset << NVKM_RAM_MM_SHIFT;
+        mem->offset = (u64)mem->mem->offset << NVKM_RAM_MM_SHIFT;
         *pmem = mem;
         return 0;
 }
@@ -305,11 +305,11 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
         struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
         struct gk20a_instmem *imem = node->base.imem;
         struct device *dev = imem->base.subdev.device->dev;
-        struct nvkm_mm_node *r;
+        struct nvkm_mm_node *r = node->base.mem.mem;
         unsigned long flags;
         int i;
 
-        if (unlikely(list_empty(&node->base.mem.regions)))
+        if (unlikely(!r))
                 goto out;
 
         spin_lock_irqsave(&imem->lock, flags);
@@ -320,9 +320,6 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
 
         spin_unlock_irqrestore(&imem->lock, flags);
 
-        r = list_first_entry(&node->base.mem.regions, struct nvkm_mm_node,
-                             rl_entry);
-
         /* clear IOMMU bit to unmap pages */
         r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);
@@ -404,10 +401,7 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
         node->r.length = (npages << PAGE_SHIFT) >> 12;
 
         node->base.mem.offset = node->handle;
-
-        INIT_LIST_HEAD(&node->base.mem.regions);
-        list_add_tail(&node->r.rl_entry, &node->base.mem.regions);
-
+        node->base.mem.mem = &node->r;
         return 0;
 }
@@ -484,10 +478,7 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
         r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift);
 
         node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;
-
-        INIT_LIST_HEAD(&node->base.mem.regions);
-        list_add_tail(&r->rl_entry, &node->base.mem.regions);
-
+        node->base.mem.mem = r;
         return 0;
 
 release_area:
@@ -31,7 +31,7 @@ nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
 {
         struct nvkm_vm *vm = vma->vm;
         struct nvkm_mmu *mmu = vm->mmu;
-        struct nvkm_mm_node *r;
+        struct nvkm_mm_node *r = node->mem;
         int big = vma->node->type != mmu->func->spg_shift;
         u32 offset = vma->node->offset + (delta >> 12);
         u32 bits = vma->node->type - 12;
@@ -41,7 +41,7 @@ nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
         u32 end, len;
 
         delta = 0;
-        list_for_each_entry(r, &node->regions, rl_entry) {
+        while (r) {
                 u64 phys = (u64)r->offset << 12;
                 u32 num = r->length >> bits;
@@ -65,7 +65,8 @@ nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
                         delta += (u64)len << vma->node->type;
                 }
-        }
+                r = r->next;
+        };
 
         mmu->func->flush(vm);
 }
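The traversal change replaces `list_for_each_entry()` over `rl_entry` with an explicit walk of the `next` chain; the loop body is unchanged, only the advance step is now spelled out. A standalone sketch with hypothetical types:

#include <stdio.h>

struct mm_node { unsigned offset, length; struct mm_node *next; };

static void map_regions(struct mm_node *r)
{
        while (r) {  /* was: list_for_each_entry(r, &node->regions, rl_entry) */
                printf("map [0x%x, 0x%x)\n", r->offset, r->offset + r->length);
                r = r->next;  /* the advance the list macro did implicitly */
        }
}

int main(void)
{
        struct mm_node b = { 0x200, 0x80, NULL };
        struct mm_node a = { 0x100, 0x80, &b };

        map_regions(&a);
        return 0;
}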