Commit d961db75 authored by Ben Skeggs

drm/ttm: restructure to allow driver to plug in alternate memory manager

Nouveau will need this on GeForce 8 and up to account for the GPU
reordering physical VRAM for some memory types.
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Acked-by: Thomas Hellström <thellstrom@vmware.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Parent 42311ff9
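This patch turns the per-memory-type allocator into a pluggable interface: struct ttm_mem_reg gains an opaque mm_node plus a start field, struct ttm_mem_type_manager gains a func table and a priv pointer, and the old drm_mm logic moves into ttm_bo_manager.c as the stock implementation. As a rough sketch of what an alternate, driver-private VRAM manager could plug in through the new hooks (everything named my_* is invented for illustration; only the five hook signatures and the man->priv / mem->mm_node / mem->start fields come from this patch):

#include "ttm/ttm_bo_driver.h"

/* Hypothetical driver-private allocator.  These helpers do not exist in
 * the tree; they stand in for whatever bookkeeping the driver needs
 * (e.g. nouveau's type-aware VRAM layout on GeForce 8+). */
struct my_vram_node { unsigned long offset; /* bytes */ };
extern void *my_allocator_create(unsigned long p_size);
extern void my_allocator_destroy(void *priv);
extern struct my_vram_node *my_allocator_get(void *priv, unsigned long npages,
					     uint32_t align, unsigned long fpfn,
					     unsigned long lpfn);
extern void my_allocator_put(void *priv, void *node);
extern void my_allocator_dump(void *priv, const char *prefix);

static int my_vram_man_init(struct ttm_mem_type_manager *man,
			    unsigned long p_size)
{
	/* stash driver-private state where the TTM core won't look at it */
	man->priv = my_allocator_create(p_size);
	return man->priv ? 0 : -ENOMEM;
}

static int my_vram_man_takedown(struct ttm_mem_type_manager *man)
{
	my_allocator_destroy(man->priv);
	man->priv = NULL;
	return 0;
}

static int my_vram_man_get_node(struct ttm_mem_type_manager *man,
				struct ttm_buffer_object *bo,
				struct ttm_placement *placement,
				struct ttm_mem_reg *mem)
{
	struct my_vram_node *node;

	node = my_allocator_get(man->priv, mem->num_pages,
				mem->page_alignment,
				placement->fpfn, placement->lpfn);
	if (node) {
		mem->mm_node = node;	/* opaque to the core from now on */
		mem->start = node->offset >> PAGE_SHIFT;
	}
	/* returning 0 with mem->mm_node == NULL tells the core to evict
	 * something and retry */
	return 0;
}

static void my_vram_man_put_node(struct ttm_mem_type_manager *man,
				 struct ttm_mem_reg *mem)
{
	my_allocator_put(man->priv, mem->mm_node);
	mem->mm_node = NULL;
}

static void my_vram_man_debug(struct ttm_mem_type_manager *man,
			      const char *prefix)
{
	my_allocator_dump(man->priv, prefix);
}

static const struct ttm_mem_type_manager_func my_vram_manager_func = {
	my_vram_man_init,
	my_vram_man_takedown,
	my_vram_man_get_node,
	my_vram_man_put_node,
	my_vram_man_debug,
};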
@@ -381,6 +381,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case TTM_PL_VRAM:
+		man->func = &ttm_bo_manager_func;
 		man->flags = TTM_MEMTYPE_FLAG_FIXED |
 			     TTM_MEMTYPE_FLAG_MAPPABLE;
 		man->available_caching = TTM_PL_FLAG_UNCACHED |
@@ -392,6 +393,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->gpu_offset = 0;
 		break;
 	case TTM_PL_TT:
+		man->func = &ttm_bo_manager_func;
 		switch (dev_priv->gart_info.type) {
 		case NOUVEAU_GART_AGP:
 			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -494,8 +496,8 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 	u64 src_offset, dst_offset;
 	int ret;

-	src_offset = old_mem->mm_node->start << PAGE_SHIFT;
-	dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
+	src_offset = old_mem->start << PAGE_SHIFT;
+	dst_offset = new_mem->start << PAGE_SHIFT;
 	if (!nvbo->no_vm) {
 		if (old_mem->mem_type == TTM_PL_VRAM)
 			src_offset += dev_priv->vm_vram_base;
@@ -597,8 +599,8 @@ static int
 nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
-	u32 src_offset = old_mem->mm_node->start << PAGE_SHIFT;
-	u32 dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
+	u32 src_offset = old_mem->start << PAGE_SHIFT;
+	u32 dst_offset = new_mem->start << PAGE_SHIFT;
 	u32 page_count = new_mem->num_pages;
 	int ret;
@@ -746,7 +748,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
 		return 0;
 	}

-	offset = new_mem->mm_node->start << PAGE_SHIFT;
+	offset = new_mem->start << PAGE_SHIFT;
 	if (dev_priv->card_type == NV_50) {
 		ret = nv50_mem_vm_bind_linear(dev,
@@ -860,14 +862,14 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 	case TTM_PL_TT:
 #if __OS_HAS_AGP
 		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
-			mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+			mem->bus.offset = mem->start << PAGE_SHIFT;
 			mem->bus.base = dev_priv->gart_info.aper_base;
 			mem->bus.is_iomem = true;
 		}
 #endif
 		break;
 	case TTM_PL_VRAM:
-		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+		mem->bus.offset = mem->start << PAGE_SHIFT;
 		mem->bus.base = pci_resource_start(dev->pdev, 1);
 		mem->bus.is_iomem = true;
 		break;
@@ -897,7 +899,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 	}

 	/* make sure bo is in mappable vram */
-	if (bo->mem.mm_node->start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
+	if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
 		return 0;
......
@@ -48,14 +48,14 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
 					     dev_priv->gart_info.aper_size,
 					     NV_DMA_ACCESS_RO, &pushbuf,
 					     NULL);
-		chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
+		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
 	} else
 	if (dev_priv->card_type != NV_04) {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
 					     dev_priv->fb_available_size,
 					     NV_DMA_ACCESS_RO,
 					     NV_DMA_TARGET_VIDMEM, &pushbuf);
-		chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
+		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
 	} else {
 		/* NV04 cmdbuf hack, from original ddx.. not sure of it's
 		 * exact reason for existing :) PCI access to cmdbuf in
@@ -67,7 +67,7 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
 					     dev_priv->fb_available_size,
 					     NV_DMA_ACCESS_RO,
 					     NV_DMA_TARGET_PCI, &pushbuf);
-		chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
+		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
 	}

 	nouveau_gpuobj_ref(pushbuf, &chan->pushbuf);
......
@@ -113,7 +113,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
 		return -ENOMEM;
 	}

-	offset = chan->notifier_bo->bo.mem.mm_node->start << PAGE_SHIFT;
+	offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
 	if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) {
 		target = NV_DMA_TARGET_VIDMEM;
 	} else
......
@@ -95,9 +95,9 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
 	unsigned i, j, pte;

-	NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start);
+	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);

-	pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT);
+	pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT);
 	nvbe->pte_start = pte;
 	for (i = 0; i < nvbe->nr_pages; i++) {
 		dma_addr_t dma_offset = nvbe->pages[i];
......
@@ -104,8 +104,7 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
 		OUT_RING(evo, nv_crtc->lut.depth == 8 ?
 				NV50_EVO_CRTC_CLUT_MODE_OFF :
 				NV50_EVO_CRTC_CLUT_MODE_ON);
-		OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.mm_node->start <<
-				 PAGE_SHIFT) >> 8);
+		OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.start << PAGE_SHIFT) >> 8);
 		if (dev_priv->chipset != 0x50) {
 			BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
 			OUT_RING(evo, NvEvoVRAM);
......
@@ -345,7 +345,7 @@ nv50_display_init(struct drm_device *dev)
 	/* initialise fifo */
 	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0),
-		((evo->pushbuf_bo->bo.mem.mm_node->start << PAGE_SHIFT) >> 8) |
+		((evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT) >> 8) |
 		NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM |
 		NV50_PDISPLAY_CHANNEL_DMA_CB_VALID);
 	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000);
......
@@ -347,7 +347,7 @@ nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
 		return ret;
 	}

-	gpuobj->vinst = gpuobj->im_backing->bo.mem.mm_node->start << PAGE_SHIFT;
+	gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT;
 	return 0;
 }
......
@@ -50,7 +50,7 @@ nvc0_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
 		return ret;
 	}

-	gpuobj->vinst = gpuobj->im_backing->bo.mem.mm_node->start << PAGE_SHIFT;
+	gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT;
 	return 0;
 }
......
@@ -435,7 +435,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 out:
 	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
-			       bo->tbo.mem.mm_node->start << PAGE_SHIFT,
+			       bo->tbo.mem.start << PAGE_SHIFT,
 			       bo->tbo.num_pages << PAGE_SHIFT);
 	return 0;
 }
@@ -532,7 +532,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	rdev = rbo->rdev;
 	if (bo->mem.mem_type == TTM_PL_VRAM) {
 		size = bo->mem.num_pages << PAGE_SHIFT;
-		offset = bo->mem.mm_node->start << PAGE_SHIFT;
+		offset = bo->mem.start << PAGE_SHIFT;
 		if ((offset + size) > rdev->mc.visible_vram_size) {
 			/* hurrah the memory is not visible ! */
 			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
@@ -540,7 +540,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 			r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
 			if (unlikely(r != 0))
 				return r;
-			offset = bo->mem.mm_node->start << PAGE_SHIFT;
+			offset = bo->mem.start << PAGE_SHIFT;
 			/* this should not happen */
 			if ((offset + size) > rdev->mc.visible_vram_size)
 				return -EINVAL;
......
@@ -152,6 +152,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case TTM_PL_TT:
+		man->func = &ttm_bo_manager_func;
 		man->gpu_offset = rdev->mc.gtt_start;
 		man->available_caching = TTM_PL_MASK_CACHING;
 		man->default_caching = TTM_PL_FLAG_CACHED;
@@ -173,6 +174,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		break;
 	case TTM_PL_VRAM:
 		/* "On-card" video ram */
+		man->func = &ttm_bo_manager_func;
 		man->gpu_offset = rdev->mc.vram_start;
 		man->flags = TTM_MEMTYPE_FLAG_FIXED |
 			     TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -246,8 +248,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 	if (unlikely(r)) {
 		return r;
 	}
-	old_start = old_mem->mm_node->start << PAGE_SHIFT;
-	new_start = new_mem->mm_node->start << PAGE_SHIFT;
+	old_start = old_mem->start << PAGE_SHIFT;
+	new_start = new_mem->start << PAGE_SHIFT;

 	switch (old_mem->mem_type) {
 	case TTM_PL_VRAM:
@@ -435,14 +437,14 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
 #if __OS_HAS_AGP
 		if (rdev->flags & RADEON_IS_AGP) {
 			/* RADEON_IS_AGP is set only if AGP is active */
-			mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+			mem->bus.offset = mem->start << PAGE_SHIFT;
 			mem->bus.base = rdev->mc.agp_base;
 			mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
 		}
 #endif
 		break;
 	case TTM_PL_VRAM:
-		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+		mem->bus.offset = mem->start << PAGE_SHIFT;
 		/* check if it's visible */
 		if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
 			return -EINVAL;
@@ -685,7 +687,7 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend,
 	int r;

 	gtt = container_of(backend, struct radeon_ttm_backend, backend);
-	gtt->offset = bo_mem->mm_node->start << PAGE_SHIFT;
+	gtt->offset = bo_mem->start << PAGE_SHIFT;
 	if (!gtt->num_pages) {
 		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend);
 	}
@@ -784,9 +786,9 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
 		radeon_mem_types_list[i].show = &radeon_mm_dump_table;
 		radeon_mem_types_list[i].driver_features = 0;
 		if (i == 0)
-			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].manager;
+			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].priv;
 		else
-			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;
+			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].priv;
 	}
 	/* Add ttm page pool to debugfs */
......
@@ -4,6 +4,7 @@
 ccflags-y := -Iinclude/drm
 ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
 	ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
-	ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o
+	ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
+	ttm_bo_manager.o

 obj-$(CONFIG_DRM_TTM) += ttm.o
@@ -74,6 +74,7 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
 {
 	struct ttm_agp_backend *agp_be =
 	    container_of(backend, struct ttm_agp_backend, backend);
+	struct drm_mm_node *node = bo_mem->mm_node;
 	struct agp_memory *mem = agp_be->mem;
 	int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
 	int ret;
@@ -81,7 +82,7 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
 	mem->is_flushed = 1;
 	mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;

-	ret = agp_bind_memory(mem, bo_mem->mm_node->start);
+	ret = agp_bind_memory(mem, node->start);
 	if (ret)
 		printk(KERN_ERR TTM_PFX "AGP Bind memory failed.\n");
......
@@ -84,11 +84,8 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
 	       man->available_caching);
 	printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n",
 	       man->default_caching);
-	if (mem_type != TTM_PL_SYSTEM) {
-		spin_lock(&bdev->glob->lru_lock);
-		drm_mm_debug_table(&man->manager, TTM_PFX);
-		spin_unlock(&bdev->glob->lru_lock);
-	}
+	if (mem_type != TTM_PL_SYSTEM)
+		(*man->func->debug)(man, TTM_PFX);
 }

 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
@@ -421,7 +418,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 	if (bo->mem.mm_node) {
 		spin_lock(&bo->lock);
-		bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
+		bo->offset = (bo->mem.start << PAGE_SHIFT) +
 		    bdev->man[bo->mem.mem_type].gpu_offset;
 		bo->cur_placement = bo->mem.placement;
 		spin_unlock(&bo->lock);
@@ -724,52 +721,12 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 	return ret;
 }

-static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
-			       struct ttm_mem_type_manager *man,
-			       struct ttm_placement *placement,
-			       struct ttm_mem_reg *mem,
-			       struct drm_mm_node **node)
-{
-	struct ttm_bo_global *glob = bo->glob;
-	unsigned long lpfn;
-	int ret;
-
-	lpfn = placement->lpfn;
-	if (!lpfn)
-		lpfn = man->size;
-	*node = NULL;
-	do {
-		ret = drm_mm_pre_get(&man->manager);
-		if (unlikely(ret))
-			return ret;
-
-		spin_lock(&glob->lru_lock);
-		*node = drm_mm_search_free_in_range(&man->manager,
-					mem->num_pages, mem->page_alignment,
-					placement->fpfn, lpfn, 1);
-		if (unlikely(*node == NULL)) {
-			spin_unlock(&glob->lru_lock);
-			return 0;
-		}
-		*node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
-						      mem->page_alignment,
-						      placement->fpfn, lpfn);
-		spin_unlock(&glob->lru_lock);
-	} while (*node == NULL);
-	return 0;
-}
-
 void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
 {
-	struct ttm_bo_global *glob = bo->glob;
+	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

-	if (mem->mm_node) {
-		spin_lock(&glob->lru_lock);
-		drm_mm_put_block(mem->mm_node);
-		spin_unlock(&glob->lru_lock);
-		mem->mm_node = NULL;
-	}
+	if (mem->mm_node)
+		(*man->func->put_node)(man, mem);
 }
 EXPORT_SYMBOL(ttm_bo_mem_put);
@@ -788,14 +745,13 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
-	struct drm_mm_node *node;
 	int ret;

 	do {
-		ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
+		ret = (*man->func->get_node)(man, bo, placement, mem);
 		if (unlikely(ret != 0))
 			return ret;
-		if (node)
+		if (mem->mm_node)
 			break;
 		spin_lock(&glob->lru_lock);
 		if (list_empty(&man->lru)) {
@@ -808,9 +764,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 		if (unlikely(ret != 0))
 			return ret;
 	} while (1);
-	if (node == NULL)
+	if (mem->mm_node == NULL)
 		return -ENOMEM;
-	mem->mm_node = node;
 	mem->mem_type = mem_type;
 	return 0;
 }
@@ -884,7 +839,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 	bool type_found = false;
 	bool type_ok = false;
 	bool has_erestartsys = false;
-	struct drm_mm_node *node = NULL;
 	int i, ret;

 	mem->mm_node = NULL;
@@ -918,17 +872,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (man->has_type && man->use_type) {
 			type_found = true;
-			ret = ttm_bo_man_get_node(bo, man, placement, mem,
-						  &node);
+			ret = (*man->func->get_node)(man, bo, placement, mem);
 			if (unlikely(ret))
 				return ret;
 		}
-		if (node)
+		if (mem->mm_node)
 			break;
 	}

-	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
-		mem->mm_node = node;
+	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
 		mem->mem_type = mem_type;
 		mem->placement = cur_flags;
 		return 0;
@@ -998,7 +950,6 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 			bool interruptible, bool no_wait_reserve,
 			bool no_wait_gpu)
 {
-	struct ttm_bo_global *glob = bo->glob;
 	int ret = 0;
 	struct ttm_mem_reg mem;
@@ -1026,11 +977,8 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 		goto out_unlock;
 	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
 out_unlock:
-	if (ret && mem.mm_node) {
-		spin_lock(&glob->lru_lock);
-		drm_mm_put_block(mem.mm_node);
-		spin_unlock(&glob->lru_lock);
-	}
+	if (ret && mem.mm_node)
+		ttm_bo_mem_put(bo, &mem);
 	return ret;
 }
@@ -1038,11 +986,10 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
 			     struct ttm_mem_reg *mem)
 {
 	int i;
-	struct drm_mm_node *node = mem->mm_node;

-	if (node && placement->lpfn != 0 &&
-	    (node->start < placement->fpfn ||
-	     node->start + node->size > placement->lpfn))
+	if (mem->mm_node && placement->lpfn != 0 &&
+	    (mem->start < placement->fpfn ||
+	     mem->start + mem->num_pages > placement->lpfn))
 		return -1;

 	for (i = 0; i < placement->num_placement; i++) {
@@ -1286,7 +1233,6 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,

 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 {
-	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man;
 	int ret = -EINVAL;
@@ -1309,13 +1255,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 	if (mem_type > 0) {
 		ttm_bo_force_list_clean(bdev, mem_type, false);

-		spin_lock(&glob->lru_lock);
-		if (drm_mm_clean(&man->manager))
-			drm_mm_takedown(&man->manager);
-		else
-			ret = -EBUSY;
-		spin_unlock(&glob->lru_lock);
+		ret = (*man->func->takedown)(man);
 	}

 	return ret;
@@ -1366,6 +1306,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 	ret = bdev->driver->init_mem_type(bdev, type, man);
 	if (ret)
 		return ret;
+	man->bdev = bdev;

 	ret = 0;
 	if (type != TTM_PL_SYSTEM) {
@@ -1375,7 +1316,8 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 			type);
 			return ret;
 		}
-		ret = drm_mm_init(&man->manager, 0, p_size);
+
+		ret = (*man->func->init)(man, p_size);
 		if (ret)
 			return ret;
 	}
......
/**************************************************************************
*
* Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>

static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
			       struct ttm_buffer_object *bo,
			       struct ttm_placement *placement,
			       struct ttm_mem_reg *mem)
{
	struct ttm_bo_global *glob = man->bdev->glob;
	struct drm_mm *mm = man->priv;
	struct drm_mm_node *node = NULL;
	unsigned long lpfn;
	int ret;

	lpfn = placement->lpfn;
	if (!lpfn)
		lpfn = man->size;

	do {
		ret = drm_mm_pre_get(mm);
		if (unlikely(ret))
			return ret;

		spin_lock(&glob->lru_lock);
		node = drm_mm_search_free_in_range(mm,
					mem->num_pages, mem->page_alignment,
					placement->fpfn, lpfn, 1);
		if (unlikely(node == NULL)) {
			spin_unlock(&glob->lru_lock);
			return 0;
		}
		node = drm_mm_get_block_atomic_range(node, mem->num_pages,
						     mem->page_alignment,
						     placement->fpfn, lpfn);
		spin_unlock(&glob->lru_lock);
	} while (node == NULL);

	mem->mm_node = node;
	mem->start = node->start;
	return 0;
}

static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
				struct ttm_mem_reg *mem)
{
	struct ttm_bo_global *glob = man->bdev->glob;

	if (mem->mm_node) {
		spin_lock(&glob->lru_lock);
		drm_mm_put_block(mem->mm_node);
		spin_unlock(&glob->lru_lock);
		mem->mm_node = NULL;
	}
}

static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
			   unsigned long p_size)
{
	struct drm_mm *mm;
	int ret;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (!mm)
		return -ENOMEM;

	ret = drm_mm_init(mm, 0, p_size);
	if (ret) {
		kfree(mm);
		return ret;
	}

	man->priv = mm;
	return 0;
}

static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
{
	struct ttm_bo_global *glob = man->bdev->glob;
	struct drm_mm *mm = man->priv;
	int ret = 0;

	spin_lock(&glob->lru_lock);
	if (drm_mm_clean(mm)) {
		drm_mm_takedown(mm);
		kfree(mm);
		man->priv = NULL;
	} else
		ret = -EBUSY;
	spin_unlock(&glob->lru_lock);

	return ret;
}

static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
			     const char *prefix)
{
	struct ttm_bo_global *glob = man->bdev->glob;
	struct drm_mm *mm = man->priv;

	spin_lock(&glob->lru_lock);
	drm_mm_debug_table(mm, prefix);
	spin_unlock(&glob->lru_lock);
}

const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
	ttm_bo_man_init,
	ttm_bo_man_takedown,
	ttm_bo_man_get_node,
	ttm_bo_man_put_node,
	ttm_bo_man_debug
};
EXPORT_SYMBOL(ttm_bo_manager_func);
@@ -256,8 +256,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 		dir = 1;

 	if ((old_mem->mem_type == new_mem->mem_type) &&
-	    (new_mem->mm_node->start <
-	     old_mem->mm_node->start + old_mem->mm_node->size)) {
+	    (new_mem->start < old_mem->start + old_mem->size)) {
 		dir = -1;
 		add = new_mem->num_pages - 1;
 	}
......
@@ -147,6 +147,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		break;
 	case TTM_PL_VRAM:
 		/* "On-card" video ram */
+		man->func = &ttm_bo_manager_func;
 		man->gpu_offset = 0;
 		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
 		man->available_caching = TTM_PL_MASK_CACHING;
@@ -203,7 +204,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg
 		/* System memory */
 		return 0;
 	case TTM_PL_VRAM:
-		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+		mem->bus.offset = mem->start << PAGE_SHIFT;
 		mem->bus.base = dev_priv->vram_start;
 		mem->bus.is_iomem = true;
 		break;
......
@@ -102,7 +102,8 @@ struct ttm_bus_placement {
  */

 struct ttm_mem_reg {
-	struct drm_mm_node *mm_node;
+	void *mm_node;
+	unsigned long start;
 	unsigned long size;
 	unsigned long num_pages;
 	uint32_t page_alignment;
......
@@ -203,7 +203,22 @@ struct ttm_tt {
  * It's set up by the ttm_bo_driver::init_mem_type method.
  */

+struct ttm_mem_type_manager;
+
+struct ttm_mem_type_manager_func {
+	int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);
+	int (*takedown)(struct ttm_mem_type_manager *man);
+	int (*get_node)(struct ttm_mem_type_manager *man,
+			struct ttm_buffer_object *bo,
+			struct ttm_placement *placement,
+			struct ttm_mem_reg *mem);
+	void (*put_node)(struct ttm_mem_type_manager *man,
+			 struct ttm_mem_reg *mem);
+	void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
+};
+
 struct ttm_mem_type_manager {
+	struct ttm_bo_device *bdev;

 	/*
 	 * No protection. Constant from start.
@@ -222,8 +237,8 @@ struct ttm_mem_type_manager {
 	 * TODO: Consider one lru_lock per ttm_mem_type_manager.
 	 * Plays ill with list removal, though.
 	 */
-	struct drm_mm manager;
+	const struct ttm_mem_type_manager_func *func;
+	void *priv;
 	struct list_head lru;
 };
@@ -895,6 +910,8 @@ extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
  */
 extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);

+extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
+
 #if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
 #define TTM_HAS_AGP
 #include <linux/agp_backend.h>
......
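For reference, the driver side of the new contract stays small: the nouveau, radeon, and vmwgfx hunks above all just point the per-type manager at the stock implementation. A condensed sketch of that wiring (the function name is hypothetical; the flags/caching/gpu_offset setup is elided):

/* Sketch: selecting the stock drm_mm-backed manager from a driver's
 * ttm_bo_driver::init_mem_type callback, as this patch does for the
 * in-tree drivers. */
static int my_driver_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				   struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		break;		/* system memory needs no range manager */
	case TTM_PL_VRAM:
	case TTM_PL_TT:
		man->func = &ttm_bo_manager_func;
		/* ... flags, caching, gpu_offset as before ... */
		break;
	default:
		return -EINVAL;
	}
	return 0;
}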