Commit d5f42394 authored by Ben Skeggs

drm/nouveau: rename nouveau_vram to nouveau_mem

This structure will also be used for GART in the near future.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Parent b5e2f076
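
For orientation, here is a minimal sketch of the renamed structure as it can be pieced together from the hunks below. It lists only the members this diff actually touches (dev, bar_vma, tmp_vma, page_shift, regions, memtype, offset, size); the member types are inferred from how they are used, so the real definition in nouveau_drv.h may order them differently or carry additional fields.

    /* Sketch only: reconstructed from the members referenced in this diff,
     * not copied verbatim from nouveau_drv.h. */
    struct nouveau_mem {
            struct drm_device *dev;

            struct nouveau_vma bar_vma;   /* BAR1 window mapping (ttm io_mem_reserve/free) */
            struct nouveau_vma tmp_vma;   /* temporary mapping created during buffer moves */
            u8 page_shift;                /* inferred from "u8 page_shift; ... node->page_shift" */

            struct list_head regions;     /* nouveau_mm_node allocations backing this object */
            u32 memtype;
            u64 offset;
            u64 size;
    };
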
@@ -509,7 +509,7 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
         src_offset = old_mem->start << PAGE_SHIFT;
         if (old_mem->mem_type == TTM_PL_VRAM) {
-                struct nouveau_vram *node = old_mem->mm_node;
+                struct nouveau_mem *node = old_mem->mm_node;
                 src_offset = node->tmp_vma.offset;
         } else {
                 src_offset += dev_priv->gart_info.aper_base;
@@ -562,7 +562,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
         src_offset = old_mem->start << PAGE_SHIFT;
         if (old_mem->mem_type == TTM_PL_VRAM) {
-                struct nouveau_vram *node = old_mem->mm_node;
+                struct nouveau_mem *node = old_mem->mm_node;
                 src_offset = node->tmp_vma.offset;
         } else {
                 src_offset += dev_priv->gart_info.aper_base;
@@ -729,7 +729,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
          * up after ttm destroys the ttm_mem_reg
          */
         if (dev_priv->card_type >= NV_50 && old_mem->mem_type == TTM_PL_VRAM) {
-                struct nouveau_vram *node = old_mem->mm_node;
+                struct nouveau_mem *node = old_mem->mm_node;
                 ret = nouveau_vm_get(chan->vm, old_mem->num_pages << PAGE_SHIFT,
                         nvbo->vma.node->type, NV_MEM_ACCESS_RO,
@@ -972,7 +972,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
                 break;
         case TTM_PL_VRAM:
         {
-                struct nouveau_vram *vram = mem->mm_node;
+                struct nouveau_mem *node = mem->mm_node;
                 u8 page_shift;
                 if (!dev_priv->bar1_vm) {
@@ -983,23 +983,23 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
                 }
                 if (dev_priv->card_type == NV_C0)
-                        page_shift = vram->page_shift;
+                        page_shift = node->page_shift;
                 else
                         page_shift = 12;
                 ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
                         page_shift, NV_MEM_ACCESS_RW,
-                        &vram->bar_vma);
+                        &node->bar_vma);
                 if (ret)
                         return ret;
-                nouveau_vm_map(&vram->bar_vma, vram);
+                nouveau_vm_map(&node->bar_vma, node);
                 if (ret) {
-                        nouveau_vm_put(&vram->bar_vma);
+                        nouveau_vm_put(&node->bar_vma);
                         return ret;
                 }
-                mem->bus.offset = vram->bar_vma.offset;
+                mem->bus.offset = node->bar_vma.offset;
                 if (dev_priv->card_type == NV_50) /*XXX*/
                         mem->bus.offset -= 0x0020000000ULL;
                 mem->bus.base = pci_resource_start(dev->pdev, 1);
@@ -1016,16 +1016,16 @@ static void
 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
         struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
-        struct nouveau_vram *vram = mem->mm_node;
+        struct nouveau_mem *node = mem->mm_node;
         if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
                 return;
-        if (!vram->bar_vma.node)
+        if (!node->bar_vma.node)
                 return;
-        nouveau_vm_unmap(&vram->bar_vma);
-        nouveau_vm_put(&vram->bar_vma);
+        nouveau_vm_unmap(&node->bar_vma);
+        nouveau_vm_put(&node->bar_vma);
 }
 static int
......
@@ -57,7 +57,7 @@ struct nouveau_fpriv {
 #include "nouveau_util.h"
 struct nouveau_grctx;
-struct nouveau_vram;
+struct nouveau_mem;
 #include "nouveau_vm.h"
 #define MAX_NUM_DCB_ENTRIES 16
@@ -65,7 +65,7 @@ struct nouveau_vram;
 #define NOUVEAU_MAX_CHANNEL_NR 128
 #define NOUVEAU_MAX_TILE_NR 15
-struct nouveau_vram {
+struct nouveau_mem {
         struct drm_device *dev;
         struct nouveau_vma bar_vma;
@@ -510,8 +510,8 @@ struct nouveau_crypt_engine {
 struct nouveau_vram_engine {
         int (*init)(struct drm_device *);
         int (*get)(struct drm_device *, u64, u32 align, u32 size_nc,
-                u32 type, struct nouveau_vram **);
-        void (*put)(struct drm_device *, struct nouveau_vram **);
+                u32 type, struct nouveau_mem **);
+        void (*put)(struct drm_device *, struct nouveau_mem **);
         bool (*flags_valid)(struct drm_device *, u32 tile_flags);
 };
......
@@ -710,7 +710,7 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
 {
         struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
         struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
-        struct nouveau_vram *node = mem->mm_node;
+        struct nouveau_mem *node = mem->mm_node;
         struct drm_device *dev = dev_priv->dev;
         if (node->tmp_vma.node) {
@@ -718,7 +718,7 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
                 nouveau_vm_put(&node->tmp_vma);
         }
-        vram->put(dev, (struct nouveau_vram **)&mem->mm_node);
+        vram->put(dev, (struct nouveau_mem **)&mem->mm_node);
 }
 static int
@@ -731,7 +731,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
         struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
         struct drm_device *dev = dev_priv->dev;
         struct nouveau_bo *nvbo = nouveau_bo(bo);
-        struct nouveau_vram *node;
+        struct nouveau_mem *node;
         u32 size_nc = 0;
         int ret;
......
@@ -53,13 +53,13 @@ void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *);
 int nv50_vram_init(struct drm_device *);
 int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
-        u32 memtype, struct nouveau_vram **);
-void nv50_vram_del(struct drm_device *, struct nouveau_vram **);
+        u32 memtype, struct nouveau_mem **);
+void nv50_vram_del(struct drm_device *, struct nouveau_mem **);
 bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags);
 int nvc0_vram_init(struct drm_device *);
 int nvc0_vram_new(struct drm_device *, u64 size, u32 align, u32 ncmin,
-        u32 memtype, struct nouveau_vram **);
+        u32 memtype, struct nouveau_mem **);
 bool nvc0_vram_flags_valid(struct drm_device *, u32 tile_flags);
 #endif
@@ -28,7 +28,7 @@
 #include "nouveau_vm.h"
 void
-nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
+nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
 {
         struct nouveau_vm *vm = vma->vm;
         struct nouveau_mm_node *r;
@@ -40,7 +40,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
         u32 max = 1 << (vm->pgt_bits - bits);
         u32 end, len;
-        list_for_each_entry(r, &vram->regions, rl_entry) {
+        list_for_each_entry(r, &node->regions, rl_entry) {
                 u64 phys = (u64)r->offset << 12;
                 u32 num = r->length >> bits;
@@ -52,7 +52,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
                                 end = max;
                         len = end - pte;
-                        vm->map(vma, pgt, vram, pte, len, phys);
+                        vm->map(vma, pgt, node, pte, len, phys);
                         num -= len;
                         pte += len;
@@ -67,9 +67,9 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
 }
 void
-nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_vram *vram)
+nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
 {
-        nouveau_vm_map_at(vma, 0, vram);
+        nouveau_vm_map_at(vma, 0, node);
 }
 void
......
@@ -67,7 +67,7 @@ struct nouveau_vm {
         void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
                 struct nouveau_gpuobj *pgt[2]);
         void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
-                struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+                struct nouveau_mem *, u32 pte, u32 cnt, u64 phys);
         void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
                 u32 pte, dma_addr_t *, u32 cnt);
         void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
@@ -82,8 +82,8 @@ int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
 int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
         u32 access, struct nouveau_vma *);
 void nouveau_vm_put(struct nouveau_vma *);
-void nouveau_vm_map(struct nouveau_vma *, struct nouveau_vram *);
-void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_vram *);
+void nouveau_vm_map(struct nouveau_vma *, struct nouveau_mem *);
+void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
 void nouveau_vm_unmap(struct nouveau_vma *);
 void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
 void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
@@ -93,7 +93,7 @@ void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
 void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
         struct nouveau_gpuobj *pgt[2]);
 void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
-        struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+        struct nouveau_mem *, u32 pte, u32 cnt, u64 phys);
 void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
         u32 pte, dma_addr_t *, u32 cnt);
 void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
@@ -104,7 +104,7 @@ void nv50_vm_flush_engine(struct drm_device *, int engine);
 void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
         struct nouveau_gpuobj *pgt[2]);
 void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
-        struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+        struct nouveau_mem *, u32 pte, u32 cnt, u64 phys);
 void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
         u32 pte, dma_addr_t *, u32 cnt);
 void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
......
@@ -300,7 +300,7 @@ nv50_instmem_resume(struct drm_device *dev)
 }
 struct nv50_gpuobj_node {
-        struct nouveau_vram *vram;
+        struct nouveau_mem *vram;
         struct nouveau_vma chan_vma;
         u32 align;
 };
......
@@ -84,7 +84,7 @@ nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 void
 nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
-        struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
+        struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys)
 {
         u32 block;
         int i;
......
@@ -48,42 +48,42 @@ nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
 }
 void
-nv50_vram_del(struct drm_device *dev, struct nouveau_vram **pvram)
+nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
         struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
         struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
         struct nouveau_mm *mm = man->priv;
         struct nouveau_mm_node *this;
-        struct nouveau_vram *vram;
+        struct nouveau_mem *mem;
-        vram = *pvram;
-        *pvram = NULL;
-        if (unlikely(vram == NULL))
+        mem = *pmem;
+        *pmem = NULL;
+        if (unlikely(mem == NULL))
                 return;
         mutex_lock(&mm->mutex);
-        while (!list_empty(&vram->regions)) {
-                this = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
+        while (!list_empty(&mem->regions)) {
+                this = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
                 list_del(&this->rl_entry);
                 nouveau_mm_put(mm, this);
         }
         mutex_unlock(&mm->mutex);
-        kfree(vram);
+        kfree(mem);
 }
 int
 nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
-        u32 type, struct nouveau_vram **pvram)
+        u32 type, struct nouveau_mem **pmem)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
         struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
         struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
         struct nouveau_mm *mm = man->priv;
         struct nouveau_mm_node *r;
-        struct nouveau_vram *vram;
+        struct nouveau_mem *mem;
         int ret;
         if (!types[type])
@@ -92,32 +92,32 @@ nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
         align >>= 12;
         size_nc >>= 12;
-        vram = kzalloc(sizeof(*vram), GFP_KERNEL);
-        if (!vram)
+        mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+        if (!mem)
                 return -ENOMEM;
-        INIT_LIST_HEAD(&vram->regions);
-        vram->dev = dev_priv->dev;
-        vram->memtype = type;
-        vram->size = size;
+        INIT_LIST_HEAD(&mem->regions);
+        mem->dev = dev_priv->dev;
+        mem->memtype = type;
+        mem->size = size;
         mutex_lock(&mm->mutex);
         do {
                 ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r);
                 if (ret) {
                         mutex_unlock(&mm->mutex);
-                        nv50_vram_del(dev, &vram);
+                        nv50_vram_del(dev, &mem);
                         return ret;
                 }
-                list_add_tail(&r->rl_entry, &vram->regions);
+                list_add_tail(&r->rl_entry, &mem->regions);
                 size -= r->length;
         } while (size);
         mutex_unlock(&mm->mutex);
-        r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
-        vram->offset = (u64)r->offset << 12;
-        *pvram = vram;
+        r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
+        mem->offset = (u64)r->offset << 12;
+        *pmem = mem;
         return 0;
 }
......
@@ -59,7 +59,7 @@ nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
 void
 nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
-        struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
+        struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys)
 {
         u32 next = 1 << (vma->node->type - 8);
......
@@ -58,46 +58,46 @@ nvc0_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
 int
 nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
-        u32 type, struct nouveau_vram **pvram)
+        u32 type, struct nouveau_mem **pmem)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
         struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
         struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
         struct nouveau_mm *mm = man->priv;
         struct nouveau_mm_node *r;
-        struct nouveau_vram *vram;
+        struct nouveau_mem *mem;
         int ret;
         size >>= 12;
         align >>= 12;
         ncmin >>= 12;
-        vram = kzalloc(sizeof(*vram), GFP_KERNEL);
-        if (!vram)
+        mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+        if (!mem)
                 return -ENOMEM;
-        INIT_LIST_HEAD(&vram->regions);
-        vram->dev = dev_priv->dev;
-        vram->memtype = type;
-        vram->size = size;
+        INIT_LIST_HEAD(&mem->regions);
+        mem->dev = dev_priv->dev;
+        mem->memtype = type;
+        mem->size = size;
         mutex_lock(&mm->mutex);
         do {
                 ret = nouveau_mm_get(mm, 1, size, ncmin, align, &r);
                 if (ret) {
                         mutex_unlock(&mm->mutex);
-                        nv50_vram_del(dev, &vram);
+                        nv50_vram_del(dev, &mem);
                         return ret;
                 }
-                list_add_tail(&r->rl_entry, &vram->regions);
+                list_add_tail(&r->rl_entry, &mem->regions);
                 size -= r->length;
         } while (size);
         mutex_unlock(&mm->mutex);
-        r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
-        vram->offset = (u64)r->offset << 12;
-        *pvram = vram;
+        r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
+        mem->offset = (u64)r->offset << 12;
+        *pmem = mem;
         return 0;
 }
......