Commit 1de68568 authored by Ben Skeggs

drm/nouveau/mmu: protect each vm with its own mutex

An upcoming commit requires being able to modify the PRAMIN BAR page
tables while already holding the MMU subdev mutex.

To solve this issue, each VM has been given its own mutex.  As a nice
side-effect, this also allows separate VMs to be updated concurrently.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Parent 524bdbf2
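The mechanism at the heart of this patch is a per-VM mutex initialised with __mutex_init(), which lets a caller supply its own lockdep class key. The BAR VMs get dedicated static keys (bar1_lock, bar3_lock) so that taking a BAR VM's mutex while another VM's mutex is held is not flagged by lockdep as recursive locking of a single class; ordinary clients pass NULL and share a default key. Below is a minimal, self-contained sketch of that pattern, not code from the patch itself: demo_vm and demo_vm_create are hypothetical names used only for illustration.

/*
 * Sketch of the per-object mutex + lock_class_key pattern this patch
 * applies to nvkm_vm; demo_vm/demo_vm_create are hypothetical names.
 */
#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_vm {
	struct mutex mutex;	/* per-VM lock, like nvkm_vm::mutex */
};

static struct demo_vm *
demo_vm_create(struct lock_class_key *key)
{
	static struct lock_class_key _key;	/* shared default class */
	struct demo_vm *vm = kzalloc(sizeof(*vm), GFP_KERNEL);

	if (!vm)
		return NULL;
	/*
	 * Callers that nest two VM mutexes (e.g. a BAR VM's lock taken
	 * while a client VM operation holds its own) pass a distinct
	 * static key, so lockdep sees two lock classes rather than a
	 * recursive acquisition of one class.
	 */
	__mutex_init(&vm->mutex, "&vm->mutex", key ? key : &_key);
	return vm;
}

A BAR-style caller would declare static struct lock_class_key bar3_lock; and pass &bar3_lock, exactly as the gf100/nv50 BAR constructors in the hunks below do when calling nvkm_vm_new().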
@@ -26,6 +26,8 @@ struct nvkm_vma {
 struct nvkm_vm {
 	struct nvkm_mmu *mmu;
+
+	struct mutex mutex;
 	struct nvkm_mm mm;
 	struct kref refcount;
@@ -47,7 +49,8 @@ struct nvkm_mmu {
 	u8 lpg_shift;
 
 	int  (*create)(struct nvkm_mmu *, u64 offset, u64 length,
-		       u64 mm_offset, struct nvkm_vm **);
+		       u64 mm_offset, struct lock_class_key *,
+		       struct nvkm_vm **);
 
 	void (*map_pgt)(struct nvkm_gpuobj *pgd, u32 pde,
 			struct nvkm_gpuobj *pgt[2]);
@@ -85,14 +88,14 @@ extern struct nvkm_oclass nv44_mmu_oclass;
 extern struct nvkm_oclass nv50_mmu_oclass;
 extern struct nvkm_oclass gf100_mmu_oclass;
 
-int  nv04_vm_create(struct nvkm_mmu *, u64, u64, u64,
+int  nv04_vm_create(struct nvkm_mmu *, u64, u64, u64, struct lock_class_key *,
 		    struct nvkm_vm **);
 void nv04_mmu_dtor(struct nvkm_object *);
 
 int  nvkm_vm_create(struct nvkm_mmu *, u64 offset, u64 length, u64 mm_offset,
-		    u32 block, struct nvkm_vm **);
+		    u32 block, struct lock_class_key *, struct nvkm_vm **);
 int  nvkm_vm_new(struct nvkm_device *, u64 offset, u64 length, u64 mm_offset,
-		 struct nvkm_vm **);
+		 struct lock_class_key *, struct nvkm_vm **);
 int  nvkm_vm_ref(struct nvkm_vm *, struct nvkm_vm **, struct nvkm_gpuobj *pgd);
 int  nvkm_vm_get(struct nvkm_vm *, u64 size, u32 page_shift, u32 access,
 		 struct nvkm_vma *);
......
@@ -416,7 +416,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
 		ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40),
-				  0x1000, &drm->client.vm);
+				  0x1000, NULL, &drm->client.vm);
 		if (ret)
 			goto fail_device;
@@ -809,7 +809,7 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
 	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
 		ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40),
-				  0x1000, &cli->vm);
+				  0x1000, NULL, &cli->vm);
 		if (ret) {
 			nouveau_cli_destroy(cli);
 			goto out_suspend;
......
@@ -77,9 +77,10 @@ gf100_bar_unmap(struct nvkm_bar *bar, struct nvkm_vma *vma)
 	nvkm_vm_put(vma);
 }
 
 static int
-gf100_bar_ctor_vm(struct gf100_bar *bar, struct gf100_bar_vm *bar_vm, int bar_nr)
+gf100_bar_ctor_vm(struct gf100_bar *bar, struct gf100_bar_vm *bar_vm,
+		  struct lock_class_key *key, int bar_nr)
 {
 	struct nvkm_device *device = nv_device(&bar->base);
 	struct nvkm_vm *vm;
@@ -98,7 +99,7 @@ gf100_bar_ctor_vm(struct gf100_bar *bar, struct gf100_bar_vm *bar_vm,
 	bar_len = nv_device_resource_len(device, bar_nr);
 
-	ret = nvkm_vm_new(device, 0, bar_len, 0, &vm);
+	ret = nvkm_vm_new(device, 0, bar_len, 0, key, &vm);
 	if (ret)
 		return ret;
@@ -136,6 +137,8 @@ gf100_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	       struct nvkm_oclass *oclass, void *data, u32 size,
 	       struct nvkm_object **pobject)
 {
+	static struct lock_class_key bar1_lock;
+	static struct lock_class_key bar3_lock;
 	struct nvkm_device *device = nv_device(parent);
 	struct gf100_bar *bar;
 	bool has_bar3 = nv_device_resource_len(device, 3) != 0;
@@ -148,13 +151,13 @@ gf100_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	/* BAR3 */
 	if (has_bar3) {
-		ret = gf100_bar_ctor_vm(bar, &bar->bar[0], 3);
+		ret = gf100_bar_ctor_vm(bar, &bar->bar[0], &bar3_lock, 3);
 		if (ret)
 			return ret;
 	}
 
 	/* BAR1 */
-	ret = gf100_bar_ctor_vm(bar, &bar->bar[1], 1);
+	ret = gf100_bar_ctor_vm(bar, &bar->bar[1], &bar1_lock, 1);
 	if (ret)
 		return ret;
......
@@ -112,6 +112,8 @@ nv50_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	      struct nvkm_oclass *oclass, void *data, u32 size,
 	      struct nvkm_object **pobject)
 {
+	static struct lock_class_key bar1_lock;
+	static struct lock_class_key bar3_lock;
 	struct nvkm_device *device = nv_device(parent);
 	struct nvkm_object *heap;
 	struct nvkm_vm *vm;
@@ -144,7 +146,7 @@ nv50_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	start = 0x0100000000ULL;
 	limit = start + nv_device_resource_len(device, 3);
 
-	ret = nvkm_vm_new(device, start, limit, start, &vm);
+	ret = nvkm_vm_new(device, start, limit, start, &bar3_lock, &vm);
 	if (ret)
 		return ret;
@@ -180,7 +182,7 @@ nv50_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	start = 0x0000000000ULL;
 	limit = start + nv_device_resource_len(device, 1);
 
-	ret = nvkm_vm_new(device, start, limit--, start, &bar1_lock, &vm);
+	ret = nvkm_vm_new(device, start, limit--, start, &bar1_lock, &vm);
 	if (ret)
 		return ret;
......
@@ -240,9 +240,7 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
 			mmu->map_pgt(vpgd->obj, pde, vpgt->obj);
 		}
 
-		mutex_unlock(&nv_subdev(mmu)->mutex);
 		nvkm_gpuobj_ref(NULL, &pgt);
-		mutex_lock(&nv_subdev(mmu)->mutex);
 	}
 }
@@ -252,7 +250,6 @@ nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type)
 	struct nvkm_mmu *mmu = vm->mmu;
 	struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
 	struct nvkm_vm_pgd *vpgd;
-	struct nvkm_gpuobj *pgt;
 	int big = (type != mmu->spg_shift);
 	u32 pgt_size;
 	int ret;
@@ -260,26 +257,16 @@ nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type)
 	pgt_size  = (1 << (mmu->pgt_bits + 12)) >> type;
 	pgt_size *= 8;
 
-	mutex_unlock(&nv_subdev(mmu)->mutex);
 	ret = nvkm_gpuobj_new(nv_object(vm->mmu), NULL, pgt_size, 0x1000,
-			      NVOBJ_FLAG_ZERO_ALLOC, &pgt);
-	mutex_lock(&nv_subdev(mmu)->mutex);
+			      NVOBJ_FLAG_ZERO_ALLOC, &vpgt->obj[big]);
 	if (unlikely(ret))
 		return ret;
 
-	/* someone beat us to filling the PDE while we didn't have the lock */
-	if (unlikely(vpgt->refcount[big]++)) {
-		mutex_unlock(&nv_subdev(mmu)->mutex);
-		nvkm_gpuobj_ref(NULL, &pgt);
-		mutex_lock(&nv_subdev(mmu)->mutex);
-		return 0;
-	}
-
-	vpgt->obj[big] = pgt;
 	list_for_each_entry(vpgd, &vm->pgd_list, head) {
 		mmu->map_pgt(vpgd->obj, pde, vpgt->obj);
 	}
 
+	vpgt->refcount[big]++;
 	return 0;
 }
@@ -293,11 +280,11 @@ nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
 	u32 fpde, lpde, pde;
 	int ret;
 
-	mutex_lock(&nv_subdev(mmu)->mutex);
+	mutex_lock(&vm->mutex);
 	ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align,
 			   &vma->node);
 	if (unlikely(ret != 0)) {
-		mutex_unlock(&nv_subdev(mmu)->mutex);
+		mutex_unlock(&vm->mutex);
 		return ret;
 	}
@@ -318,11 +305,11 @@ nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
 			if (pde != fpde)
 				nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1);
 			nvkm_mm_free(&vm->mm, &vma->node);
-			mutex_unlock(&nv_subdev(mmu)->mutex);
+			mutex_unlock(&vm->mutex);
 			return ret;
 		}
 	}
-	mutex_unlock(&nv_subdev(mmu)->mutex);
+	mutex_unlock(&vm->mutex);
 
 	vma->vm = NULL;
 	nvkm_vm_ref(vm, &vma->vm, NULL);
@@ -343,18 +330,19 @@ nvkm_vm_put(struct nvkm_vma *vma)
 	fpde = (vma->node->offset >> mmu->pgt_bits);
 	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->pgt_bits;
 
-	mutex_lock(&nv_subdev(mmu)->mutex);
+	mutex_lock(&vm->mutex);
 	nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->spg_shift, fpde, lpde);
 	nvkm_mm_free(&vm->mm, &vma->node);
-	mutex_unlock(&nv_subdev(mmu)->mutex);
+	mutex_unlock(&vm->mutex);
 
 	nvkm_vm_ref(NULL, &vma->vm, NULL);
 }
 
 int
 nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
-	       u32 block, struct nvkm_vm **pvm)
+	       u32 block, struct lock_class_key *key, struct nvkm_vm **pvm)
 {
+	static struct lock_class_key _key;
 	struct nvkm_vm *vm;
 	u64 mm_length = (offset + length) - mm_offset;
 	int ret;
@@ -363,6 +351,7 @@ nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
 	if (!vm)
 		return -ENOMEM;
 
+	__mutex_init(&vm->mutex, "&vm->mutex", key ? key : &_key);
 	INIT_LIST_HEAD(&vm->pgd_list);
 	vm->mmu = mmu;
 	kref_init(&vm->refcount);
@@ -390,10 +379,10 @@ nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
 int
 nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
-	    struct nvkm_vm **pvm)
+	    struct lock_class_key *key, struct nvkm_vm **pvm)
 {
 	struct nvkm_mmu *mmu = nvkm_mmu(device);
-	return mmu->create(mmu, offset, length, mm_offset, pvm);
+	return mmu->create(mmu, offset, length, mm_offset, key, pvm);
 }
 
 static int
@@ -412,25 +401,24 @@ nvkm_vm_link(struct nvkm_vm *vm, struct nvkm_gpuobj *pgd)
 	nvkm_gpuobj_ref(pgd, &vpgd->obj);
 
-	mutex_lock(&nv_subdev(mmu)->mutex);
+	mutex_lock(&vm->mutex);
 	for (i = vm->fpde; i <= vm->lpde; i++)
 		mmu->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
 	list_add(&vpgd->head, &vm->pgd_list);
-	mutex_unlock(&nv_subdev(mmu)->mutex);
+	mutex_unlock(&vm->mutex);
 	return 0;
 }
 
 static void
 nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd)
 {
-	struct nvkm_mmu *mmu = vm->mmu;
 	struct nvkm_vm_pgd *vpgd, *tmp;
 	struct nvkm_gpuobj *pgd = NULL;
 
 	if (!mpgd)
 		return;
 
-	mutex_lock(&nv_subdev(mmu)->mutex);
+	mutex_lock(&vm->mutex);
 	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
 		if (vpgd->obj == mpgd) {
 			pgd = vpgd->obj;
@@ -439,7 +427,7 @@ nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd)
 			break;
 		}
 	}
-	mutex_unlock(&nv_subdev(mmu)->mutex);
+	mutex_unlock(&vm->mutex);
 	nvkm_gpuobj_ref(NULL, &pgd);
 }
......
@@ -197,9 +197,9 @@ gf100_vm_flush(struct nvkm_vm *vm)
 
 static int
 gf100_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
-		struct nvkm_vm **pvm)
+		struct lock_class_key *key, struct nvkm_vm **pvm)
 {
-	return nvkm_vm_create(mmu, offset, length, mm_offset, 4096, pvm);
+	return nvkm_vm_create(mmu, offset, length, mm_offset, 4096, key, pvm);
 }
 
 static int
......
@@ -74,7 +74,7 @@ nv04_vm_flush(struct nvkm_vm *vm)
 
 int
 nv04_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mmstart,
-	       struct nvkm_vm **pvm)
+	       struct lock_class_key *key, struct nvkm_vm **pvm)
 {
 	return -EINVAL;
 }
@@ -108,7 +108,7 @@ nv04_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	mmu->base.unmap = nv04_vm_unmap;
 	mmu->base.flush = nv04_vm_flush;
 
-	ret = nvkm_vm_create(&mmu->base, 0, NV04_PDMA_SIZE, 0, 4096,
+	ret = nvkm_vm_create(&mmu->base, 0, NV04_PDMA_SIZE, 0, 4096, NULL,
 			     &mmu->vm);
 	if (ret)
 		return ret;
......
@@ -116,7 +116,7 @@ nv41_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	mmu->base.unmap = nv41_vm_unmap;
 	mmu->base.flush = nv41_vm_flush;
 
-	ret = nvkm_vm_create(&mmu->base, 0, NV41_GART_SIZE, 0, 4096,
+	ret = nvkm_vm_create(&mmu->base, 0, NV41_GART_SIZE, 0, 4096, NULL,
 			     &mmu->vm);
 	if (ret)
 		return ret;
......
@@ -195,7 +195,7 @@ nv44_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		mmu->null = 0;
 	}
 
-	ret = nvkm_vm_create(&mmu->base, 0, NV44_GART_SIZE, 0, 4096,
+	ret = nvkm_vm_create(&mmu->base, 0, NV44_GART_SIZE, 0, 4096, NULL,
 			     &mmu->vm);
 	if (ret)
 		return ret;
......
@@ -201,14 +201,14 @@ nv50_vm_flush(struct nvkm_vm *vm)
 }
 
 static int
-nv50_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length,
-	       u64 mm_offset, struct nvkm_vm **pvm)
+nv50_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
+	       struct lock_class_key *key, struct nvkm_vm **pvm)
 {
 	u32 block = (1 << (mmu->pgt_bits + 12));
 	if (block > length)
 		block = length;
 
-	return nvkm_vm_create(mmu, offset, length, mm_offset, block, pvm);
+	return nvkm_vm_create(mmu, offset, length, mm_offset, block, key, pvm);
 }
 
 static int
......