Commit 2ef7a95f authored by Dave Airlie

Merge branch 'linux-4.15' of git://github.com/skeggsb/linux into drm-next

- Pascal temperature sensor support
- Improved BAR2 handling, greatly reduces time required to suspend
- Rework of the MMU code
  - Allows us to properly support Pascal's new MMU layout (implemented)
  - Lays the groundwork for improved userspace APIs later (a usage sketch
    follows the shortlog below)
- Misc other fixes

* 'linux-4.15' of git://github.com/skeggsb/linux: (151 commits)
  drm/nouveau/gr/gf100-: don't prevent module load if firmware missing
  drm/nouveau/mmu: remove old vmm frontend
  drm/nouveau: improve selection of GPU page size
  drm/nouveau: switch over to new memory and vmm interfaces
  drm/nouveau: remove unused nouveau_fence_work()
  drm/nouveau: queue delayed unmapping of VMAs on client workqueue
  drm/nouveau: implement per-client delayed workqueue with fence support
  drm/nouveau: determine memory class for each client
  drm/nouveau: pass handle of vmm object to channel allocation ioctls
  drm/nouveau: switch to vmm limit
  drm/nouveau: allocate vmm object for every client
  drm/nouveau: replace use of cpu_coherent with memory types
  drm/nouveau: use nvif_mmu_type to determine BAR1 caching
  drm/nouveau: fetch memory type indices that we care about for ttm
  drm/nouveau: consolidate handling of dma mask
  drm/nouveau: check kind validity against mmu object
  drm/nouveau: allocate mmu object for every client
  drm/nouveau: remove trivial cases of nvxx_device() usage
  drm/nouveau/mmu: define user interfaces to mmu vmm operations
  drm/nouveau/mmu: define user interfaces to mmu memory allocation
  ...
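
For orientation, the sequence below sketches how a client could drive the new
MMU/VMM/memory objects end to end, using only the interface declarations added
in nvif/mmu.h, nvif/mem.h and nvif/vmm.h further down. It is an illustrative
composite, not code from this series: the choice of GF100 classes, the use of
the device object as parent, and the "addr/size of 0 means whole address
space" convention are assumptions, and error unwinding is abbreviated.

```c
/* Illustrative sketch only (not from this series). */
#include <nvif/class.h>
#include <nvif/mmu.h>
#include <nvif/mem.h>
#include <nvif/vmm.h>

static int
example_vmm_map(struct nvif_device *device)
{
	struct nvif_mmu mmu;
	struct nvif_vmm vmm;
	struct nvif_mem mem;
	struct nvif_vma vma;
	int ret, type;

	/* Instantiate the MMU object; heap/type/kind tables get filled in. */
	ret = nvif_mmu_init(&device->object, NVIF_CLASS_MMU_GF100, &mmu);
	if (ret)
		return ret;

	/* Create an address-space object (0/0 assumed to mean "all of it"). */
	ret = nvif_vmm_init(&mmu, NVIF_CLASS_VMM_GF100, 0, 0, NULL, 0, &vmm);
	if (ret)
		goto done_mmu;

	/* Pick a mappable VRAM type, then allocate 64KiB of it. */
	type = nvif_mmu_type(&mmu, NVIF_MEM_VRAM | NVIF_MEM_MAPPABLE);
	if (type < 0) {
		ret = type;
		goto done_vmm;
	}
	ret = nvif_mem_init_type(&mmu, NVIF_CLASS_MEM_GF100, type, 0,
				 0x10000, NULL, 0, &mem);
	if (ret)
		goto done_vmm;

	/* Carve out address space, map the memory into it, tear down. */
	ret = nvif_vmm_get(&vmm, LAZY, false, mem.page, 0, mem.size, &vma);
	if (ret == 0) {
		ret = nvif_vmm_map(&vmm, vma.addr, mem.size, NULL, 0, &mem, 0);
		if (ret == 0)
			nvif_vmm_unmap(&vmm, vma.addr);
		nvif_vmm_put(&vmm, &vma);
	}

	nvif_mem_fini(&mem);
done_vmm:
	nvif_vmm_fini(&vmm);
done_mmu:
	nvif_mmu_fini(&mmu);
	return ret;
}
```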
@@ -30,9 +30,11 @@ nouveau-y += nouveau_vga.o
 # DRM - memory management
 nouveau-y += nouveau_bo.o
 nouveau-y += nouveau_gem.o
+nouveau-y += nouveau_mem.o
 nouveau-y += nouveau_prime.o
 nouveau-y += nouveau_sgdma.o
 nouveau-y += nouveau_ttm.o
+nouveau-y += nouveau_vmm.o
 
 # DRM - modesetting
 nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
......
@@ -56,6 +56,13 @@ config NOUVEAU_DEBUG_DEFAULT
 	help
 	  Selects the default debug level
 
+config NOUVEAU_DEBUG_MMU
+	bool "Enable additional MMU debugging"
+	depends on DRM_NOUVEAU
+	default n
+	help
+	  Say Y here if you want to enable verbose MMU debug output.
+
 config DRM_NOUVEAU_BACKLIGHT
 	bool "Support for backlight control"
 	depends on DRM_NOUVEAU
......
@@ -48,7 +48,7 @@ nv04_display_create(struct drm_device *dev)
 	if (!disp)
 		return -ENOMEM;
 
-	nvif_object_map(&drm->client.device.object);
+	nvif_object_map(&drm->client.device.object, NULL, 0);
 
 	nouveau_display(dev)->priv = disp;
 	nouveau_display(dev)->dtor = nv04_display_destroy;
......
@@ -5,7 +5,7 @@ struct nv50_channel_dma_v0 {
 	__u8  version;
 	__u8  chid;
 	__u8  pad02[6];
-	__u64 vm;
+	__u64 vmm;
 	__u64 pushbuf;
 	__u64 offset;
 };
......
@@ -8,6 +8,6 @@ struct nv50_channel_gpfifo_v0 {
 	__u32 ilength;
 	__u64 ioffset;
 	__u64 pushbuf;
-	__u64 vm;
+	__u64 vmm;
 };
 #endif
@@ -5,7 +5,7 @@ struct g82_channel_dma_v0 {
 	__u8  version;
 	__u8  chid;
 	__u8  pad02[6];
-	__u64 vm;
+	__u64 vmm;
 	__u64 pushbuf;
 	__u64 offset;
 };
......
@@ -8,7 +8,7 @@ struct g82_channel_gpfifo_v0 {
 	__u32 ilength;
 	__u64 ioffset;
 	__u64 pushbuf;
-	__u64 vm;
+	__u64 vmm;
 };
 
 #define NV826F_V0_NTFY_NON_STALL_INTERRUPT 0x00
......
@@ -7,7 +7,7 @@ struct fermi_channel_gpfifo_v0 {
 	__u8  pad02[2];
 	__u32 ilength;
 	__u64 ioffset;
-	__u64 vm;
+	__u64 vmm;
 };
 
 #define NV906F_V0_NTFY_NON_STALL_INTERRUPT 0x00
......
@@ -22,7 +22,7 @@ struct kepler_channel_gpfifo_a_v0 {
 	__u32 engines;
 	__u32 ilength;
 	__u64 ioffset;
-	__u64 vm;
+	__u64 vmm;
 };
 
 #define NVA06F_V0_NTFY_NON_STALL_INTERRUPT 0x00
......
@@ -14,6 +14,23 @@
 #define NVIF_CLASS_SW_NV50                           /* if0005.h */ -0x00000006
 #define NVIF_CLASS_SW_GF100                          /* if0005.h */ -0x00000007
 
+#define NVIF_CLASS_MMU                                /* if0008.h */ 0x80000008
+#define NVIF_CLASS_MMU_NV04                           /* if0008.h */ 0x80000009
+#define NVIF_CLASS_MMU_NV50                           /* if0008.h */ 0x80005009
+#define NVIF_CLASS_MMU_GF100                          /* if0008.h */ 0x80009009
+
+#define NVIF_CLASS_MEM                                /* if000a.h */ 0x8000000a
+#define NVIF_CLASS_MEM_NV04                           /* if000b.h */ 0x8000000b
+#define NVIF_CLASS_MEM_NV50                           /* if500b.h */ 0x8000500b
+#define NVIF_CLASS_MEM_GF100                          /* if900b.h */ 0x8000900b
+
+#define NVIF_CLASS_VMM                                /* if000c.h */ 0x8000000c
+#define NVIF_CLASS_VMM_NV04                           /* if000d.h */ 0x8000000d
+#define NVIF_CLASS_VMM_NV50                           /* if500d.h */ 0x8000500d
+#define NVIF_CLASS_VMM_GF100                          /* if900d.h */ 0x8000900d
+#define NVIF_CLASS_VMM_GM200                          /* ifb00d.h */ 0x8000b00d
+#define NVIF_CLASS_VMM_GP100                          /* ifc00d.h */ 0x8000c00d
+
 /* the below match nvidia-assigned (either in hw, or sw) class numbers */
 #define NV_NULL_CLASS                                                0x00000030
......
@@ -38,7 +38,6 @@ u64 nvif_device_time(struct nvif_device *);
 /*XXX*/
 #include <subdev/bios.h>
 #include <subdev/fb.h>
-#include <subdev/mmu.h>
 #include <subdev/bar.h>
 #include <subdev/gpio.h>
 #include <subdev/clk.h>
@@ -57,8 +56,6 @@ u64 nvif_device_time(struct nvif_device *);
 })
 #define nvxx_bios(a) nvxx_device(a)->bios
 #define nvxx_fb(a) nvxx_device(a)->fb
-#define nvxx_mmu(a) nvxx_device(a)->mmu
-#define nvxx_bar(a) nvxx_device(a)->bar
 #define nvxx_gpio(a) nvxx_device(a)->gpio
 #define nvxx_clk(a) nvxx_device(a)->clk
 #define nvxx_i2c(a) nvxx_device(a)->i2c
@@ -66,10 +63,8 @@ u64 nvif_device_time(struct nvif_device *);
 #define nvxx_therm(a) nvxx_device(a)->therm
 #define nvxx_volt(a) nvxx_device(a)->volt
 
-#include <core/device.h>
 #include <engine/fifo.h>
 #include <engine/gr.h>
-#include <engine/sw.h>
 
 #define nvxx_fifo(a) nvxx_device(a)->fifo
 #define nvxx_gr(a) nvxx_device(a)->gr
......
#ifndef __NVIF_IF0008_H__
#define __NVIF_IF0008_H__
struct nvif_mmu_v0 {
	__u8  version;
	__u8  dmabits;
	__u8  heap_nr;
	__u8  type_nr;
	__u16 kind_nr;
};

#define NVIF_MMU_V0_HEAP                                                   0x00
#define NVIF_MMU_V0_TYPE                                                   0x01
#define NVIF_MMU_V0_KIND                                                   0x02

struct nvif_mmu_heap_v0 {
	__u8  version;
	__u8  index;
	__u8  pad02[6];
	__u64 size;
};

struct nvif_mmu_type_v0 {
	__u8  version;
	__u8  index;
	__u8  heap;
	__u8  vram;
	__u8  host;
	__u8  comp;
	__u8  disp;
	__u8  kind;
	__u8  mappable;
	__u8  coherent;
	__u8  uncached;
};

struct nvif_mmu_kind_v0 {
	__u8  version;
	__u8  pad01[1];
	__u16 count;
	__u8  data[];
};
#endif
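
The three methods above are presumably what nvif_mmu_init() (declared in
nvif/mmu.h below) uses to populate its heap/type/kind tables when the object
is created. As a hedged illustration only, a raw KIND query might look like
this, with the argument buffer sized for the trailing data[] array; "mmu" is
assumed to be an initialised struct nvif_mmu inside a kernel function:

```c
/* Illustrative only: query the kind table with NVIF_MMU_V0_KIND. */
u32 argc = sizeof(struct nvif_mmu_kind_v0) + mmu->kind_nr;
struct nvif_mmu_kind_v0 *args = kzalloc(argc, GFP_KERNEL);
int ret;

if (!args)
	return -ENOMEM;
ret = nvif_object_mthd(&mmu->object, NVIF_MMU_V0_KIND, args, argc);
if (ret == 0)
	memcpy(mmu->kind, args->data, args->count); /* count filled by mthd */
kfree(args);
```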
#ifndef __NVIF_IF000A_H__
#define __NVIF_IF000A_H__
struct nvif_mem_v0 {
	__u8  version;
	__u8  type;
	__u8  page;
	__u8  pad03[5];
	__u64 size;
	__u64 addr;
	__u8  data[];
};

struct nvif_mem_ram_vn {
};

struct nvif_mem_ram_v0 {
	__u8  version;
	__u8  pad01[7];
	dma_addr_t *dma;
	struct scatterlist *sgl;
};
#endif
#ifndef __NVIF_IF000B_H__
#define __NVIF_IF000B_H__
#include "if000a.h"

struct nv04_mem_vn {
	/* nvkm_mem_vX ... */
};

struct nv04_mem_map_vn {
};
#endif
#ifndef __NVIF_IF000C_H__
#define __NVIF_IF000C_H__
struct nvif_vmm_v0 {
	__u8  version;
	__u8  page_nr;
	__u8  pad02[6];
	__u64 addr;
	__u64 size;
	__u8  data[];
};

#define NVIF_VMM_V0_PAGE                                                   0x00
#define NVIF_VMM_V0_GET                                                    0x01
#define NVIF_VMM_V0_PUT                                                    0x02
#define NVIF_VMM_V0_MAP                                                    0x03
#define NVIF_VMM_V0_UNMAP                                                  0x04

struct nvif_vmm_page_v0 {
	__u8  version;
	__u8  index;
	__u8  shift;
	__u8  sparse;
	__u8  vram;
	__u8  host;
	__u8  comp;
	__u8  pad07[1];
};

struct nvif_vmm_get_v0 {
	__u8  version;
#define NVIF_VMM_GET_V0_ADDR                                               0x00
#define NVIF_VMM_GET_V0_PTES                                               0x01
#define NVIF_VMM_GET_V0_LAZY                                               0x02
	__u8  type;
	__u8  sparse;
	__u8  page;
	__u8  align;
	__u8  pad05[3];
	__u64 size;
	__u64 addr;
};

struct nvif_vmm_put_v0 {
	__u8  version;
	__u8  pad01[7];
	__u64 addr;
};

struct nvif_vmm_map_v0 {
	__u8  version;
	__u8  pad01[7];
	__u64 addr;
	__u64 size;
	__u64 memory;
	__u64 offset;
	__u8  data[];
};

struct nvif_vmm_unmap_v0 {
	__u8  version;
	__u8  pad01[7];
	__u64 addr;
};
#endif
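
The GET/MAP pairing is the core of the new uapi: GET reserves address space
(returning addr), MAP attaches a memory object to it. A hedged sketch of the
raw method calls that the nvif_vmm_get()/nvif_vmm_map() wrappers presumably
issue; the use of nvif_handle() to encode the memory object is an assumption
borrowed from the channel-allocation change in nouveau_chan.c below:

```c
/* Illustrative only: "vmm" and "mem" are assumed pre-initialised. */
struct nvif_vmm_get_v0 get = {
	.type = NVIF_VMM_GET_V0_LAZY,
	.size = 0x10000,
};
struct nvif_vmm_map_v0 map;
int ret;

ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_GET, &get, sizeof(get));
if (ret)
	return ret;

map = (struct nvif_vmm_map_v0) {
	.addr   = get.addr,
	.size   = get.size,
	.memory = nvif_handle(&mem->object), /* assumed handle encoding */
};
ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_MAP, &map, sizeof(map));
```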
#ifndef __NVIF_IF000D_H__
#define __NVIF_IF000D_H__
#include "if000c.h"

struct nv04_vmm_vn {
	/* nvif_vmm_vX ... */
};

struct nv04_vmm_map_vn {
	/* nvif_vmm_map_vX ... */
};
#endif
#ifndef __NVIF_IF500B_H__
#define __NVIF_IF500B_H__
#include "if000a.h"

struct nv50_mem_vn {
	/* nvif_mem_vX ... */
};

struct nv50_mem_v0 {
	/* nvif_mem_vX ... */
	__u8  version;
	__u8  bankswz;
	__u8  contig;
};

struct nv50_mem_map_vn {
};

struct nv50_mem_map_v0 {
	__u8  version;
	__u8  ro;
	__u8  kind;
	__u8  comp;
};
#endif
#ifndef __NVIF_IF500D_H__
#define __NVIF_IF500D_H__
#include "if000c.h"

struct nv50_vmm_vn {
	/* nvif_vmm_vX ... */
};

struct nv50_vmm_map_vn {
	/* nvif_vmm_map_vX ... */
};

struct nv50_vmm_map_v0 {
	/* nvif_vmm_map_vX ... */
	__u8  version;
	__u8  ro;
	__u8  priv;
	__u8  kind;
	__u8  comp;
};
#endif
#ifndef __NVIF_IF900B_H__
#define __NVIF_IF900B_H__
#include "if000a.h"

struct gf100_mem_vn {
	/* nvif_mem_vX ... */
};

struct gf100_mem_v0 {
	/* nvif_mem_vX ... */
	__u8  version;
	__u8  contig;
};

struct gf100_mem_map_vn {
};

struct gf100_mem_map_v0 {
	__u8  version;
	__u8  ro;
	__u8  kind;
};
#endif
#ifndef __NVIF_IF900D_H__
#define __NVIF_IF900D_H__
#include "if000c.h"

struct gf100_vmm_vn {
	/* nvif_vmm_vX ... */
};

struct gf100_vmm_map_vn {
	/* nvif_vmm_map_vX ... */
};

struct gf100_vmm_map_v0 {
	/* nvif_vmm_map_vX ... */
	__u8  version;
	__u8  vol;
	__u8  ro;
	__u8  priv;
	__u8  kind;
};
#endif
#ifndef __NVIF_IFB00D_H__
#define __NVIF_IFB00D_H__
#include "if000c.h"

struct gm200_vmm_vn {
	/* nvif_vmm_vX ... */
};

struct gm200_vmm_v0 {
	/* nvif_vmm_vX ... */
	__u8  version;
	__u8  bigpage;
};

struct gm200_vmm_map_vn {
	/* nvif_vmm_map_vX ... */
};

struct gm200_vmm_map_v0 {
	/* nvif_vmm_map_vX ... */
	__u8  version;
	__u8  vol;
	__u8  ro;
	__u8  priv;
	__u8  kind;
};
#endif
#ifndef __NVIF_IFC00D_H__
#define __NVIF_IFC00D_H__
#include "if000c.h"

struct gp100_vmm_vn {
	/* nvif_vmm_vX ... */
};

struct gp100_vmm_map_vn {
	/* nvif_vmm_map_vX ... */
};

struct gp100_vmm_map_v0 {
	/* nvif_vmm_map_vX ... */
	__u8  version;
	__u8  vol;
	__u8  ro;
	__u8  priv;
	__u8  kind;
};
#endif
 #ifndef __NVIF_IOCTL_H__
 #define __NVIF_IOCTL_H__
 
-#define NVIF_VERSION_LATEST                               0x0000000000000000ULL
+#define NVIF_VERSION_LATEST                               0x0000000000000100ULL
 
 struct nvif_ioctl_v0 {
 	__u8  version;
@@ -83,9 +83,13 @@ struct nvif_ioctl_wr_v0 {
 struct nvif_ioctl_map_v0 {
 	/* nvif_ioctl ... */
 	__u8  version;
-	__u8  pad01[3];
-	__u32 length;
+#define NVIF_IOCTL_MAP_V0_IO                                               0x00
+#define NVIF_IOCTL_MAP_V0_VA                                               0x01
+	__u8  type;
+	__u8  pad02[6];
 	__u64 handle;
+	__u64 length;
+	__u8  data[];
 };
 
 struct nvif_ioctl_unmap {
......
#ifndef __NVIF_MEM_H__
#define __NVIF_MEM_H__
#include "mmu.h"

struct nvif_mem {
	struct nvif_object object;
	u8  type;
	u8  page;
	u64 addr;
	u64 size;
};

int nvif_mem_init_type(struct nvif_mmu *mmu, s32 oclass, int type, u8 page,
		       u64 size, void *argv, u32 argc, struct nvif_mem *);
int nvif_mem_init(struct nvif_mmu *mmu, s32 oclass, u8 type, u8 page,
		  u64 size, void *argv, u32 argc, struct nvif_mem *);
void nvif_mem_fini(struct nvif_mem *);
#endif
#ifndef __NVIF_MMU_H__
#define __NVIF_MMU_H__
#include <nvif/object.h>

struct nvif_mmu {
	struct nvif_object object;
	u8  dmabits;
	u8  heap_nr;
	u8  type_nr;
	u16 kind_nr;

	struct {
		u64 size;
	} *heap;

	struct {
#define NVIF_MEM_VRAM                                                      0x01
#define NVIF_MEM_HOST                                                      0x02
#define NVIF_MEM_COMP                                                      0x04
#define NVIF_MEM_DISP                                                      0x08
#define NVIF_MEM_KIND                                                      0x10
#define NVIF_MEM_MAPPABLE                                                  0x20
#define NVIF_MEM_COHERENT                                                  0x40
#define NVIF_MEM_UNCACHED                                                  0x80
		u8 type;
		u8 heap;
	} *type;

	u8 *kind;
};

int nvif_mmu_init(struct nvif_object *, s32 oclass, struct nvif_mmu *);
void nvif_mmu_fini(struct nvif_mmu *);

static inline bool
nvif_mmu_kind_valid(struct nvif_mmu *mmu, u8 kind)
{
	const u8 invalid = mmu->kind_nr - 1;
	if (kind) {
		if (kind >= mmu->kind_nr || mmu->kind[kind] == invalid)
			return false;
	}
	return true;
}

static inline int
nvif_mmu_type(struct nvif_mmu *mmu, u8 mask)
{
	int i;
	for (i = 0; i < mmu->type_nr; i++) {
		if ((mmu->type[i].type & mask) == mask)
			return i;
	}
	return -EINVAL;
}
#endif
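
The shortlog commit "drm/nouveau: fetch memory type indices that we care
about for ttm" and "use nvif_mmu_type to determine BAR1 caching" are built on
exactly these helpers. A plausible call, with "mmu" and "kind" assumed from
the surrounding context:

```c
/* Assumed usage: find a host-visible, coherent memory type for TTM. */
int type = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
			      NVIF_MEM_COHERENT);
if (type < 0)
	return type;            /* no such type exposed by this device */
if (!nvif_mmu_kind_valid(mmu, kind))
	return -EINVAL;         /* kind out of range or marked invalid */
```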
@@ -16,7 +16,7 @@ struct nvif_object {
 	void *priv; /*XXX: hack */
 	struct {
 		void __iomem *ptr;
-		u32 size;
+		u64 size;
 	} map;
 };
 
@@ -29,7 +29,10 @@ void nvif_object_sclass_put(struct nvif_sclass **);
 u32  nvif_object_rd(struct nvif_object *, int, u64);
 void nvif_object_wr(struct nvif_object *, int, u64, u32);
 int  nvif_object_mthd(struct nvif_object *, u32, void *, u32);
-int  nvif_object_map(struct nvif_object *);
+int  nvif_object_map_handle(struct nvif_object *, void *, u32,
+			    u64 *handle, u64 *length);
+void nvif_object_unmap_handle(struct nvif_object *);
+int  nvif_object_map(struct nvif_object *, void *, u32);
 void nvif_object_unmap(struct nvif_object *);
 
 #define nvif_handle(a) (unsigned long)(void *)(a)
......
@@ -33,18 +33,4 @@
 #include <soc/tegra/fuse.h>
 #include <soc/tegra/pmc.h>
 
-#ifndef ioread32_native
-#ifdef __BIG_ENDIAN
-#define ioread16_native ioread16be
-#define iowrite16_native iowrite16be
-#define ioread32_native ioread32be
-#define iowrite32_native iowrite32be
-#else /* def __BIG_ENDIAN */
-#define ioread16_native ioread16
-#define iowrite16_native iowrite16
-#define ioread32_native ioread32
-#define iowrite32_native iowrite32
-#endif /* def __BIG_ENDIAN else */
-#endif /* !ioread32_native */
 #endif
#ifndef __NVIF_VMM_H__
#define __NVIF_VMM_H__
#include <nvif/object.h>
struct nvif_mem;
struct nvif_mmu;

enum nvif_vmm_get {
	ADDR,
	PTES,
	LAZY
};

struct nvif_vma {
	u64 addr;
	u64 size;
};

struct nvif_vmm {
	struct nvif_object object;
	u64 start;
	u64 limit;

	struct {
		u8 shift;
		bool sparse:1;
		bool vram:1;
		bool host:1;
		bool comp:1;
	} *page;
	int page_nr;
};

int nvif_vmm_init(struct nvif_mmu *, s32 oclass, u64 addr, u64 size,
		  void *argv, u32 argc, struct nvif_vmm *);
void nvif_vmm_fini(struct nvif_vmm *);
int nvif_vmm_get(struct nvif_vmm *, enum nvif_vmm_get, bool sparse,
		 u8 page, u8 align, u64 size, struct nvif_vma *);
void nvif_vmm_put(struct nvif_vmm *, struct nvif_vma *);
int nvif_vmm_map(struct nvif_vmm *, u64 addr, u64 size, void *argv, u32 argc,
		 struct nvif_mem *, u64 offset);
int nvif_vmm_unmap(struct nvif_vmm *, u64);
#endif
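
A plausible reading of the three request types, consistent with the mapref
comment on nvkm_vma in subdev/mmu.h below (the exact semantics are defined by
the MMU rework commits themselves): ADDR allocates address space only, PTES
also pre-allocates page tables for the given page index, and LAZY defers PTE
handling to map/unmap time. A minimal sketch, assuming "vmm", "page" and
"size" from context:

```c
/* Sketch: reserve address space with pre-allocated PTs, then release it. */
struct nvif_vma vma;
int ret = nvif_vmm_get(vmm, PTES, false, page, 0, size, &vma);
if (ret == 0)
	nvif_vmm_put(vmm, &vma);
```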
@@ -16,7 +16,8 @@ struct nvkm_client {
 	void *data;
 	int (*ntfy)(const void *, u32, const void *, u32);
 
-	struct nvkm_vm *vm;
+	struct list_head umem;
+	spinlock_t lock;
 };
 
 int  nvkm_client_new(const char *name, u64 device, const char *cfg,
......
 #ifndef __NVKM_DEVICE_H__
 #define __NVKM_DEVICE_H__
+#include <core/oclass.h>
 #include <core/event.h>
-#include <core/object.h>
 
 enum nvkm_devidx {
 	NVKM_SUBDEV_PCI,
......
@@ -15,6 +15,7 @@ struct nvkm_engine {
 struct nvkm_engine_func {
 	void *(*dtor)(struct nvkm_engine *);
+	void (*preinit)(struct nvkm_engine *);
 	int (*oneinit)(struct nvkm_engine *);
 	int (*init)(struct nvkm_engine *);
 	int (*fini)(struct nvkm_engine *, bool suspend);
......
 #ifndef __NVKM_GPUOBJ_H__
 #define __NVKM_GPUOBJ_H__
-#include <core/object.h>
 #include <core/memory.h>
 #include <core/mm.h>
-struct nvkm_vma;
-struct nvkm_vm;
 
 #define NVOBJ_FLAG_ZERO_ALLOC 0x00000001
 #define NVOBJ_FLAG_HEAP       0x00000004
 
 struct nvkm_gpuobj {
-	struct nvkm_object object;
-	const struct nvkm_gpuobj_func *func;
+	union {
+		const struct nvkm_gpuobj_func *func;
+		const struct nvkm_gpuobj_func *ptrs;
+	};
 	struct nvkm_gpuobj *parent;
 	struct nvkm_memory *memory;
 	struct nvkm_mm_node *node;
@@ -28,15 +27,14 @@ struct nvkm_gpuobj_func {
 	void (*release)(struct nvkm_gpuobj *);
 	u32 (*rd32)(struct nvkm_gpuobj *, u32 offset);
 	void (*wr32)(struct nvkm_gpuobj *, u32 offset, u32 data);
+	int (*map)(struct nvkm_gpuobj *, u64 offset, struct nvkm_vmm *,
+		   struct nvkm_vma *, void *argv, u32 argc);
 };
 
 int nvkm_gpuobj_new(struct nvkm_device *, u32 size, int align, bool zero,
		     struct nvkm_gpuobj *parent, struct nvkm_gpuobj **);
 void nvkm_gpuobj_del(struct nvkm_gpuobj **);
 int nvkm_gpuobj_wrap(struct nvkm_memory *, struct nvkm_gpuobj **);
-int nvkm_gpuobj_map(struct nvkm_gpuobj *, struct nvkm_vm *, u32 access,
-		    struct nvkm_vma *);
-void nvkm_gpuobj_unmap(struct nvkm_vma *);
 void nvkm_gpuobj_memcpy_to(struct nvkm_gpuobj *dst, u32 dstoffset, void *src,
			    u32 length);
 void nvkm_gpuobj_memcpy_from(void *dst, struct nvkm_gpuobj *src, u32 srcoffset,
......
@@ -3,7 +3,12 @@
 #include <core/os.h>
 struct nvkm_device;
 struct nvkm_vma;
-struct nvkm_vm;
+struct nvkm_vmm;
+
+struct nvkm_tags {
+	struct nvkm_mm_node *mn;
+	refcount_t refcount;
+};
 
 enum nvkm_memory_target {
 	NVKM_MEM_TARGET_INST, /* instance memory */
@@ -14,41 +19,84 @@ enum nvkm_memory_target {
 struct nvkm_memory {
 	const struct nvkm_memory_func *func;
+	const struct nvkm_memory_ptrs *ptrs;
+	struct kref kref;
+	struct nvkm_tags *tags;
 };
 
 struct nvkm_memory_func {
 	void *(*dtor)(struct nvkm_memory *);
 	enum nvkm_memory_target (*target)(struct nvkm_memory *);
+	u8 (*page)(struct nvkm_memory *);
 	u64 (*addr)(struct nvkm_memory *);
 	u64 (*size)(struct nvkm_memory *);
-	void (*boot)(struct nvkm_memory *, struct nvkm_vm *);
+	void (*boot)(struct nvkm_memory *, struct nvkm_vmm *);
 	void __iomem *(*acquire)(struct nvkm_memory *);
 	void (*release)(struct nvkm_memory *);
+	int (*map)(struct nvkm_memory *, u64 offset, struct nvkm_vmm *,
+		   struct nvkm_vma *, void *argv, u32 argc);
+};
+
+struct nvkm_memory_ptrs {
 	u32 (*rd32)(struct nvkm_memory *, u64 offset);
 	void (*wr32)(struct nvkm_memory *, u64 offset, u32 data);
-	void (*map)(struct nvkm_memory *, struct nvkm_vma *, u64 offset);
 };
 
 void nvkm_memory_ctor(const struct nvkm_memory_func *, struct nvkm_memory *);
 int nvkm_memory_new(struct nvkm_device *, enum nvkm_memory_target,
		     u64 size, u32 align, bool zero, struct nvkm_memory **);
-void nvkm_memory_del(struct nvkm_memory **);
+struct nvkm_memory *nvkm_memory_ref(struct nvkm_memory *);
+void nvkm_memory_unref(struct nvkm_memory **);
+int nvkm_memory_tags_get(struct nvkm_memory *, struct nvkm_device *, u32 tags,
+			 void (*clear)(struct nvkm_device *, u32, u32),
+			 struct nvkm_tags **);
+void nvkm_memory_tags_put(struct nvkm_memory *, struct nvkm_device *,
+			  struct nvkm_tags **);
+
 #define nvkm_memory_target(p) (p)->func->target(p)
+#define nvkm_memory_page(p) (p)->func->page(p)
 #define nvkm_memory_addr(p) (p)->func->addr(p)
 #define nvkm_memory_size(p) (p)->func->size(p)
 #define nvkm_memory_boot(p,v) (p)->func->boot((p),(v))
-#define nvkm_memory_map(p,v,o) (p)->func->map((p),(v),(o))
+#define nvkm_memory_map(p,o,vm,va,av,ac) \
	(p)->func->map((p),(o),(vm),(va),(av),(ac))
 
 /* accessor macros - kmap()/done() must bracket use of the other accessor
  * macros to guarantee correct behaviour across all chipsets
  */
 #define nvkm_kmap(o) (o)->func->acquire(o)
-#define nvkm_ro32(o,a) (o)->func->rd32((o), (a))
-#define nvkm_wo32(o,a,d) (o)->func->wr32((o), (a), (d))
+#define nvkm_done(o) (o)->func->release(o)
+
+#define nvkm_ro32(o,a) (o)->ptrs->rd32((o), (a))
+#define nvkm_wo32(o,a,d) (o)->ptrs->wr32((o), (a), (d))
 #define nvkm_mo32(o,a,m,d) ({ \
	u32 _addr = (a), _data = nvkm_ro32((o), _addr); \
	nvkm_wo32((o), _addr, (_data & ~(m)) | (d)); \
	_data; \
 })
-#define nvkm_done(o) (o)->func->release(o)
+
+#define nvkm_wo64(o,a,d) do { \
+	u64 __a = (a), __d = (d); \
+	nvkm_wo32((o), __a + 0, lower_32_bits(__d)); \
+	nvkm_wo32((o), __a + 4, upper_32_bits(__d)); \
+} while(0)
+
+#define nvkm_fill(t,s,o,a,d,c) do { \
+	u64 _a = (a), _c = (c), _d = (d), _o = _a >> s, _s = _c << s; \
+	u##t __iomem *_m = nvkm_kmap(o); \
+	if (likely(_m)) { \
+		if (_d) { \
+			while (_c--) \
+				iowrite##t##_native(_d, &_m[_o++]); \
+		} else { \
+			memset_io(&_m[_o], _d, _s); \
+		} \
+	} else { \
+		for (; _c; _c--, _a += BIT(s)) \
+			nvkm_wo##t((o), _a, _d); \
+	} \
	nvkm_done(o); \
+} while(0)
+#define nvkm_fo32(o,a,d,c) nvkm_fill(32, 2, (o), (a), (d), (c))
+#define nvkm_fo64(o,a,d,c) nvkm_fill(64, 3, (o), (a), (d), (c))
 #endif
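
The bracketing contract in the comment above matters because acquire() may
have to create a temporary mapping (e.g. through BAR2) before the rd32/wr32
pointers are usable. A minimal sketch of correct usage, with "memory" assumed
to be a valid struct nvkm_memory; note that the fill helpers bracket
internally via nvkm_fill(), so they sit outside an explicit pair:

```c
/* Sketch: accessor usage under the kmap()/done() contract. */
nvkm_kmap(memory);
nvkm_wo32(memory, 0x00, 0x00000001);
nvkm_wo64(memory, 0x08, 0x100000ULL); /* two 32-bit writes, low word first */
nvkm_done(memory);

nvkm_fo32(memory, 0x10, 0x00000000, 4); /* zero four dwords; self-bracketed */
```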
@@ -30,7 +30,7 @@ nvkm_mm_initialised(struct nvkm_mm *mm)
 	return mm->heap_nodes;
 }
 
-int nvkm_mm_init(struct nvkm_mm *, u32 offset, u32 length, u32 block);
+int nvkm_mm_init(struct nvkm_mm *, u8 heap, u32 offset, u32 length, u32 block);
 int nvkm_mm_fini(struct nvkm_mm *);
 int nvkm_mm_head(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
		  u32 size_min, u32 align, struct nvkm_mm_node **);
@@ -39,9 +39,39 @@ int nvkm_mm_tail(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
 void nvkm_mm_free(struct nvkm_mm *, struct nvkm_mm_node **);
 void nvkm_mm_dump(struct nvkm_mm *, const char *);
 
+static inline u32
+nvkm_mm_heap_size(struct nvkm_mm *mm, u8 heap)
+{
+	struct nvkm_mm_node *node;
+	u32 size = 0;
+	list_for_each_entry(node, &mm->nodes, nl_entry) {
+		if (node->heap == heap)
+			size += node->length;
+	}
+	return size;
+}
+
 static inline bool
 nvkm_mm_contiguous(struct nvkm_mm_node *node)
 {
 	return !node->next;
 }
+
+static inline u32
+nvkm_mm_addr(struct nvkm_mm_node *node)
+{
+	if (WARN_ON(!nvkm_mm_contiguous(node)))
+		return 0;
+	return node->offset;
+}
+
+static inline u32
+nvkm_mm_size(struct nvkm_mm_node *node)
+{
+	u32 size = 0;
+	do {
+		size += node->length;
+	} while ((node = node->next));
+	return size;
+}
 #endif
 #ifndef __NVKM_OBJECT_H__
 #define __NVKM_OBJECT_H__
-#include <core/os.h>
-#include <core/debug.h>
+#include <core/oclass.h>
 struct nvkm_event;
 struct nvkm_gpuobj;
-struct nvkm_oclass;
 
 struct nvkm_object {
 	const struct nvkm_object_func *func;
@@ -21,13 +19,20 @@ struct nvkm_object {
 	struct rb_node node;
 };
 
+enum nvkm_object_map {
+	NVKM_OBJECT_MAP_IO,
+	NVKM_OBJECT_MAP_VA
+};
+
 struct nvkm_object_func {
 	void *(*dtor)(struct nvkm_object *);
 	int (*init)(struct nvkm_object *);
 	int (*fini)(struct nvkm_object *, bool suspend);
 	int (*mthd)(struct nvkm_object *, u32 mthd, void *data, u32 size);
 	int (*ntfy)(struct nvkm_object *, u32 mthd, struct nvkm_event **);
-	int (*map)(struct nvkm_object *, u64 *addr, u32 *size);
+	int (*map)(struct nvkm_object *, void *argv, u32 argc,
+		   enum nvkm_object_map *, u64 *addr, u64 *size);
+	int (*unmap)(struct nvkm_object *);
 	int (*rd08)(struct nvkm_object *, u64 addr, u8 *data);
 	int (*rd16)(struct nvkm_object *, u64 addr, u16 *data);
 	int (*rd32)(struct nvkm_object *, u64 addr, u32 *data);
@@ -52,7 +57,9 @@ int nvkm_object_init(struct nvkm_object *);
 int nvkm_object_fini(struct nvkm_object *, bool suspend);
 int nvkm_object_mthd(struct nvkm_object *, u32 mthd, void *data, u32 size);
 int nvkm_object_ntfy(struct nvkm_object *, u32 mthd, struct nvkm_event **);
-int nvkm_object_map(struct nvkm_object *, u64 *addr, u32 *size);
+int nvkm_object_map(struct nvkm_object *, void *argv, u32 argc,
+		    enum nvkm_object_map *, u64 *addr, u64 *size);
+int nvkm_object_unmap(struct nvkm_object *);
 int nvkm_object_rd08(struct nvkm_object *, u64 addr, u8 *data);
 int nvkm_object_rd16(struct nvkm_object *, u64 addr, u16 *data);
 int nvkm_object_rd32(struct nvkm_object *, u64 addr, u32 *data);
@@ -66,28 +73,4 @@ bool nvkm_object_insert(struct nvkm_object *);
 void nvkm_object_remove(struct nvkm_object *);
 struct nvkm_object *nvkm_object_search(struct nvkm_client *, u64 object,
				        const struct nvkm_object_func *);
-
-struct nvkm_sclass {
-	int minver;
-	int maxver;
-	s32 oclass;
-	const struct nvkm_object_func *func;
-	int (*ctor)(const struct nvkm_oclass *, void *data, u32 size,
-		    struct nvkm_object **);
-};
-
-struct nvkm_oclass {
-	int (*ctor)(const struct nvkm_oclass *, void *data, u32 size,
-		    struct nvkm_object **);
-	struct nvkm_sclass base;
-	const void *priv;
-	const void *engn;
-	u32 handle;
-	u8 route;
-	u64 token;
-	u64 object;
-	struct nvkm_client *client;
-	struct nvkm_object *parent;
-	struct nvkm_engine *engine;
-};
 #endif
#ifndef __NVKM_OCLASS_H__
#define __NVKM_OCLASS_H__
#include <core/os.h>
#include <core/debug.h>
struct nvkm_oclass;
struct nvkm_object;

struct nvkm_sclass {
	int minver;
	int maxver;
	s32 oclass;
	const struct nvkm_object_func *func;
	int (*ctor)(const struct nvkm_oclass *, void *data, u32 size,
		    struct nvkm_object **);
};

struct nvkm_oclass {
	int (*ctor)(const struct nvkm_oclass *, void *data, u32 size,
		    struct nvkm_object **);
	struct nvkm_sclass base;
	const void *priv;
	const void *engn;
	u32 handle;
	u8  route;
	u64 token;
	u64 object;
	struct nvkm_client *client;
	struct nvkm_object *parent;
	struct nvkm_engine *engine;
};
#endif
 #ifndef __NVKM_OS_H__
 #define __NVKM_OS_H__
 #include <nvif/os.h>
+
+#ifdef __BIG_ENDIAN
+#define ioread16_native ioread16be
+#define iowrite16_native iowrite16be
+#define ioread32_native ioread32be
+#define iowrite32_native iowrite32be
+#else
+#define ioread16_native ioread16
+#define iowrite16_native iowrite16
+#define ioread32_native ioread32
+#define iowrite32_native iowrite32
+#endif
+
+#define iowrite64_native(v,p) do { \
+	u32 __iomem *_p = (u32 __iomem *)(p); \
+	u64 _v = (v); \
+	iowrite32_native(lower_32_bits(_v), &_p[0]); \
+	iowrite32_native(upper_32_bits(_v), &_p[1]); \
+} while(0)
 #endif
 #ifndef __NVKM_RAMHT_H__
 #define __NVKM_RAMHT_H__
 #include <core/gpuobj.h>
+struct nvkm_object;
 
 struct nvkm_ramht_data {
 	struct nvkm_gpuobj *inst;
......
@@ -33,7 +33,7 @@ void nvkm_subdev_intr(struct nvkm_subdev *);
 /* subdev logging */
 #define nvkm_printk_(s,l,p,f,a...) do { \
	const struct nvkm_subdev *_subdev = (s); \
-	if (_subdev->debug >= (l)) { \
+	if (CONFIG_NOUVEAU_DEBUG >= (l) && _subdev->debug >= (l)) { \
		dev_##p(_subdev->device->dev, "%s: "f, \
			nvkm_subdev_name[_subdev->index], ##a); \
	} \
......
 #ifndef __NVKM_DMA_H__
 #define __NVKM_DMA_H__
 #include <core/engine.h>
+#include <core/object.h>
 struct nvkm_client;
 
 struct nvkm_dmaobj {
......
@@ -3,6 +3,7 @@
 #define nvkm_falcon(p) container_of((p), struct nvkm_falcon, engine)
 #include <core/engine.h>
 struct nvkm_fifo_chan;
+struct nvkm_gpuobj;
 
 enum nvkm_falcon_dmaidx {
 	FALCON_DMAIDX_UCODE = 0,
@@ -77,7 +78,7 @@ struct nvkm_falcon_func {
 	void (*load_imem)(struct nvkm_falcon *, void *, u32, u32, u16, u8, bool);
 	void (*load_dmem)(struct nvkm_falcon *, void *, u32, u32, u8);
 	void (*read_dmem)(struct nvkm_falcon *, u32, u32, u8, void *);
-	void (*bind_context)(struct nvkm_falcon *, struct nvkm_gpuobj *);
+	void (*bind_context)(struct nvkm_falcon *, struct nvkm_memory *);
 	int (*wait_for_halt)(struct nvkm_falcon *, u32);
 	int (*clear_interrupt)(struct nvkm_falcon *, u32);
 	void (*set_start_addr)(struct nvkm_falcon *, u32 start_addr);
@@ -112,7 +113,7 @@ void nvkm_falcon_load_imem(struct nvkm_falcon *, void *, u32, u32, u16, u8,
			    bool);
 void nvkm_falcon_load_dmem(struct nvkm_falcon *, void *, u32, u32, u8);
 void nvkm_falcon_read_dmem(struct nvkm_falcon *, u32, u32, u8, void *);
-void nvkm_falcon_bind_context(struct nvkm_falcon *, struct nvkm_gpuobj *);
+void nvkm_falcon_bind_context(struct nvkm_falcon *, struct nvkm_memory *);
 void nvkm_falcon_set_start_addr(struct nvkm_falcon *, u32);
 void nvkm_falcon_start(struct nvkm_falcon *);
 int nvkm_falcon_wait_for_halt(struct nvkm_falcon *, u32);
......
 #ifndef __NVKM_FIFO_H__
 #define __NVKM_FIFO_H__
 #include <core/engine.h>
+#include <core/object.h>
 #include <core/event.h>
 
 #define NVKM_FIFO_CHID_NR 4096
@@ -21,7 +22,7 @@ struct nvkm_fifo_chan {
 	u16 chid;
 	struct nvkm_gpuobj *inst;
 	struct nvkm_gpuobj *push;
-	struct nvkm_vm *vm;
+	struct nvkm_vmm *vmm;
 	void __iomem *user;
 	u64 addr;
 	u32 size;
......
@@ -8,17 +8,22 @@ struct nvkm_bar {
 	struct nvkm_subdev subdev;
 
 	spinlock_t lock;
+	bool bar2;
 
 	/* whether the BAR supports to be ioremapped WC or should be uncached */
 	bool iomap_uncached;
 };
 
+struct nvkm_vmm *nvkm_bar_bar1_vmm(struct nvkm_device *);
+void nvkm_bar_bar2_init(struct nvkm_device *);
+void nvkm_bar_bar2_fini(struct nvkm_device *);
+struct nvkm_vmm *nvkm_bar_bar2_vmm(struct nvkm_device *);
 void nvkm_bar_flush(struct nvkm_bar *);
-struct nvkm_vm *nvkm_bar_kmap(struct nvkm_bar *);
-int nvkm_bar_umap(struct nvkm_bar *, u64 size, int type, struct nvkm_vma *);
 
 int nv50_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
 int g84_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
 int gf100_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
 int gk20a_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
+int gm107_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
+int gm20b_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
 #endif
 #ifndef __NVKM_FB_H__
 #define __NVKM_FB_H__
 #include <core/subdev.h>
-#include <subdev/mmu.h>
+#include <core/mm.h>
 
 /* memory type/access flags, do not match hardware values */
 #define NV_MEM_ACCESS_RO 1
@@ -21,22 +20,6 @@
 #define NVKM_RAM_TYPE_VM 0x7f
 #define NV_MEM_COMP_VM 0x03
 
-struct nvkm_mem {
-	struct drm_device *dev;
-
-	struct nvkm_vma bar_vma;
-	struct nvkm_vma vma[2];
-	u8  page_shift;
-
-	struct nvkm_mm_node *tag;
-	struct nvkm_mm_node *mem;
-	dma_addr_t *pages;
-	u32 memtype;
-	u64 offset;
-	u64 size;
-	struct sg_table *sg;
-};
-
 struct nvkm_fb_tile {
 	struct nvkm_mm_node *tag;
 	u32 addr;
@@ -50,6 +33,7 @@ struct nvkm_fb {
 	struct nvkm_subdev subdev;
 
 	struct nvkm_ram *ram;
+	struct nvkm_mm tags;
 
 	struct {
 		struct nvkm_fb_tile region[16];
@@ -62,7 +46,6 @@ struct nvkm_fb {
 	struct nvkm_memory *mmu_wr;
 };
 
-bool nvkm_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
 void nvkm_fb_tile_init(struct nvkm_fb *, int region, u32 addr, u32 size,
		        u32 pitch, u32 flags, struct nvkm_fb_tile *);
 void nvkm_fb_tile_fini(struct nvkm_fb *, int region, struct nvkm_fb_tile *);
@@ -129,8 +112,11 @@ struct nvkm_ram {
 	u64 size;
 
 #define NVKM_RAM_MM_SHIFT 12
+#define NVKM_RAM_MM_ANY    (NVKM_MM_HEAP_ANY + 0)
+#define NVKM_RAM_MM_NORMAL (NVKM_MM_HEAP_ANY + 1)
+#define NVKM_RAM_MM_NOMAP  (NVKM_MM_HEAP_ANY + 2)
+#define NVKM_RAM_MM_MIXED  (NVKM_MM_HEAP_ANY + 3)
 	struct nvkm_mm vram;
-	struct nvkm_mm tags;
 	u64 stolen;
 
 	int ranks;
@@ -147,6 +133,10 @@ struct nvkm_ram {
 	struct nvkm_ram_data target;
 };
 
+int
+nvkm_ram_get(struct nvkm_device *, u8 heap, u8 type, u8 page, u64 size,
+	     bool contig, bool back, struct nvkm_memory **);
+
 struct nvkm_ram_func {
 	u64 upper;
 	u32 (*probe_fbp)(const struct nvkm_ram_func *, struct nvkm_device *,
@@ -157,14 +147,8 @@ struct nvkm_ram_func {
 	void *(*dtor)(struct nvkm_ram *);
 	int (*init)(struct nvkm_ram *);
 
-	int (*get)(struct nvkm_ram *, u64 size, u32 align, u32 size_nc,
-		   u32 type, struct nvkm_mem **);
-	void (*put)(struct nvkm_ram *, struct nvkm_mem **);
-
 	int (*calc)(struct nvkm_ram *, u32 freq);
 	int (*prog)(struct nvkm_ram *);
 	void (*tidy)(struct nvkm_ram *);
 };
-
-extern const u8 gf100_pte_storage_type_map[256];
 #endif
@@ -9,6 +9,7 @@ struct nvkm_instmem {
 
 	spinlock_t lock;
 	struct list_head list;
+	struct list_head boot;
 	u32 reserved;
 
 	struct nvkm_memory *vbios;
......
@@ -14,8 +14,7 @@ struct nvkm_ltc {
 
 	u32 num_tags;
 	u32 tag_base;
-	struct nvkm_mm tags;
-	struct nvkm_mm_node *tag_ram;
+	struct nvkm_memory *tag_ram;
 
 	int zbc_min;
 	int zbc_max;
@@ -23,9 +22,7 @@ struct nvkm_ltc {
 	u32 zbc_depth[NVKM_LTC_MAX_ZBC_CNT];
 };
 
-int nvkm_ltc_tags_alloc(struct nvkm_ltc *, u32 count, struct nvkm_mm_node **);
-void nvkm_ltc_tags_free(struct nvkm_ltc *, struct nvkm_mm_node **);
-void nvkm_ltc_tags_clear(struct nvkm_ltc *, u32 first, u32 count);
+void nvkm_ltc_tags_clear(struct nvkm_device *, u32 first, u32 count);
 
 int nvkm_ltc_zbc_color_get(struct nvkm_ltc *, int index, const u32[4]);
 int nvkm_ltc_zbc_depth_get(struct nvkm_ltc *, int index, const u32);
......
 #ifndef __NVKM_MMU_H__
 #define __NVKM_MMU_H__
 #include <core/subdev.h>
-#include <core/mm.h>
-struct nvkm_device;
-struct nvkm_mem;
-
-struct nvkm_vm_pgt {
-	struct nvkm_memory *mem[2];
-	u32 refcount[2];
-};
-
-struct nvkm_vm_pgd {
-	struct list_head head;
-	struct nvkm_gpuobj *obj;
-};
 
 struct nvkm_vma {
 	struct list_head head;
-	int refcount;
-	struct nvkm_vm *vm;
-	struct nvkm_mm_node *node;
-	u64 offset;
-	u32 access;
+	struct rb_node tree;
+	u64 addr;
+	u64 size:50;
+	bool mapref:1; /* PTs (de)referenced on (un)map (vs pre-allocated). */
+	bool sparse:1; /* Unmapped PDEs/PTEs will not trigger MMU faults. */
+#define NVKM_VMA_PAGE_NONE 7
+	u8   page:3; /* Requested page type (index, or NONE for automatic). */
+	u8   refd:3; /* Current page type (index, or NONE for unreferenced). */
+	bool used:1; /* Region allocated. */
+	bool part:1; /* Region was split from an allocated region by map(). */
+	bool user:1; /* Region user-allocated. */
+	bool busy:1; /* Region busy (for temporarily preventing user access). */
+	struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
+	struct nvkm_tags *tags; /* Compression tag reference. */
 };
 
-struct nvkm_vm {
+struct nvkm_vmm {
+	const struct nvkm_vmm_func *func;
 	struct nvkm_mmu *mmu;
+	const char *name;
+	u32 debug;
+	struct kref kref;
 	struct mutex mutex;
 
-	struct nvkm_mm mm;
-	struct kref refcount;
-
-	struct list_head pgd_list;
+	u64 start;
+	u64 limit;
+
+	struct nvkm_vmm_pt *pd;
+	struct list_head join;
+
+	struct list_head list;
+	struct rb_root free;
+	struct rb_root root;
+	bool bootstrapped;
 	atomic_t engref[NVKM_SUBDEV_NR];
 
-	struct nvkm_vm_pgt *pgt;
-	u32 fpde;
-	u32 lpde;
+	dma_addr_t null;
+	void *nullp;
 };
 
-int  nvkm_vm_new(struct nvkm_device *, u64 offset, u64 length, u64 mm_offset,
-		 struct lock_class_key *, struct nvkm_vm **);
-int  nvkm_vm_ref(struct nvkm_vm *, struct nvkm_vm **, struct nvkm_gpuobj *pgd);
-int  nvkm_vm_boot(struct nvkm_vm *, u64 size);
-int  nvkm_vm_get(struct nvkm_vm *, u64 size, u32 page_shift, u32 access,
-		 struct nvkm_vma *);
-void nvkm_vm_put(struct nvkm_vma *);
-void nvkm_vm_map(struct nvkm_vma *, struct nvkm_mem *);
-void nvkm_vm_map_at(struct nvkm_vma *, u64 offset, struct nvkm_mem *);
-void nvkm_vm_unmap(struct nvkm_vma *);
-void nvkm_vm_unmap_at(struct nvkm_vma *, u64 offset, u64 length);
+int nvkm_vmm_new(struct nvkm_device *, u64 addr, u64 size, void *argv, u32 argc,
+		 struct lock_class_key *, const char *name, struct nvkm_vmm **);
+struct nvkm_vmm *nvkm_vmm_ref(struct nvkm_vmm *);
+void nvkm_vmm_unref(struct nvkm_vmm **);
+int nvkm_vmm_boot(struct nvkm_vmm *);
+int nvkm_vmm_join(struct nvkm_vmm *, struct nvkm_memory *inst);
+void nvkm_vmm_part(struct nvkm_vmm *, struct nvkm_memory *inst);
+int nvkm_vmm_get(struct nvkm_vmm *, u8 page, u64 size, struct nvkm_vma **);
+void nvkm_vmm_put(struct nvkm_vmm *, struct nvkm_vma **);
+
+struct nvkm_vmm_map {
+	struct nvkm_memory *memory;
+	u64 offset;
+
+	struct nvkm_mm_node *mem;
+	struct scatterlist *sgl;
+	dma_addr_t *dma;
+	u64 off;
+
+	const struct nvkm_vmm_page *page;
+
+	struct nvkm_tags *tags;
+	u64 next;
+	u64 type;
+	u64 ctag;
+};
+
+int nvkm_vmm_map(struct nvkm_vmm *, struct nvkm_vma *, void *argv, u32 argc,
+		 struct nvkm_vmm_map *);
+void nvkm_vmm_unmap(struct nvkm_vmm *, struct nvkm_vma *);
+
+struct nvkm_memory *nvkm_umem_search(struct nvkm_client *, u64);
+struct nvkm_vmm *nvkm_uvmm_search(struct nvkm_client *, u64 handle);
 
 struct nvkm_mmu {
 	const struct nvkm_mmu_func *func;
 	struct nvkm_subdev subdev;
 
-	u64 limit;
 	u8  dma_bits;
-	u8  lpg_shift;
+
+	int heap_nr;
+	struct {
+#define NVKM_MEM_VRAM                                                      0x01
+#define NVKM_MEM_HOST                                                      0x02
+#define NVKM_MEM_COMP                                                      0x04
+#define NVKM_MEM_DISP                                                      0x08
+		u8  type;
+		u64 size;
+	} heap[4];
+	int type_nr;
+	struct {
+#define NVKM_MEM_KIND                                                      0x10
+#define NVKM_MEM_MAPPABLE                                                  0x20
+#define NVKM_MEM_COHERENT                                                  0x40
+#define NVKM_MEM_UNCACHED                                                  0x80
+		u8 type;
+		u8 heap;
+	} type[16];
+
+	struct nvkm_vmm *vmm;
+
+	struct {
+		struct mutex mutex;
+		struct list_head list;
+	} ptc, ptp;
+
+	struct nvkm_device_oclass user;
 };
 
 int nv04_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int nv41_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int nv44_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int nv50_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int g84_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int gf100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gk104_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gk20a_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gm200_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gm20b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gp100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gp10b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 #endif
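
On the kernel side, the old nvkm_vm_get()/map()/unmap()/put() cycle is
replaced roughly one-for-one by the declarations above. A hedged sketch of
the new sequence; the argv/argc values and the fields filled into
struct nvkm_vmm_map are assumptions based only on the declarations:

```c
/* Sketch: allocate, map, and release a VMA with the new nvkm interfaces.
 * "vmm", "memory", "page" and "size" are assumed from context. */
struct nvkm_vmm_map map = {
	.memory = memory,  /* assumed: nvkm_memory backing the mapping */
	.offset = 0,
};
struct nvkm_vma *vma;
int ret;

ret = nvkm_vmm_get(vmm, page, size, &vma);
if (ret)
	return ret;

ret = nvkm_vmm_map(vmm, vma, NULL, 0, &map);
if (ret == 0) {
	/* ... use the mapping, then tear it down ... */
	nvkm_vmm_unmap(vmm, vma);
}
nvkm_vmm_put(vmm, &vma);
return ret;
```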
@@ -97,4 +97,5 @@ int gt215_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 int gf119_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 int gm107_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 int gm200_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int gp100_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 #endif
@@ -34,6 +34,7 @@
 #include "nouveau_gem.h"
 #include "nouveau_chan.h"
 #include "nouveau_abi16.h"
+#include "nouveau_vmm.h"
 
 static struct nouveau_abi16 *
 nouveau_abi16(struct drm_file *file_priv)
@@ -134,7 +135,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
 	}
 
 	if (chan->ntfy) {
-		nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma);
+		nouveau_vma_del(&chan->ntfy_vma);
 		nouveau_bo_unpin(chan->ntfy);
 		drm_gem_object_unreference_unlocked(&chan->ntfy->gem);
 	}
@@ -184,29 +185,33 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
 		getparam->value = device->info.chipset;
 		break;
 	case NOUVEAU_GETPARAM_PCI_VENDOR:
-		if (nvxx_device(device)->func->pci)
+		if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
 			getparam->value = dev->pdev->vendor;
 		else
 			getparam->value = 0;
 		break;
 	case NOUVEAU_GETPARAM_PCI_DEVICE:
-		if (nvxx_device(device)->func->pci)
+		if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
 			getparam->value = dev->pdev->device;
 		else
 			getparam->value = 0;
 		break;
 	case NOUVEAU_GETPARAM_BUS_TYPE:
-		if (!nvxx_device(device)->func->pci)
-			getparam->value = 3;
-		else
-		if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP))
-			getparam->value = 0;
-		else
+		switch (device->info.platform) {
+		case NV_DEVICE_INFO_V0_AGP : getparam->value = 0; break;
+		case NV_DEVICE_INFO_V0_PCI : getparam->value = 1; break;
+		case NV_DEVICE_INFO_V0_PCIE: getparam->value = 2; break;
+		case NV_DEVICE_INFO_V0_SOC : getparam->value = 3; break;
+		case NV_DEVICE_INFO_V0_IGP :
			if (!pci_is_pcie(dev->pdev))
				getparam->value = 1;
			else
				getparam->value = 2;
			break;
+		default:
+			WARN_ON(1);
+			break;
+		}
 	case NOUVEAU_GETPARAM_FB_SIZE:
 		getparam->value = drm->gem.vram_available;
 		break;
@@ -329,8 +334,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 		goto done;
 
 	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
-		ret = nouveau_bo_vma_add(chan->ntfy, cli->vm,
-					 &chan->ntfy_vma);
+		ret = nouveau_vma_new(chan->ntfy, &cli->vmm, &chan->ntfy_vma);
		if (ret)
			goto done;
	}
@@ -340,7 +344,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 	if (ret)
 		goto done;
 
-	ret = nvkm_mm_init(&chan->heap, 0, PAGE_SIZE, 1);
+	ret = nvkm_mm_init(&chan->heap, 0, 0, PAGE_SIZE, 1);
 done:
 	if (ret)
 		nouveau_abi16_chan_fini(abi16, chan);
@@ -548,8 +552,8 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
 	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
 		args.target = NV_DMA_V0_TARGET_VM;
 		args.access = NV_DMA_V0_ACCESS_VM;
-		args.start += chan->ntfy_vma.offset;
-		args.limit += chan->ntfy_vma.offset;
+		args.start += chan->ntfy_vma->addr;
+		args.limit += chan->ntfy_vma->addr;
 	} else
 	if (drm->agp.bridge) {
 		args.target = NV_DMA_V0_TARGET_AGP;
......
@@ -23,7 +23,7 @@ struct nouveau_abi16_chan {
 	struct nouveau_channel *chan;
 	struct list_head notifiers;
 	struct nouveau_bo *ntfy;
-	struct nvkm_vma ntfy_vma;
+	struct nouveau_vma *ntfy_vma;
 	struct nvkm_mm  heap;
 };
......
@@ -1478,9 +1478,13 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
		case 1:
			entry->dpconf.link_bw = 270000;
			break;
-		default:
+		case 2:
			entry->dpconf.link_bw = 540000;
			break;
+		case 3:
+		default:
+			entry->dpconf.link_bw = 810000;
+			break;
		}
		switch ((conf & 0x0f000000) >> 24) {
		case 0xf:
......
@@ -24,12 +24,16 @@ struct nouveau_bo {
 	bool validate_mapped;
 
 	struct list_head vma_list;
-	unsigned page_shift;
 
 	struct nouveau_cli *cli;
 
-	u32 tile_mode;
-	u32 tile_flags;
+	unsigned contig:1;
+	unsigned page:5;
+	unsigned kind:8;
+	unsigned comp:3;
+	unsigned zeta:3;
+	unsigned mode;
+
 	struct nouveau_drm_tile *tile;
 
 	/* Only valid if allocated via nouveau_gem_new() and iff you hold a
@@ -89,13 +93,6 @@ int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
 void nouveau_bo_sync_for_device(struct nouveau_bo *nvbo);
 void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo);
 
-struct nvkm_vma *
-nouveau_bo_vma_find(struct nouveau_bo *, struct nvkm_vm *);
-
-int  nouveau_bo_vma_add(struct nouveau_bo *, struct nvkm_vm *,
-			struct nvkm_vma *);
-void nouveau_bo_vma_del(struct nouveau_bo *, struct nvkm_vma *);
-
 /* TODO: submit equivalent to TTM generic API upstream? */
 static inline void __iomem *
 nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
......
@@ -40,6 +40,7 @@
 #include "nouveau_chan.h"
 #include "nouveau_fence.h"
 #include "nouveau_abi16.h"
+#include "nouveau_vmm.h"
 
 MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
 int nouveau_vram_pushbuf;
@@ -83,6 +84,14 @@ nouveau_channel_del(struct nouveau_channel **pchan)
 {
 	struct nouveau_channel *chan = *pchan;
 	if (chan) {
+		struct nouveau_cli *cli = (void *)chan->user.client;
+		bool super;
+
+		if (cli) {
+			super = cli->base.super;
+			cli->base.super = true;
+		}
+
 		if (chan->fence)
 			nouveau_fence(chan->drm)->context_del(chan);
 		nvif_object_fini(&chan->nvsw);
@@ -91,12 +100,15 @@ nouveau_channel_del(struct nouveau_channel **pchan)
 		nvif_notify_fini(&chan->kill);
 		nvif_object_fini(&chan->user);
 		nvif_object_fini(&chan->push.ctxdma);
-		nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
+		nouveau_vma_del(&chan->push.vma);
 		nouveau_bo_unmap(chan->push.buffer);
 		if (chan->push.buffer && chan->push.buffer->pin_refcnt)
 			nouveau_bo_unpin(chan->push.buffer);
 		nouveau_bo_ref(NULL, &chan->push.buffer);
 		kfree(chan);
+
+		if (cli)
+			cli->base.super = super;
 	}
 	*pchan = NULL;
 }
@@ -106,7 +118,6 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
		     u32 size, struct nouveau_channel **pchan)
 {
 	struct nouveau_cli *cli = (void *)device->object.client;
-	struct nvkm_mmu *mmu = nvxx_mmu(device);
 	struct nv_dma_v0 args = {};
 	struct nouveau_channel *chan;
 	u32 target;
@@ -142,11 +153,11 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
	 * pushbuf lives in, this is because the GEM code requires that
	 * we be able to call out to other (indirect) push buffers
	 */
-	chan->push.vma.offset = chan->push.buffer->bo.offset;
+	chan->push.addr = chan->push.buffer->bo.offset;
 
 	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
-		ret = nouveau_bo_vma_add(chan->push.buffer, cli->vm,
-					 &chan->push.vma);
+		ret = nouveau_vma_new(chan->push.buffer, &cli->vmm,
+				      &chan->push.vma);
		if (ret) {
			nouveau_channel_del(pchan);
			return ret;
@@ -155,7 +166,9 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
		args.target = NV_DMA_V0_TARGET_VM;
		args.access = NV_DMA_V0_ACCESS_VM;
		args.start = 0;
-		args.limit = cli->vm->mmu->limit - 1;
+		args.limit = cli->vmm.vmm.limit - 1;
+
+		chan->push.addr = chan->push.vma->addr;
	} else
	if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
		if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
@@ -185,7 +198,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
-			args.limit = mmu->limit - 1;
+			args.limit = cli->vmm.vmm.limit - 1;
		}
	}
 
@@ -203,6 +216,7 @@ static int
 nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
		     u32 engine, struct nouveau_channel **pchan)
 {
+	struct nouveau_cli *cli = (void *)device->object.client;
 	static const u16 oclasses[] = { PASCAL_CHANNEL_GPFIFO_A,
					MAXWELL_CHANNEL_GPFIFO_A,
					KEPLER_CHANNEL_GPFIFO_B,
@@ -233,22 +247,22 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
		args.kepler.version = 0;
		args.kepler.engines = engine;
		args.kepler.ilength = 0x02000;
-		args.kepler.ioffset = 0x10000 + chan->push.vma.offset;
-		args.kepler.vm = 0;
+		args.kepler.ioffset = 0x10000 + chan->push.addr;
+		args.kepler.vmm = nvif_handle(&cli->vmm.vmm.object);
		size = sizeof(args.kepler);
	} else
	if (oclass[0] >= FERMI_CHANNEL_GPFIFO) {
		args.fermi.version = 0;
		args.fermi.ilength = 0x02000;
-		args.fermi.ioffset = 0x10000 + chan->push.vma.offset;
-		args.fermi.vm = 0;
+		args.fermi.ioffset = 0x10000 + chan->push.addr;
+		args.fermi.vmm = nvif_handle(&cli->vmm.vmm.object);
		size = sizeof(args.fermi);
	} else {
		args.nv50.version = 0;
		args.nv50.ilength = 0x02000;
-		args.nv50.ioffset = 0x10000 + chan->push.vma.offset;
+		args.nv50.ioffset = 0x10000 + chan->push.addr;
args.nv50.pushbuf = nvif_handle(&chan->push.ctxdma); args.nv50.pushbuf = nvif_handle(&chan->push.ctxdma);
args.nv50.vm = 0; args.nv50.vmm = nvif_handle(&cli->vmm.vmm.object);
size = sizeof(args.nv50); size = sizeof(args.nv50);
} }
...@@ -293,7 +307,7 @@ nouveau_channel_dma(struct nouveau_drm *drm, struct nvif_device *device, ...@@ -293,7 +307,7 @@ nouveau_channel_dma(struct nouveau_drm *drm, struct nvif_device *device,
/* create channel object */ /* create channel object */
args.version = 0; args.version = 0;
args.pushbuf = nvif_handle(&chan->push.ctxdma); args.pushbuf = nvif_handle(&chan->push.ctxdma);
args.offset = chan->push.vma.offset; args.offset = chan->push.addr;
do { do {
ret = nvif_object_init(&device->object, 0, *oclass++, ret = nvif_object_init(&device->object, 0, *oclass++,
...@@ -314,11 +328,10 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) ...@@ -314,11 +328,10 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
struct nvif_device *device = chan->device; struct nvif_device *device = chan->device;
struct nouveau_cli *cli = (void *)chan->user.client; struct nouveau_cli *cli = (void *)chan->user.client;
struct nouveau_drm *drm = chan->drm; struct nouveau_drm *drm = chan->drm;
struct nvkm_mmu *mmu = nvxx_mmu(device);
struct nv_dma_v0 args = {}; struct nv_dma_v0 args = {};
int ret, i; int ret, i;
nvif_object_map(&chan->user); nvif_object_map(&chan->user, NULL, 0);
if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) { if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
ret = nvif_notify_init(&chan->user, nouveau_channel_killed, ret = nvif_notify_init(&chan->user, nouveau_channel_killed,
...@@ -339,7 +352,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) ...@@ -339,7 +352,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
args.target = NV_DMA_V0_TARGET_VM; args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_VM; args.access = NV_DMA_V0_ACCESS_VM;
args.start = 0; args.start = 0;
args.limit = cli->vm->mmu->limit - 1; args.limit = cli->vmm.vmm.limit - 1;
} else { } else {
args.target = NV_DMA_V0_TARGET_VRAM; args.target = NV_DMA_V0_TARGET_VRAM;
args.access = NV_DMA_V0_ACCESS_RDWR; args.access = NV_DMA_V0_ACCESS_RDWR;
...@@ -356,7 +369,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) ...@@ -356,7 +369,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
args.target = NV_DMA_V0_TARGET_VM; args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_VM; args.access = NV_DMA_V0_ACCESS_VM;
args.start = 0; args.start = 0;
args.limit = cli->vm->mmu->limit - 1; args.limit = cli->vmm.vmm.limit - 1;
} else } else
if (chan->drm->agp.bridge) { if (chan->drm->agp.bridge) {
args.target = NV_DMA_V0_TARGET_AGP; args.target = NV_DMA_V0_TARGET_AGP;
...@@ -368,7 +381,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) ...@@ -368,7 +381,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
args.target = NV_DMA_V0_TARGET_VM; args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_RDWR; args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = 0; args.start = 0;
args.limit = mmu->limit - 1; args.limit = cli->vmm.vmm.limit - 1;
} }
ret = nvif_object_init(&chan->user, gart, NV_DMA_IN_MEMORY, ret = nvif_object_init(&chan->user, gart, NV_DMA_IN_MEMORY,
......
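Condensed, the reworked pushbuf setup above now reads roughly as follows (error paths elided):

	chan->push.addr = chan->push.buffer->bo.offset;
	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
		/* map the pushbuf into the client's per-client VMM and
		 * address it via its virtual address, not the BO offset */
		ret = nouveau_vma_new(chan->push.buffer, &cli->vmm,
				      &chan->push.vma);
		if (ret == 0)
			chan->push.addr = chan->push.vma->addr;
	}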
...@@ -16,8 +16,9 @@ struct nouveau_channel { ...@@ -16,8 +16,9 @@ struct nouveau_channel {
struct { struct {
struct nouveau_bo *buffer; struct nouveau_bo *buffer;
struct nvkm_vma vma; struct nouveau_vma *vma;
struct nvif_object ctxdma; struct nvif_object ctxdma;
u64 addr;
} push; } push;
/* TODO: this will be reworked in the near future */ /* TODO: this will be reworked in the near future */
......
#ifndef __NOUVEAU_DISPLAY_H__ #ifndef __NOUVEAU_DISPLAY_H__
#define __NOUVEAU_DISPLAY_H__ #define __NOUVEAU_DISPLAY_H__
#include <subdev/mmu.h>
#include "nouveau_drv.h" #include "nouveau_drv.h"
struct nouveau_framebuffer { struct nouveau_framebuffer {
struct drm_framebuffer base; struct drm_framebuffer base;
struct nouveau_bo *nvbo; struct nouveau_bo *nvbo;
struct nvkm_vma vma; struct nouveau_vma *vma;
u32 r_handle; u32 r_handle;
u32 r_format; u32 r_format;
u32 r_pitch; u32 r_pitch;
......
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#include "nouveau_drv.h" #include "nouveau_drv.h"
#include "nouveau_dma.h" #include "nouveau_dma.h"
#include "nouveau_vmm.h"
void void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords) OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
...@@ -71,11 +72,11 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout) ...@@ -71,11 +72,11 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
return -EBUSY; return -EBUSY;
} }
if (val < chan->push.vma.offset || if (val < chan->push.addr ||
val > chan->push.vma.offset + (chan->dma.max << 2)) val > chan->push.addr + (chan->dma.max << 2))
return -EINVAL; return -EINVAL;
return (val - chan->push.vma.offset) >> 2; return (val - chan->push.addr) >> 2;
} }
void void
...@@ -84,13 +85,13 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo, ...@@ -84,13 +85,13 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
{ {
struct nouveau_cli *cli = (void *)chan->user.client; struct nouveau_cli *cli = (void *)chan->user.client;
struct nouveau_bo *pb = chan->push.buffer; struct nouveau_bo *pb = chan->push.buffer;
struct nvkm_vma *vma; struct nouveau_vma *vma;
int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base; int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
u64 offset; u64 offset;
vma = nouveau_bo_vma_find(bo, cli->vm); vma = nouveau_vma_find(bo, &cli->vmm);
BUG_ON(!vma); BUG_ON(!vma);
offset = vma->offset + delta; offset = vma->addr + delta;
BUG_ON(chan->dma.ib_free < 1); BUG_ON(chan->dma.ib_free < 1);
...@@ -224,7 +225,7 @@ nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size) ...@@ -224,7 +225,7 @@ nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
* instruct the GPU to jump back to the start right * instruct the GPU to jump back to the start right
* after processing the currently pending commands. * after processing the currently pending commands.
*/ */
OUT_RING(chan, chan->push.vma.offset | 0x20000000); OUT_RING(chan, chan->push.addr | 0x20000000);
/* wait for GET to depart from the skips area. /* wait for GET to depart from the skips area.
* prevents writing GET==PUT and causing a race * prevents writing GET==PUT and causing a race
......
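As a worked example of the READ_GET arithmetic above:

	/* push.addr = 0x20100000, GET read back as 0x20100040:
	 *	(0x20100040 - 0x20100000) >> 2 == 16
	 * i.e. the GPU is currently fetching dword 16 of the push buffer. */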
...@@ -140,7 +140,7 @@ BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data) ...@@ -140,7 +140,7 @@ BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data)
#define WRITE_PUT(val) do { \ #define WRITE_PUT(val) do { \
mb(); \ mb(); \
nouveau_bo_rd32(chan->push.buffer, 0); \ nouveau_bo_rd32(chan->push.buffer, 0); \
nvif_wr32(&chan->user, chan->user_put, ((val) << 2) + chan->push.vma.offset); \ nvif_wr32(&chan->user, chan->user_put, ((val) << 2) + chan->push.addr);\
} while (0) } while (0)
static inline void static inline void
......
...@@ -111,33 +111,119 @@ nouveau_name(struct drm_device *dev) ...@@ -111,33 +111,119 @@ nouveau_name(struct drm_device *dev)
return nouveau_platform_name(to_platform_device(dev->dev)); return nouveau_platform_name(to_platform_device(dev->dev));
} }
static inline bool
nouveau_cli_work_ready(struct dma_fence *fence, bool wait)
{
if (!dma_fence_is_signaled(fence)) {
if (!wait)
return false;
WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
}
dma_fence_put(fence);
return true;
}
static void
nouveau_cli_work_flush(struct nouveau_cli *cli, bool wait)
{
struct nouveau_cli_work *work, *wtmp;
mutex_lock(&cli->lock);
list_for_each_entry_safe(work, wtmp, &cli->worker, head) {
if (!work->fence || nouveau_cli_work_ready(work->fence, wait)) {
list_del(&work->head);
work->func(work);
}
}
mutex_unlock(&cli->lock);
}
static void
nouveau_cli_work_fence(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct nouveau_cli_work *work = container_of(cb, typeof(*work), cb);
schedule_work(&work->cli->work);
}
void
nouveau_cli_work_queue(struct nouveau_cli *cli, struct dma_fence *fence,
struct nouveau_cli_work *work)
{
work->fence = dma_fence_get(fence);
work->cli = cli;
mutex_lock(&cli->lock);
list_add_tail(&work->head, &cli->worker);
mutex_unlock(&cli->lock);
if (dma_fence_add_callback(fence, &work->cb, nouveau_cli_work_fence))
nouveau_cli_work_fence(fence, &work->cb);
}
static void
nouveau_cli_work(struct work_struct *w)
{
struct nouveau_cli *cli = container_of(w, typeof(*cli), work);
nouveau_cli_work_flush(cli, false);
}
static void static void
nouveau_cli_fini(struct nouveau_cli *cli) nouveau_cli_fini(struct nouveau_cli *cli)
{ {
nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL); nouveau_cli_work_flush(cli, true);
usif_client_fini(cli); usif_client_fini(cli);
nouveau_vmm_fini(&cli->vmm);
nvif_mmu_fini(&cli->mmu);
nvif_device_fini(&cli->device); nvif_device_fini(&cli->device);
mutex_lock(&cli->drm->master.lock);
nvif_client_fini(&cli->base); nvif_client_fini(&cli->base);
mutex_unlock(&cli->drm->master.lock);
} }
static int static int
nouveau_cli_init(struct nouveau_drm *drm, const char *sname, nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
struct nouveau_cli *cli) struct nouveau_cli *cli)
{ {
static const struct nvif_mclass
mems[] = {
{ NVIF_CLASS_MEM_GF100, -1 },
{ NVIF_CLASS_MEM_NV50 , -1 },
{ NVIF_CLASS_MEM_NV04 , -1 },
{}
};
static const struct nvif_mclass
mmus[] = {
{ NVIF_CLASS_MMU_GF100, -1 },
{ NVIF_CLASS_MMU_NV50 , -1 },
{ NVIF_CLASS_MMU_NV04 , -1 },
{}
};
static const struct nvif_mclass
vmms[] = {
{ NVIF_CLASS_VMM_GP100, -1 },
{ NVIF_CLASS_VMM_GM200, -1 },
{ NVIF_CLASS_VMM_GF100, -1 },
{ NVIF_CLASS_VMM_NV50 , -1 },
{ NVIF_CLASS_VMM_NV04 , -1 },
{}
};
u64 device = nouveau_name(drm->dev); u64 device = nouveau_name(drm->dev);
int ret; int ret;
snprintf(cli->name, sizeof(cli->name), "%s", sname); snprintf(cli->name, sizeof(cli->name), "%s", sname);
cli->dev = drm->dev; cli->drm = drm;
mutex_init(&cli->mutex); mutex_init(&cli->mutex);
usif_client_init(cli); usif_client_init(cli);
if (cli == &drm->client) { INIT_WORK(&cli->work, nouveau_cli_work);
INIT_LIST_HEAD(&cli->worker);
mutex_init(&cli->lock);
if (cli == &drm->master) {
ret = nvif_driver_init(NULL, nouveau_config, nouveau_debug, ret = nvif_driver_init(NULL, nouveau_config, nouveau_debug,
cli->name, device, &cli->base); cli->name, device, &cli->base);
} else { } else {
ret = nvif_client_init(&drm->client.base, cli->name, device, mutex_lock(&drm->master.lock);
ret = nvif_client_init(&drm->master.base, cli->name, device,
&cli->base); &cli->base);
mutex_unlock(&drm->master.lock);
} }
if (ret) { if (ret) {
NV_ERROR(drm, "Client allocation failed: %d\n", ret); NV_ERROR(drm, "Client allocation failed: %d\n", ret);
...@@ -154,6 +240,38 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname, ...@@ -154,6 +240,38 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
goto done; goto done;
} }
ret = nvif_mclass(&cli->device.object, mmus);
if (ret < 0) {
NV_ERROR(drm, "No supported MMU class\n");
goto done;
}
ret = nvif_mmu_init(&cli->device.object, mmus[ret].oclass, &cli->mmu);
if (ret) {
NV_ERROR(drm, "MMU allocation failed: %d\n", ret);
goto done;
}
ret = nvif_mclass(&cli->mmu.object, vmms);
if (ret < 0) {
NV_ERROR(drm, "No supported VMM class\n");
goto done;
}
ret = nouveau_vmm_init(cli, vmms[ret].oclass, &cli->vmm);
if (ret) {
NV_ERROR(drm, "VMM allocation failed: %d\n", ret);
goto done;
}
ret = nvif_mclass(&cli->mmu.object, mems);
if (ret < 0) {
NV_ERROR(drm, "No supported MEM class\n");
goto done;
}
cli->mem = &mems[ret];
return 0;
done: done:
if (ret) if (ret)
nouveau_cli_fini(cli); nouveau_cli_fini(cli);
...@@ -433,6 +551,10 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) ...@@ -433,6 +551,10 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = drm; dev->dev_private = drm;
drm->dev = dev; drm->dev = dev;
ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
if (ret)
return ret;
ret = nouveau_cli_init(drm, "DRM", &drm->client); ret = nouveau_cli_init(drm, "DRM", &drm->client);
if (ret) if (ret)
return ret; return ret;
...@@ -456,21 +578,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) ...@@ -456,21 +578,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
nouveau_vga_init(drm); nouveau_vga_init(drm);
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
if (!nvxx_device(&drm->client.device)->mmu) {
ret = -ENOSYS;
goto fail_device;
}
ret = nvkm_vm_new(nvxx_device(&drm->client.device),
0, (1ULL << 40), 0x1000, NULL,
&drm->client.vm);
if (ret)
goto fail_device;
nvxx_client(&drm->client.base)->vm = drm->client.vm;
}
ret = nouveau_ttm_init(drm); ret = nouveau_ttm_init(drm);
if (ret) if (ret)
goto fail_ttm; goto fail_ttm;
...@@ -516,8 +623,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) ...@@ -516,8 +623,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
nouveau_ttm_fini(drm); nouveau_ttm_fini(drm);
fail_ttm: fail_ttm:
nouveau_vga_fini(drm); nouveau_vga_fini(drm);
fail_device:
nouveau_cli_fini(&drm->client); nouveau_cli_fini(&drm->client);
nouveau_cli_fini(&drm->master);
kfree(drm); kfree(drm);
return ret; return ret;
} }
...@@ -550,6 +657,7 @@ nouveau_drm_unload(struct drm_device *dev) ...@@ -550,6 +657,7 @@ nouveau_drm_unload(struct drm_device *dev)
if (drm->hdmi_device) if (drm->hdmi_device)
pci_dev_put(drm->hdmi_device); pci_dev_put(drm->hdmi_device);
nouveau_cli_fini(&drm->client); nouveau_cli_fini(&drm->client);
nouveau_cli_fini(&drm->master);
kfree(drm); kfree(drm);
} }
...@@ -618,7 +726,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime) ...@@ -618,7 +726,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
} }
NV_DEBUG(drm, "suspending object tree...\n"); NV_DEBUG(drm, "suspending object tree...\n");
ret = nvif_client_suspend(&drm->client.base); ret = nvif_client_suspend(&drm->master.base);
if (ret) if (ret)
goto fail_client; goto fail_client;
...@@ -642,7 +750,7 @@ nouveau_do_resume(struct drm_device *dev, bool runtime) ...@@ -642,7 +750,7 @@ nouveau_do_resume(struct drm_device *dev, bool runtime)
struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_drm *drm = nouveau_drm(dev);
NV_DEBUG(drm, "resuming object tree...\n"); NV_DEBUG(drm, "resuming object tree...\n");
nvif_client_resume(&drm->client.base); nvif_client_resume(&drm->master.base);
NV_DEBUG(drm, "resuming fence...\n"); NV_DEBUG(drm, "resuming fence...\n");
if (drm->fence && nouveau_fence(drm)->resume) if (drm->fence && nouveau_fence(drm)->resume)
...@@ -850,15 +958,6 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) ...@@ -850,15 +958,6 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
cli->base.super = false; cli->base.super = false;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
ret = nvkm_vm_new(nvxx_device(&drm->client.device), 0,
(1ULL << 40), 0x1000, NULL, &cli->vm);
if (ret)
goto done;
nvxx_client(&cli->base)->vm = cli->vm;
}
fpriv->driver_priv = cli; fpriv->driver_priv = cli;
mutex_lock(&drm->client.mutex); mutex_lock(&drm->client.mutex);
......
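The class tables added to nouveau_cli_init() feed nvif_mclass(), which (as the usage above implies) walks the newest-first list and returns the index of the first class the parent object supports, or a negative error; a sketch of the pattern:

	static const struct nvif_mclass
	vmms[] = {
		{ NVIF_CLASS_VMM_GP100, -1 },	/* newest first */
		{ NVIF_CLASS_VMM_NV04 , -1 },	/* -1 == any version */
		{}				/* terminator */
	};
	int cid = nvif_mclass(&cli->mmu.object, vmms);
	if (cid < 0)
		return -ENOSYS;			/* nothing supported */
	ret = nouveau_vmm_init(cli, vmms[cid].oclass, &cli->vmm);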
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
#define DRIVER_EMAIL "nouveau@lists.freedesktop.org" #define DRIVER_EMAIL "nouveau@lists.freedesktop.org"
#define DRIVER_NAME "nouveau" #define DRIVER_NAME "nouveau"
#define DRIVER_DESC "nVidia Riva/TNT/GeForce/Quadro/Tesla" #define DRIVER_DESC "nVidia Riva/TNT/GeForce/Quadro/Tesla/Tegra K1+"
#define DRIVER_DATE "20120801" #define DRIVER_DATE "20120801"
#define DRIVER_MAJOR 1 #define DRIVER_MAJOR 1
...@@ -42,6 +42,8 @@ ...@@ -42,6 +42,8 @@
#include <nvif/client.h> #include <nvif/client.h>
#include <nvif/device.h> #include <nvif/device.h>
#include <nvif/ioctl.h> #include <nvif/ioctl.h>
#include <nvif/mmu.h>
#include <nvif/vmm.h>
#include <drm/drmP.h> #include <drm/drmP.h>
...@@ -61,6 +63,7 @@ struct platform_device; ...@@ -61,6 +63,7 @@ struct platform_device;
#include "nouveau_fence.h" #include "nouveau_fence.h"
#include "nouveau_bios.h" #include "nouveau_bios.h"
#include "nouveau_vmm.h"
struct nouveau_drm_tile { struct nouveau_drm_tile {
struct nouveau_fence *fence; struct nouveau_fence *fence;
...@@ -86,19 +89,37 @@ enum nouveau_drm_handle { ...@@ -86,19 +89,37 @@ enum nouveau_drm_handle {
struct nouveau_cli { struct nouveau_cli {
struct nvif_client base; struct nvif_client base;
struct drm_device *dev; struct nouveau_drm *drm;
struct mutex mutex; struct mutex mutex;
struct nvif_device device; struct nvif_device device;
struct nvif_mmu mmu;
struct nouveau_vmm vmm;
const struct nvif_mclass *mem;
struct nvkm_vm *vm; /*XXX*/
struct list_head head; struct list_head head;
void *abi16; void *abi16;
struct list_head objects; struct list_head objects;
struct list_head notifys; struct list_head notifys;
char name[32]; char name[32];
struct work_struct work;
struct list_head worker;
struct mutex lock;
}; };
struct nouveau_cli_work {
void (*func)(struct nouveau_cli_work *);
struct nouveau_cli *cli;
struct list_head head;
struct dma_fence *fence;
struct dma_fence_cb cb;
};
void nouveau_cli_work_queue(struct nouveau_cli *, struct dma_fence *,
struct nouveau_cli_work *);
static inline struct nouveau_cli * static inline struct nouveau_cli *
nouveau_cli(struct drm_file *fpriv) nouveau_cli(struct drm_file *fpriv)
{ {
...@@ -109,6 +130,7 @@ nouveau_cli(struct drm_file *fpriv) ...@@ -109,6 +130,7 @@ nouveau_cli(struct drm_file *fpriv)
#include <nvif/device.h> #include <nvif/device.h>
struct nouveau_drm { struct nouveau_drm {
struct nouveau_cli master;
struct nouveau_cli client; struct nouveau_cli client;
struct drm_device *dev; struct drm_device *dev;
...@@ -133,6 +155,9 @@ struct nouveau_drm { ...@@ -133,6 +155,9 @@ struct nouveau_drm {
struct nouveau_channel *chan; struct nouveau_channel *chan;
struct nvif_object copy; struct nvif_object copy;
int mtrr; int mtrr;
int type_vram;
int type_host;
int type_ncoh;
} ttm; } ttm;
/* GEM interface support */ /* GEM interface support */
...@@ -204,7 +229,7 @@ void nouveau_drm_device_remove(struct drm_device *dev); ...@@ -204,7 +229,7 @@ void nouveau_drm_device_remove(struct drm_device *dev);
#define NV_PRINTK(l,c,f,a...) do { \ #define NV_PRINTK(l,c,f,a...) do { \
struct nouveau_cli *_cli = (c); \ struct nouveau_cli *_cli = (c); \
dev_##l(_cli->dev->dev, "%s: "f, _cli->name, ##a); \ dev_##l(_cli->drm->dev->dev, "%s: "f, _cli->name, ##a); \
} while(0) } while(0)
#define NV_FATAL(drm,f,a...) NV_PRINTK(crit, &(drm)->client, f, ##a) #define NV_FATAL(drm,f,a...) NV_PRINTK(crit, &(drm)->client, f, ##a)
#define NV_ERROR(drm,f,a...) NV_PRINTK(err, &(drm)->client, f, ##a) #define NV_ERROR(drm,f,a...) NV_PRINTK(err, &(drm)->client, f, ##a)
......
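A minimal sketch of how a caller uses the nouveau_cli_work interface declared above, modelled on the nouveau_gem.c rework below (the wrapper struct and functions are illustrative, not part of this series; the real user also falls back to a synchronous fence wait when the allocation fails):

	struct my_work {
		struct nouveau_cli_work work;	/* must embed the base */
		struct nouveau_vma *vma;
	};

	static void
	my_work_func(struct nouveau_cli_work *w)
	{
		struct my_work *work = container_of(w, typeof(*work), work);
		nouveau_vma_del(&work->vma);	/* fence has signalled */
		kfree(work);
	}

	static void
	my_defer_unmap(struct nouveau_cli *cli, struct dma_fence *fence,
		       struct nouveau_vma *vma)
	{
		struct my_work *work = kmalloc(sizeof(*work), GFP_KERNEL);
		if (!work)
			return;
		work->work.func = my_work_func;
		work->vma = vma;
		/* func runs on the client's workqueue once fence signals */
		nouveau_cli_work_queue(cli, fence, &work->work);
	}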
...@@ -48,6 +48,7 @@ ...@@ -48,6 +48,7 @@
#include "nouveau_bo.h" #include "nouveau_bo.h"
#include "nouveau_fbcon.h" #include "nouveau_fbcon.h"
#include "nouveau_chan.h" #include "nouveau_chan.h"
#include "nouveau_vmm.h"
#include "nouveau_crtc.h" #include "nouveau_crtc.h"
...@@ -348,7 +349,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, ...@@ -348,7 +349,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
chan = nouveau_nofbaccel ? NULL : drm->channel; chan = nouveau_nofbaccel ? NULL : drm->channel;
if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) { if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
ret = nouveau_bo_vma_add(nvbo, drm->client.vm, &fb->vma); ret = nouveau_vma_new(nvbo, &drm->client.vmm, &fb->vma);
if (ret) { if (ret) {
NV_ERROR(drm, "failed to map fb into chan: %d\n", ret); NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
chan = NULL; chan = NULL;
...@@ -402,7 +403,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, ...@@ -402,7 +403,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
out_unlock: out_unlock:
if (chan) if (chan)
nouveau_bo_vma_del(fb->nvbo, &fb->vma); nouveau_vma_del(&fb->vma);
nouveau_bo_unmap(fb->nvbo); nouveau_bo_unmap(fb->nvbo);
out_unpin: out_unpin:
nouveau_bo_unpin(fb->nvbo); nouveau_bo_unpin(fb->nvbo);
...@@ -429,7 +430,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon) ...@@ -429,7 +430,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
drm_fb_helper_fini(&fbcon->helper); drm_fb_helper_fini(&fbcon->helper);
if (nouveau_fb->nvbo) { if (nouveau_fb->nvbo) {
nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma); nouveau_vma_del(&nouveau_fb->vma);
nouveau_bo_unmap(nouveau_fb->nvbo); nouveau_bo_unmap(nouveau_fb->nvbo);
nouveau_bo_unpin(nouveau_fb->nvbo); nouveau_bo_unpin(nouveau_fb->nvbo);
drm_framebuffer_unreference(&nouveau_fb->base); drm_framebuffer_unreference(&nouveau_fb->base);
......
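Note the pairing here: judging by the reworked nouveau_gem_object_open()/close() below, nouveau_vma_new() is a find-or-map operation that takes a reference on an existing mapping, and nouveau_vma_del() releases the address space only on the last reference:

	struct nouveau_vma *vma;

	ret = nouveau_vma_new(nvbo, &cli->vmm, &vma);	/* find or map */
	if (ret)
		return ret;
	/* vma->addr is the BO's address within this client's VMM */
	nouveau_vma_del(&vma);	/* drop a ref; unmaps and frees on last */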
...@@ -199,62 +199,6 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha ...@@ -199,62 +199,6 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
WARN_ON(ret); WARN_ON(ret);
} }
struct nouveau_fence_work {
struct work_struct work;
struct dma_fence_cb cb;
void (*func)(void *);
void *data;
};
static void
nouveau_fence_work_handler(struct work_struct *kwork)
{
struct nouveau_fence_work *work = container_of(kwork, typeof(*work), work);
work->func(work->data);
kfree(work);
}
static void nouveau_fence_work_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct nouveau_fence_work *work = container_of(cb, typeof(*work), cb);
schedule_work(&work->work);
}
void
nouveau_fence_work(struct dma_fence *fence,
void (*func)(void *), void *data)
{
struct nouveau_fence_work *work;
if (dma_fence_is_signaled(fence))
goto err;
work = kmalloc(sizeof(*work), GFP_KERNEL);
if (!work) {
/*
* this might not be a nouveau fence any more,
* so force a lazy wait here
*/
WARN_ON(nouveau_fence_wait((struct nouveau_fence *)fence,
true, false));
goto err;
}
INIT_WORK(&work->work, nouveau_fence_work_handler);
work->func = func;
work->data = data;
if (dma_fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
goto err_free;
return;
err_free:
kfree(work);
err:
func(data);
}
int int
nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan) nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{ {
...@@ -474,8 +418,6 @@ nouveau_fence_new(struct nouveau_channel *chan, bool sysmem, ...@@ -474,8 +418,6 @@ nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
if (!fence) if (!fence)
return -ENOMEM; return -ENOMEM;
fence->sysmem = sysmem;
ret = nouveau_fence_emit(fence, chan); ret = nouveau_fence_emit(fence, chan);
if (ret) if (ret)
nouveau_fence_unref(&fence); nouveau_fence_unref(&fence);
......
...@@ -12,8 +12,6 @@ struct nouveau_fence { ...@@ -12,8 +12,6 @@ struct nouveau_fence {
struct list_head head; struct list_head head;
bool sysmem;
struct nouveau_channel __rcu *channel; struct nouveau_channel __rcu *channel;
unsigned long timeout; unsigned long timeout;
}; };
...@@ -24,7 +22,6 @@ void nouveau_fence_unref(struct nouveau_fence **); ...@@ -24,7 +22,6 @@ void nouveau_fence_unref(struct nouveau_fence **);
int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *); int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
bool nouveau_fence_done(struct nouveau_fence *); bool nouveau_fence_done(struct nouveau_fence *);
void nouveau_fence_work(struct dma_fence *, void (*)(void *), void *);
int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr); int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr); int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr);
...@@ -90,14 +87,12 @@ int nouveau_flip_complete(struct nvif_notify *); ...@@ -90,14 +87,12 @@ int nouveau_flip_complete(struct nvif_notify *);
struct nv84_fence_chan { struct nv84_fence_chan {
struct nouveau_fence_chan base; struct nouveau_fence_chan base;
struct nvkm_vma vma; struct nouveau_vma *vma;
struct nvkm_vma vma_gart;
}; };
struct nv84_fence_priv { struct nv84_fence_priv {
struct nouveau_fence_priv base; struct nouveau_fence_priv base;
struct nouveau_bo *bo; struct nouveau_bo *bo;
struct nouveau_bo *bo_gart;
u32 *suspend; u32 *suspend;
struct mutex mutex; struct mutex mutex;
}; };
......
...@@ -31,6 +31,10 @@ ...@@ -31,6 +31,10 @@
#include "nouveau_ttm.h" #include "nouveau_ttm.h"
#include "nouveau_gem.h" #include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"
#include <nvif/class.h>
void void
nouveau_gem_object_del(struct drm_gem_object *gem) nouveau_gem_object_del(struct drm_gem_object *gem)
...@@ -64,66 +68,61 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv) ...@@ -64,66 +68,61 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
struct nouveau_cli *cli = nouveau_cli(file_priv); struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem); struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct nvkm_vma *vma;
struct device *dev = drm->dev->dev; struct device *dev = drm->dev->dev;
struct nouveau_vma *vma;
int ret; int ret;
if (!cli->vm) if (cli->vmm.vmm.object.oclass < NVIF_CLASS_VMM_NV50)
return 0; return 0;
ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL); ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
if (ret) if (ret)
return ret; return ret;
vma = nouveau_bo_vma_find(nvbo, cli->vm); ret = pm_runtime_get_sync(dev);
if (!vma) { if (ret < 0 && ret != -EACCES)
vma = kzalloc(sizeof(*vma), GFP_KERNEL); goto out;
if (!vma) {
ret = -ENOMEM;
goto out;
}
ret = pm_runtime_get_sync(dev);
if (ret < 0 && ret != -EACCES) {
kfree(vma);
goto out;
}
ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
if (ret)
kfree(vma);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
} else {
vma->refcount++;
}
ret = nouveau_vma_new(nvbo, &cli->vmm, &vma);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
out: out:
ttm_bo_unreserve(&nvbo->bo); ttm_bo_unreserve(&nvbo->bo);
return ret; return ret;
} }
struct nouveau_gem_object_unmap {
struct nouveau_cli_work work;
struct nouveau_vma *vma;
};
static void static void
nouveau_gem_object_delete(void *data) nouveau_gem_object_delete(struct nouveau_vma *vma)
{ {
struct nvkm_vma *vma = data; nouveau_vma_del(&vma);
nvkm_vm_unmap(vma);
nvkm_vm_put(vma);
kfree(vma);
} }
static void static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma) nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
{
struct nouveau_gem_object_unmap *work =
container_of(w, typeof(*work), work);
nouveau_gem_object_delete(work->vma);
kfree(work);
}
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{ {
const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM; const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
struct reservation_object *resv = nvbo->bo.resv; struct reservation_object *resv = nvbo->bo.resv;
struct reservation_object_list *fobj; struct reservation_object_list *fobj;
struct nouveau_gem_object_unmap *work;
struct dma_fence *fence = NULL; struct dma_fence *fence = NULL;
fobj = reservation_object_get_list(resv); fobj = reservation_object_get_list(resv);
list_del(&vma->head); list_del_init(&vma->head);
if (fobj && fobj->shared_count > 1) if (fobj && fobj->shared_count > 1)
ttm_bo_wait(&nvbo->bo, false, false); ttm_bo_wait(&nvbo->bo, false, false);
...@@ -133,14 +132,20 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma) ...@@ -133,14 +132,20 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
else else
fence = reservation_object_get_excl(nvbo->bo.resv); fence = reservation_object_get_excl(nvbo->bo.resv);
if (fence && mapped) { if (!fence || !mapped) {
nouveau_fence_work(fence, nouveau_gem_object_delete, vma); nouveau_gem_object_delete(vma);
} else { return;
if (mapped) }
nvkm_vm_unmap(vma);
nvkm_vm_put(vma); if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
kfree(vma); WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
nouveau_gem_object_delete(vma);
return;
} }
work->work.func = nouveau_gem_object_delete_work;
work->vma = vma;
nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
} }
void void
...@@ -150,19 +155,19 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv) ...@@ -150,19 +155,19 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
struct nouveau_bo *nvbo = nouveau_gem_object(gem); struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct device *dev = drm->dev->dev; struct device *dev = drm->dev->dev;
struct nvkm_vma *vma; struct nouveau_vma *vma;
int ret; int ret;
if (!cli->vm) if (cli->vmm.vmm.object.oclass < NVIF_CLASS_VMM_NV50)
return; return;
ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL); ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
if (ret) if (ret)
return; return;
vma = nouveau_bo_vma_find(nvbo, cli->vm); vma = nouveau_vma_find(nvbo, &cli->vmm);
if (vma) { if (vma) {
if (--vma->refcount == 0) { if (--vma->refs == 0) {
ret = pm_runtime_get_sync(dev); ret = pm_runtime_get_sync(dev);
if (!WARN_ON(ret < 0 && ret != -EACCES)) { if (!WARN_ON(ret < 0 && ret != -EACCES)) {
nouveau_gem_object_unmap(nvbo, vma); nouveau_gem_object_unmap(nvbo, vma);
...@@ -179,7 +184,7 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain, ...@@ -179,7 +184,7 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
uint32_t tile_mode, uint32_t tile_flags, uint32_t tile_mode, uint32_t tile_flags,
struct nouveau_bo **pnvbo) struct nouveau_bo **pnvbo)
{ {
struct nouveau_drm *drm = nouveau_drm(cli->dev); struct nouveau_drm *drm = cli->drm;
struct nouveau_bo *nvbo; struct nouveau_bo *nvbo;
u32 flags = 0; u32 flags = 0;
int ret; int ret;
...@@ -227,7 +232,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem, ...@@ -227,7 +232,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
{ {
struct nouveau_cli *cli = nouveau_cli(file_priv); struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem); struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nvkm_vma *vma; struct nouveau_vma *vma;
if (is_power_of_2(nvbo->valid_domains)) if (is_power_of_2(nvbo->valid_domains))
rep->domain = nvbo->valid_domains; rep->domain = nvbo->valid_domains;
...@@ -236,18 +241,25 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem, ...@@ -236,18 +241,25 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
else else
rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
rep->offset = nvbo->bo.offset; rep->offset = nvbo->bo.offset;
if (cli->vm) { if (cli->vmm.vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
vma = nouveau_bo_vma_find(nvbo, cli->vm); vma = nouveau_vma_find(nvbo, &cli->vmm);
if (!vma) if (!vma)
return -EINVAL; return -EINVAL;
rep->offset = vma->offset; rep->offset = vma->addr;
} }
rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT; rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node); rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
rep->tile_mode = nvbo->tile_mode; rep->tile_mode = nvbo->mode;
rep->tile_flags = nvbo->tile_flags; rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
rep->tile_flags |= nvbo->kind << 8;
else
if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
else
rep->tile_flags |= nvbo->zeta;
return 0; return 0;
} }
...@@ -255,18 +267,11 @@ int ...@@ -255,18 +267,11 @@ int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data, nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct drm_file *file_priv) struct drm_file *file_priv)
{ {
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli = nouveau_cli(file_priv); struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
struct drm_nouveau_gem_new *req = data; struct drm_nouveau_gem_new *req = data;
struct nouveau_bo *nvbo = NULL; struct nouveau_bo *nvbo = NULL;
int ret = 0; int ret = 0;
if (!nvkm_fb_memtype_valid(fb, req->info.tile_flags)) {
NV_PRINTK(err, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
return -EINVAL;
}
ret = nouveau_gem_new(cli, req->info.size, req->align, ret = nouveau_gem_new(cli, req->info.size, req->align,
req->info.domain, req->info.tile_mode, req->info.domain, req->info.tile_mode,
req->info.tile_flags, &nvbo); req->info.tile_flags, &nvbo);
...@@ -791,7 +796,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, ...@@ -791,7 +796,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
bo[push[i].bo_index].user_priv; bo[push[i].bo_index].user_priv;
uint32_t cmd; uint32_t cmd;
cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2); cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
cmd |= 0x20000000; cmd |= 0x20000000;
if (unlikely(cmd != req->suffix0)) { if (unlikely(cmd != req->suffix0)) {
if (!nvbo->kmap.virtual) { if (!nvbo->kmap.virtual) {
...@@ -843,7 +848,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, ...@@ -843,7 +848,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
req->suffix1 = 0x00000000; req->suffix1 = 0x00000000;
} else { } else {
req->suffix0 = 0x20000000 | req->suffix0 = 0x20000000 |
(chan->push.vma.offset + ((chan->dma.cur + 2) << 2)); (chan->push.addr + ((chan->dma.cur + 2) << 2));
req->suffix1 = 0x00000000; req->suffix1 = 0x00000000;
} }
......
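Worked example for the tile_flags reconstruction in nouveau_gem_info() above (assuming the uapi value NOUVEAU_GEM_TILE_NONCONTIG == 0x00000008):

	/* Fermi+ BO, kind 0xdb, non-contiguous:
	 *	0x00000008 | (0xdb << 8)               = 0x0000db08
	 * Tesla BO, kind 0x70, comp 0x1, contiguous:
	 *	0x00000000 | (0x70 << 8) | (0x1 << 16) = 0x00017000
	 * pre-NV50 BOs simply report nvbo->zeta. */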
...@@ -6,9 +6,6 @@ ...@@ -6,9 +6,6 @@
#include "nouveau_drv.h" #include "nouveau_drv.h"
#include "nouveau_bo.h" #include "nouveau_bo.h"
#define nouveau_bo_tile_layout(nvbo) \
((nvbo)->tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)
static inline struct nouveau_bo * static inline struct nouveau_bo *
nouveau_gem_object(struct drm_gem_object *gem) nouveau_gem_object(struct drm_gem_object *gem)
{ {
......
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "nouveau_mem.h"
#include "nouveau_drv.h"
#include "nouveau_bo.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <nvif/class.h>
#include <nvif/if000a.h>
#include <nvif/if500b.h>
#include <nvif/if500d.h>
#include <nvif/if900b.h>
#include <nvif/if900d.h>
int
nouveau_mem_map(struct nouveau_mem *mem,
struct nvif_vmm *vmm, struct nvif_vma *vma)
{
union {
struct nv50_vmm_map_v0 nv50;
struct gf100_vmm_map_v0 gf100;
} args;
u32 argc = 0;
bool super;
int ret;
switch (vmm->object.oclass) {
case NVIF_CLASS_VMM_NV04:
break;
case NVIF_CLASS_VMM_NV50:
args.nv50.version = 0;
args.nv50.ro = 0;
args.nv50.priv = 0;
args.nv50.kind = mem->kind;
args.nv50.comp = mem->comp;
argc = sizeof(args.nv50);
break;
case NVIF_CLASS_VMM_GF100:
case NVIF_CLASS_VMM_GM200:
case NVIF_CLASS_VMM_GP100:
args.gf100.version = 0;
if (mem->mem.type & NVIF_MEM_VRAM)
args.gf100.vol = 0;
else
args.gf100.vol = 1;
args.gf100.ro = 0;
args.gf100.priv = 0;
args.gf100.kind = mem->kind;
argc = sizeof(args.gf100);
break;
default:
WARN_ON(1);
return -ENOSYS;
}
super = vmm->object.client->super;
vmm->object.client->super = true;
ret = nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc,
&mem->mem, 0);
vmm->object.client->super = super;
return ret;
}
void
nouveau_mem_fini(struct nouveau_mem *mem)
{
nvif_vmm_put(&mem->cli->drm->client.vmm.vmm, &mem->vma[1]);
nvif_vmm_put(&mem->cli->drm->client.vmm.vmm, &mem->vma[0]);
mutex_lock(&mem->cli->drm->master.lock);
nvif_mem_fini(&mem->mem);
mutex_unlock(&mem->cli->drm->master.lock);
}
int
nouveau_mem_host(struct ttm_mem_reg *reg, struct ttm_dma_tt *tt)
{
struct nouveau_mem *mem = nouveau_mem(reg);
struct nouveau_cli *cli = mem->cli;
struct nouveau_drm *drm = cli->drm;
struct nvif_mmu *mmu = &cli->mmu;
struct nvif_mem_ram_v0 args = {};
bool super = cli->base.super;
u8 type;
int ret;
if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED)
type = drm->ttm.type_ncoh;
else
type = drm->ttm.type_host;
if (mem->kind && !(mmu->type[type].type & NVIF_MEM_KIND))
mem->comp = mem->kind = 0;
if (mem->comp && !(mmu->type[type].type & NVIF_MEM_COMP)) {
if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
mem->kind = mmu->kind[mem->kind];
mem->comp = 0;
}
if (tt->ttm.sg) args.sgl = tt->ttm.sg->sgl;
else args.dma = tt->dma_address;
mutex_lock(&drm->master.lock);
cli->base.super = true;
ret = nvif_mem_init_type(mmu, cli->mem->oclass, type, PAGE_SHIFT,
reg->num_pages << PAGE_SHIFT,
&args, sizeof(args), &mem->mem);
cli->base.super = super;
mutex_unlock(&drm->master.lock);
return ret;
}
int
nouveau_mem_vram(struct ttm_mem_reg *reg, bool contig, u8 page)
{
struct nouveau_mem *mem = nouveau_mem(reg);
struct nouveau_cli *cli = mem->cli;
struct nouveau_drm *drm = cli->drm;
struct nvif_mmu *mmu = &cli->mmu;
bool super = cli->base.super;
u64 size = ALIGN(reg->num_pages << PAGE_SHIFT, 1 << page);
int ret;
mutex_lock(&drm->master.lock);
cli->base.super = true;
switch (cli->mem->oclass) {
case NVIF_CLASS_MEM_GF100:
ret = nvif_mem_init_type(mmu, cli->mem->oclass,
drm->ttm.type_vram, page, size,
&(struct gf100_mem_v0) {
.contig = contig,
}, sizeof(struct gf100_mem_v0),
&mem->mem);
break;
case NVIF_CLASS_MEM_NV50:
ret = nvif_mem_init_type(mmu, cli->mem->oclass,
drm->ttm.type_vram, page, size,
&(struct nv50_mem_v0) {
.bankswz = mmu->kind[mem->kind] == 2,
.contig = contig,
}, sizeof(struct nv50_mem_v0),
&mem->mem);
break;
default:
ret = -ENOSYS;
WARN_ON(1);
break;
}
cli->base.super = super;
mutex_unlock(&drm->master.lock);
reg->start = mem->mem.addr >> PAGE_SHIFT;
return ret;
}
void
nouveau_mem_del(struct ttm_mem_reg *reg)
{
struct nouveau_mem *mem = nouveau_mem(reg);
nouveau_mem_fini(mem);
kfree(reg->mm_node);
reg->mm_node = NULL;
}
int
nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
struct ttm_mem_reg *reg)
{
struct nouveau_mem *mem;
if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
return -ENOMEM;
mem->cli = cli;
mem->kind = kind;
mem->comp = comp;
reg->mm_node = mem;
return 0;
}
#ifndef __NOUVEAU_MEM_H__
#define __NOUVEAU_MEM_H__
#include <drm/ttm/ttm_bo_api.h>
struct ttm_dma_tt;
#include <nvif/mem.h>
#include <nvif/vmm.h>
static inline struct nouveau_mem *
nouveau_mem(struct ttm_mem_reg *reg)
{
return reg->mm_node;
}
struct nouveau_mem {
struct nouveau_cli *cli;
u8 kind;
u8 comp;
struct nvif_mem mem;
struct nvif_vma vma[2];
};
int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
struct ttm_mem_reg *);
void nouveau_mem_del(struct ttm_mem_reg *);
int nouveau_mem_vram(struct ttm_mem_reg *, bool contig, u8 page);
int nouveau_mem_host(struct ttm_mem_reg *, struct ttm_dma_tt *);
void nouveau_mem_fini(struct nouveau_mem *);
int nouveau_mem_map(struct nouveau_mem *, struct nvif_vmm *, struct nvif_vma *);
#endif
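Tying the interface above together, the typical call sequence is (error handling elided; cf. the sgdma and TTM reworks below):

	ret = nouveau_mem_new(cli, kind, comp, reg);	/* attach to TTM reg */
	ret = nouveau_mem_host(reg, &nvbe->ttm);	/* back with host pages */
	/* ...or, for VRAM regions: */
	ret = nouveau_mem_vram(reg, contig, page);
	ret = nouveau_mem_map(nouveau_mem(reg),		/* map into a VMM */
			      &cli->vmm.vmm, &mem->vma[0]);
	nouveau_mem_del(reg);				/* fini + free */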
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
#include <linux/slab.h> #include <linux/slab.h>
#include "nouveau_drv.h" #include "nouveau_drv.h"
#include "nouveau_mem.h"
#include "nouveau_ttm.h" #include "nouveau_ttm.h"
struct nouveau_sgdma_be { struct nouveau_sgdma_be {
...@@ -9,7 +10,7 @@ struct nouveau_sgdma_be { ...@@ -9,7 +10,7 @@ struct nouveau_sgdma_be {
* nouveau_bo.c works properly, otherwise have to move them here * */
*/ */
struct ttm_dma_tt ttm; struct ttm_dma_tt ttm;
struct nvkm_mem *node; struct nouveau_mem *mem;
}; };
static void static void
...@@ -27,19 +28,20 @@ static int ...@@ -27,19 +28,20 @@ static int
nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg) nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
{ {
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct nvkm_mem *node = reg->mm_node; struct nouveau_mem *mem = nouveau_mem(reg);
int ret;
if (ttm->sg) {
node->sg = ttm->sg; ret = nouveau_mem_host(reg, &nvbe->ttm);
node->pages = NULL; if (ret)
} else { return ret;
node->sg = NULL;
node->pages = nvbe->ttm.dma_address; ret = nouveau_mem_map(mem, &mem->cli->vmm.vmm, &mem->vma[0]);
if (ret) {
nouveau_mem_fini(mem);
return ret;
} }
node->size = (reg->num_pages << PAGE_SHIFT) >> 12;
nvkm_vm_map(&node->vma[0], node); nvbe->mem = mem;
nvbe->node = node;
return 0; return 0;
} }
...@@ -47,7 +49,7 @@ static int ...@@ -47,7 +49,7 @@ static int
nv04_sgdma_unbind(struct ttm_tt *ttm) nv04_sgdma_unbind(struct ttm_tt *ttm)
{ {
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
nvkm_vm_unmap(&nvbe->node->vma[0]); nouveau_mem_fini(nvbe->mem);
return 0; return 0;
} }
...@@ -61,30 +63,20 @@ static int ...@@ -61,30 +63,20 @@ static int
nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg) nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
{ {
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct nvkm_mem *node = reg->mm_node; struct nouveau_mem *mem = nouveau_mem(reg);
int ret;
/* noop: bound in move_notify() */
if (ttm->sg) {
node->sg = ttm->sg;
node->pages = NULL;
} else {
node->sg = NULL;
node->pages = nvbe->ttm.dma_address;
}
node->size = (reg->num_pages << PAGE_SHIFT) >> 12;
return 0;
}
static int ret = nouveau_mem_host(reg, &nvbe->ttm);
nv50_sgdma_unbind(struct ttm_tt *ttm) if (ret)
{ return ret;
/* noop: unbound in move_notify() */
nvbe->mem = mem;
return 0; return 0;
} }
static struct ttm_backend_func nv50_sgdma_backend = { static struct ttm_backend_func nv50_sgdma_backend = {
.bind = nv50_sgdma_bind, .bind = nv50_sgdma_bind,
.unbind = nv50_sgdma_unbind, .unbind = nv04_sgdma_unbind,
.destroy = nouveau_sgdma_destroy .destroy = nouveau_sgdma_destroy
}; };
......
...@@ -23,53 +23,37 @@ ...@@ -23,53 +23,37 @@
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE. * USE OR OTHER DEALINGS IN THE SOFTWARE.
*/ */
#include "nouveau_drv.h" #include "nouveau_drv.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h" #include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_ttm.h"
#include <drm/drm_legacy.h> #include <drm/drm_legacy.h>
#include <core/tegra.h> #include <core/tegra.h>
static int static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize) nouveau_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{ {
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
man->priv = fb;
return 0; return 0;
} }
static int static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man) nouveau_manager_fini(struct ttm_mem_type_manager *man)
{ {
man->priv = NULL;
return 0; return 0;
} }
static inline void static void
nvkm_mem_node_cleanup(struct nvkm_mem *node) nouveau_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg)
{ {
if (node->vma[0].node) { nouveau_mem_del(reg);
nvkm_vm_unmap(&node->vma[0]);
nvkm_vm_put(&node->vma[0]);
}
if (node->vma[1].node) {
nvkm_vm_unmap(&node->vma[1]);
nvkm_vm_put(&node->vma[1]);
}
} }
static void static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man, nouveau_manager_debug(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *reg) struct drm_printer *printer)
{ {
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
struct nvkm_ram *ram = nvxx_fb(&drm->client.device)->ram;
nvkm_mem_node_cleanup(reg->mm_node);
ram->func->put(ram, (struct nvkm_mem **)&reg->mm_node);
} }
static int static int
...@@ -78,192 +62,105 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man, ...@@ -78,192 +62,105 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
const struct ttm_place *place, const struct ttm_place *place,
struct ttm_mem_reg *reg) struct ttm_mem_reg *reg)
{ {
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
struct nvkm_ram *ram = nvxx_fb(&drm->client.device)->ram;
struct nouveau_bo *nvbo = nouveau_bo(bo); struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nvkm_mem *node; struct nouveau_drm *drm = nvbo->cli->drm;
u32 size_nc = 0; struct nouveau_mem *mem;
int ret; int ret;
if (drm->client.device.info.ram_size == 0) if (drm->client.device.info.ram_size == 0)
return -ENOMEM; return -ENOMEM;
if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
size_nc = 1 << nvbo->page_shift; mem = nouveau_mem(reg);
if (ret)
return ret;
ret = ram->func->get(ram, reg->num_pages << PAGE_SHIFT, ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
reg->page_alignment << PAGE_SHIFT, size_nc,
(nvbo->tile_flags >> 8) & 0x3ff, &node);
if (ret) { if (ret) {
reg->mm_node = NULL; nouveau_mem_del(reg);
return (ret == -ENOSPC) ? 0 : ret; if (ret == -ENOSPC) {
reg->mm_node = NULL;
return 0;
}
return ret;
} }
node->page_shift = nvbo->page_shift;
reg->mm_node = node;
reg->start = node->offset >> PAGE_SHIFT;
return 0; return 0;
} }
const struct ttm_mem_type_manager_func nouveau_vram_manager = { const struct ttm_mem_type_manager_func nouveau_vram_manager = {
.init = nouveau_vram_manager_init, .init = nouveau_manager_init,
.takedown = nouveau_vram_manager_fini, .takedown = nouveau_manager_fini,
.get_node = nouveau_vram_manager_new, .get_node = nouveau_vram_manager_new,
.put_node = nouveau_vram_manager_del, .put_node = nouveau_manager_del,
.debug = nouveau_manager_debug,
}; };
static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
return 0;
}
static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
return 0;
}
static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *reg)
{
nvkm_mem_node_cleanup(reg->mm_node);
kfree(reg->mm_node);
reg->mm_node = NULL;
}
static int static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man, nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo, struct ttm_buffer_object *bo,
const struct ttm_place *place, const struct ttm_place *place,
struct ttm_mem_reg *reg) struct ttm_mem_reg *reg)
{ {
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo); struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nvkm_mem *node; struct nouveau_drm *drm = nvbo->cli->drm;
struct nouveau_mem *mem;
node = kzalloc(sizeof(*node), GFP_KERNEL); int ret;
if (!node)
return -ENOMEM;
node->page_shift = 12; ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
mem = nouveau_mem(reg);
switch (drm->client.device.info.family) { if (ret)
case NV_DEVICE_INFO_V0_TNT: return ret;
case NV_DEVICE_INFO_V0_CELSIUS:
case NV_DEVICE_INFO_V0_KELVIN:
case NV_DEVICE_INFO_V0_RANKINE:
case NV_DEVICE_INFO_V0_CURIE:
break;
case NV_DEVICE_INFO_V0_TESLA:
if (drm->client.device.info.chipset != 0x50)
node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
break;
case NV_DEVICE_INFO_V0_FERMI:
case NV_DEVICE_INFO_V0_KEPLER:
case NV_DEVICE_INFO_V0_MAXWELL:
case NV_DEVICE_INFO_V0_PASCAL:
node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
break;
default:
NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
drm->client.device.info.family);
break;
}
reg->mm_node = node; reg->start = 0;
reg->start = 0;
return 0; return 0;
} }
static void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man,
struct drm_printer *printer)
{
}
const struct ttm_mem_type_manager_func nouveau_gart_manager = { const struct ttm_mem_type_manager_func nouveau_gart_manager = {
.init = nouveau_gart_manager_init, .init = nouveau_manager_init,
.takedown = nouveau_gart_manager_fini, .takedown = nouveau_manager_fini,
.get_node = nouveau_gart_manager_new, .get_node = nouveau_gart_manager_new,
.put_node = nouveau_gart_manager_del, .put_node = nouveau_manager_del,
.debug = nouveau_gart_manager_debug .debug = nouveau_manager_debug
}; };
/*XXX*/
#include <subdev/mmu/nv04.h>
static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
struct nvkm_mmu *mmu = nvxx_mmu(&drm->client.device);
struct nv04_mmu *priv = (void *)mmu;
struct nvkm_vm *vm = NULL;
nvkm_vm_ref(priv->vm, &vm, NULL);
man->priv = vm;
return 0;
}
static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
struct nvkm_vm *vm = man->priv;
nvkm_vm_ref(NULL, &vm, NULL);
man->priv = NULL;
return 0;
}
static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg)
{
struct nvkm_mem *node = reg->mm_node;
if (node->vma[0].node)
nvkm_vm_put(&node->vma[0]);
kfree(reg->mm_node);
reg->mm_node = NULL;
}
static int static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man, nv04_gart_manager_new(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo, struct ttm_buffer_object *bo,
const struct ttm_place *place, const struct ttm_place *place,
struct ttm_mem_reg *reg) struct ttm_mem_reg *reg)
{ {
struct nvkm_mem *node; struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_drm *drm = nvbo->cli->drm;
struct nouveau_mem *mem;
int ret; int ret;
node = kzalloc(sizeof(*node), GFP_KERNEL); ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
if (!node) mem = nouveau_mem(reg);
return -ENOMEM; if (ret)
return ret;
node->page_shift = 12;
ret = nvkm_vm_get(man->priv, reg->num_pages << 12, node->page_shift, ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
NV_MEM_ACCESS_RW, &node->vma[0]); reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
if (ret) { if (ret) {
kfree(node); nouveau_mem_del(reg);
if (ret == -ENOSPC) {
reg->mm_node = NULL;
return 0;
}
return ret; return ret;
} }
reg->mm_node = node; reg->start = mem->vma[0].addr >> PAGE_SHIFT;
reg->start = node->vma[0].offset >> PAGE_SHIFT;
return 0; return 0;
} }
static void
nv04_gart_manager_debug(struct ttm_mem_type_manager *man,
struct drm_printer *printer)
{
}
const struct ttm_mem_type_manager_func nv04_gart_manager = { const struct ttm_mem_type_manager_func nv04_gart_manager = {
.init = nv04_gart_manager_init, .init = nouveau_manager_init,
.takedown = nv04_gart_manager_fini, .takedown = nouveau_manager_fini,
.get_node = nv04_gart_manager_new, .get_node = nv04_gart_manager_new,
.put_node = nv04_gart_manager_del, .put_node = nouveau_manager_del,
.debug = nv04_gart_manager_debug .debug = nouveau_manager_debug
}; };
int int
...@@ -343,44 +240,43 @@ nouveau_ttm_init(struct nouveau_drm *drm) ...@@ -343,44 +240,43 @@ nouveau_ttm_init(struct nouveau_drm *drm)
{ {
struct nvkm_device *device = nvxx_device(&drm->client.device); struct nvkm_device *device = nvxx_device(&drm->client.device);
struct nvkm_pci *pci = device->pci; struct nvkm_pci *pci = device->pci;
struct nvif_mmu *mmu = &drm->client.mmu;
struct drm_device *dev = drm->dev; struct drm_device *dev = drm->dev;
u8 bits; int typei, ret;
int ret;
if (pci && pci->agp.bridge) { typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
drm->agp.bridge = pci->agp.bridge; NVIF_MEM_COHERENT);
drm->agp.base = pci->agp.base; if (typei < 0)
drm->agp.size = pci->agp.size; return -ENOSYS;
drm->agp.cma = pci->agp.cma;
}
bits = nvxx_mmu(&drm->client.device)->dma_bits; drm->ttm.type_host = typei;
if (nvxx_device(&drm->client.device)->func->pci) {
if (drm->agp.bridge)
bits = 32;
} else if (device->func->tegra) {
struct nvkm_device_tegra *tegra = device->func->tegra(device);
/* typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE);
* If the platform can use an IOMMU, then the addressable DMA if (typei < 0)
* space is constrained by the IOMMU bit return -ENOSYS;
*/
if (tegra->func->iommu_bit)
bits = min(bits, tegra->func->iommu_bit);
} drm->ttm.type_ncoh = typei;
ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits)); if (drm->client.device.info.platform != NV_DEVICE_INFO_V0_SOC &&
if (ret && bits != 32) { drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
bits = 32; typei = nvif_mmu_type(mmu, NVIF_MEM_VRAM | NVIF_MEM_MAPPABLE |
ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits)); NVIF_MEM_KIND |
NVIF_MEM_COMP |
NVIF_MEM_DISP);
if (typei < 0)
return -ENOSYS;
drm->ttm.type_vram = typei;
} else {
drm->ttm.type_vram = -1;
} }
if (ret)
return ret;
ret = dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(bits)); if (pci && pci->agp.bridge) {
if (ret) drm->agp.bridge = pci->agp.bridge;
dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(32)); drm->agp.base = pci->agp.base;
drm->agp.size = pci->agp.size;
drm->agp.cma = pci->agp.cma;
}
ret = nouveau_ttm_global_init(drm); ret = nouveau_ttm_global_init(drm);
if (ret) if (ret)
...@@ -391,7 +287,7 @@ nouveau_ttm_init(struct nouveau_drm *drm) ...@@ -391,7 +287,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
&nouveau_bo_driver, &nouveau_bo_driver,
dev->anon_inode->i_mapping, dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET, DRM_FILE_PAGE_OFFSET,
bits <= 32 ? true : false); drm->client.mmu.dmabits <= 32 ? true : false);
if (ret) { if (ret) {
NV_ERROR(drm, "error initialising bo driver, %d\n", ret); NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
return ret; return ret;
...@@ -415,7 +311,7 @@ nouveau_ttm_init(struct nouveau_drm *drm) ...@@ -415,7 +311,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
/* GART init */ /* GART init */
if (!drm->agp.bridge) { if (!drm->agp.bridge) {
drm->gem.gart_available = nvxx_mmu(&drm->client.device)->limit; drm->gem.gart_available = drm->client.vmm.vmm.limit;
} else { } else {
drm->gem.gart_available = drm->agp.size; drm->gem.gart_available = drm->agp.size;
} }
......
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "nouveau_vmm.h"
#include "nouveau_drv.h"
#include "nouveau_bo.h"
#include "nouveau_mem.h"
void
nouveau_vma_unmap(struct nouveau_vma *vma)
{
if (vma->mem) {
nvif_vmm_unmap(&vma->vmm->vmm, vma->addr);
vma->mem = NULL;
}
}
int
nouveau_vma_map(struct nouveau_vma *vma, struct nouveau_mem *mem)
{
struct nvif_vma tmp = { .addr = vma->addr };
int ret = nouveau_mem_map(mem, &vma->vmm->vmm, &tmp);
if (ret)
return ret;
vma->mem = mem;
return 0;
}
struct nouveau_vma *
nouveau_vma_find(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm)
{
struct nouveau_vma *vma;
list_for_each_entry(vma, &nvbo->vma_list, head) {
if (vma->vmm == vmm)
return vma;
}
return NULL;
}
void
nouveau_vma_del(struct nouveau_vma **pvma)
{
struct nouveau_vma *vma = *pvma;
if (vma && --vma->refs <= 0) {
if (likely(vma->addr != ~0ULL)) {
struct nvif_vma tmp = { .addr = vma->addr, .size = 1 };
nvif_vmm_put(&vma->vmm->vmm, &tmp);
}
list_del(&vma->head);
		kfree(*pvma);
	}
	*pvma = NULL;
}
int
nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
struct nouveau_vma **pvma)
{
struct nouveau_mem *mem = nouveau_mem(&nvbo->bo.mem);
struct nouveau_vma *vma;
struct nvif_vma tmp;
int ret;
if ((vma = *pvma = nouveau_vma_find(nvbo, vmm))) {
vma->refs++;
return 0;
}
if (!(vma = *pvma = kmalloc(sizeof(*vma), GFP_KERNEL)))
return -ENOMEM;
vma->vmm = vmm;
vma->refs = 1;
vma->addr = ~0ULL;
vma->mem = NULL;
list_add_tail(&vma->head, &nvbo->vma_list);
if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
mem->mem.page == nvbo->page) {
ret = nvif_vmm_get(&vmm->vmm, LAZY, false, mem->mem.page, 0,
mem->mem.size, &tmp);
if (ret)
goto done;
vma->addr = tmp.addr;
ret = nouveau_vma_map(vma, mem);
} else {
ret = nvif_vmm_get(&vmm->vmm, PTES, false, mem->mem.page, 0,
mem->mem.size, &tmp);
vma->addr = tmp.addr;
}
done:
if (ret)
nouveau_vma_del(pvma);
return ret;
}
void
nouveau_vmm_fini(struct nouveau_vmm *vmm)
{
nvif_vmm_fini(&vmm->vmm);
vmm->cli = NULL;
}
int
nouveau_vmm_init(struct nouveau_cli *cli, s32 oclass, struct nouveau_vmm *vmm)
{
int ret = nvif_vmm_init(&cli->mmu, oclass, PAGE_SIZE, 0, NULL, 0,
&vmm->vmm);
if (ret)
return ret;
vmm->cli = cli;
return 0;
}
#ifndef __NOUVEAU_VMA_H__
#define __NOUVEAU_VMA_H__
#include <nvif/vmm.h>
struct nouveau_bo;
struct nouveau_mem;
struct nouveau_vma {
struct nouveau_vmm *vmm;
int refs;
struct list_head head;
u64 addr;
struct nouveau_mem *mem;
};
struct nouveau_vma *nouveau_vma_find(struct nouveau_bo *, struct nouveau_vmm *);
int nouveau_vma_new(struct nouveau_bo *, struct nouveau_vmm *,
struct nouveau_vma **);
void nouveau_vma_del(struct nouveau_vma **);
int nouveau_vma_map(struct nouveau_vma *, struct nouveau_mem *);
void nouveau_vma_unmap(struct nouveau_vma *);
struct nouveau_vmm {
struct nouveau_cli *cli;
struct nvif_vmm vmm;
struct nvkm_vm *vm;
};
int nouveau_vmm_init(struct nouveau_cli *, s32 oclass, struct nouveau_vmm *);
void nouveau_vmm_fini(struct nouveau_vmm *);
#endif
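The pair of files above replaces the old nouveau_bo_vma_add()/nouveau_bo_vma_del() pattern with refcounted, per-client VMAs. A minimal usage sketch under stated assumptions: an initialised `cli` with a valid MMU object, a resident `nvbo`, and `NVIF_CLASS_VMM_GF100` standing in for whichever VMM class the device actually exposes.

```c
struct nouveau_vmm vmm;
struct nouveau_vma *vma;
int ret;

/* One VMM per client; the oclass choice here is an assumption. */
ret = nouveau_vmm_init(cli, NVIF_CLASS_VMM_GF100, &vmm);
if (ret)
	return ret;

/* The first call allocates VA and maps the bo; later calls on the
 * same (bo, vmm) pair just take another reference on the same VMA.
 */
ret = nouveau_vma_new(nvbo, &vmm, &vma);
if (ret == 0) {
	/* vma->addr is the bo's GPU virtual address in this VMM. */
	nouveau_vma_del(&vma);	/* drops the reference; frees on last put */
}
nouveau_vmm_fini(&vmm);
```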
@@ -318,7 +318,7 @@ nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
 			ret = nvif_object_init(disp, 0, oclass[0],
 					       data, size, &chan->user);
 			if (ret == 0)
-				nvif_object_map(&chan->user);
+				nvif_object_map(&chan->user, NULL, 0);
 			nvif_object_sclass_put(&sclass);
 			return ret;
 		}
@@ -424,7 +424,7 @@ nv50_dmac_ctxdma_new(struct nv50_dmac *dmac, struct nouveau_framebuffer *fb)
 {
 	struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
 	struct nv50_dmac_ctxdma *ctxdma;
-	const u8    kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8;
+	const u8    kind = fb->nvbo->kind;
 	const u32 handle = 0xfb000000 | kind;
 	struct {
 		struct nv_dma_v0 base;
@@ -510,6 +510,7 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
 	int ret;
 
 	mutex_init(&dmac->lock);
+	INIT_LIST_HEAD(&dmac->ctxdma);
 
 	dmac->ptr = dma_alloc_coherent(nvxx_device(device)->dev, PAGE_SIZE,
 				       &dmac->handle, GFP_KERNEL);
@@ -556,7 +557,6 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
 	if (ret)
 		return ret;
 
-	INIT_LIST_HEAD(&dmac->ctxdma);
 	return ret;
 }
@@ -847,7 +847,7 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
 	asyw->image.w = fb->base.width;
 	asyw->image.h = fb->base.height;
-	asyw->image.kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8;
+	asyw->image.kind = fb->nvbo->kind;
 
 	if (asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
 		asyw->interval = 0;
@@ -857,9 +857,9 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
 	if (asyw->image.kind) {
 		asyw->image.layout = 0;
 		if (drm->client.device.info.chipset >= 0xc0)
-			asyw->image.block = fb->nvbo->tile_mode >> 4;
+			asyw->image.block = fb->nvbo->mode >> 4;
 		else
-			asyw->image.block = fb->nvbo->tile_mode;
+			asyw->image.block = fb->nvbo->mode;
 		asyw->image.pitch = (fb->base.pitches[0] / 4) << 4;
 	} else {
 		asyw->image.layout = 1;
......
@@ -25,6 +25,7 @@
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_fbcon.h"
+#include "nouveau_vmm.h"
 
 int
 nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
@@ -239,8 +240,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
 	OUT_RING(chan, info->fix.line_length);
 	OUT_RING(chan, info->var.xres_virtual);
 	OUT_RING(chan, info->var.yres_virtual);
-	OUT_RING(chan, upper_32_bits(fb->vma.offset));
-	OUT_RING(chan, lower_32_bits(fb->vma.offset));
+	OUT_RING(chan, upper_32_bits(fb->vma->addr));
+	OUT_RING(chan, lower_32_bits(fb->vma->addr));
 	BEGIN_NV04(chan, NvSub2D, 0x0230, 2);
 	OUT_RING(chan, format);
 	OUT_RING(chan, 1);
@@ -248,8 +249,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
 	OUT_RING(chan, info->fix.line_length);
 	OUT_RING(chan, info->var.xres_virtual);
 	OUT_RING(chan, info->var.yres_virtual);
-	OUT_RING(chan, upper_32_bits(fb->vma.offset));
-	OUT_RING(chan, lower_32_bits(fb->vma.offset));
+	OUT_RING(chan, upper_32_bits(fb->vma->addr));
+	OUT_RING(chan, lower_32_bits(fb->vma->addr));
 	FIRE_RING(chan);
 	return 0;
......
@@ -25,6 +25,7 @@
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_fence.h"
+#include "nouveau_vmm.h"
 
 #include "nv50_display.h"
@@ -68,12 +69,7 @@ nv84_fence_emit(struct nouveau_fence *fence)
 {
 	struct nouveau_channel *chan = fence->channel;
 	struct nv84_fence_chan *fctx = chan->fence;
-	u64 addr = chan->chid * 16;
-
-	if (fence->sysmem)
-		addr += fctx->vma_gart.offset;
-	else
-		addr += fctx->vma.offset;
+	u64 addr = fctx->vma->addr + chan->chid * 16;
 
 	return fctx->base.emit32(chan, addr, fence->base.seqno);
 }
@@ -83,12 +79,7 @@ nv84_fence_sync(struct nouveau_fence *fence,
 		struct nouveau_channel *prev, struct nouveau_channel *chan)
 {
 	struct nv84_fence_chan *fctx = chan->fence;
-	u64 addr = prev->chid * 16;
-
-	if (fence->sysmem)
-		addr += fctx->vma_gart.offset;
-	else
-		addr += fctx->vma.offset;
+	u64 addr = fctx->vma->addr + prev->chid * 16;
 
 	return fctx->base.sync32(chan, addr, fence->base.seqno);
 }
@@ -108,8 +99,7 @@ nv84_fence_context_del(struct nouveau_channel *chan)
 	nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
 	mutex_lock(&priv->mutex);
-	nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
-	nouveau_bo_vma_del(priv->bo, &fctx->vma);
+	nouveau_vma_del(&fctx->vma);
 	mutex_unlock(&priv->mutex);
 	nouveau_fence_context_del(&fctx->base);
 	chan->fence = NULL;
@@ -137,11 +127,7 @@ nv84_fence_context_new(struct nouveau_channel *chan)
 	fctx->base.sequence = nv84_fence_read(chan);
 
 	mutex_lock(&priv->mutex);
-	ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
-	if (ret == 0) {
-		ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
-					 &fctx->vma_gart);
-	}
+	ret = nouveau_vma_new(priv->bo, &cli->vmm, &fctx->vma);
 	mutex_unlock(&priv->mutex);
 
 	if (ret)
@@ -182,10 +168,6 @@ static void
 nv84_fence_destroy(struct nouveau_drm *drm)
 {
 	struct nv84_fence_priv *priv = drm->fence;
-	nouveau_bo_unmap(priv->bo_gart);
-	if (priv->bo_gart)
-		nouveau_bo_unpin(priv->bo_gart);
-	nouveau_bo_ref(NULL, &priv->bo_gart);
 	nouveau_bo_unmap(priv->bo);
 	if (priv->bo)
 		nouveau_bo_unpin(priv->bo);
@@ -238,21 +220,6 @@ nv84_fence_create(struct nouveau_drm *drm)
 		nouveau_bo_ref(NULL, &priv->bo);
 	}
 
-	if (ret == 0)
-		ret = nouveau_bo_new(&drm->client, 16 * priv->base.contexts, 0,
-				     TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED, 0,
-				     0, NULL, NULL, &priv->bo_gart);
-	if (ret == 0) {
-		ret = nouveau_bo_pin(priv->bo_gart, TTM_PL_FLAG_TT, false);
-		if (ret == 0) {
-			ret = nouveau_bo_map(priv->bo_gart);
-			if (ret)
-				nouveau_bo_unpin(priv->bo_gart);
-		}
-		if (ret)
-			nouveau_bo_ref(NULL, &priv->bo_gart);
-	}
 	if (ret)
 		nv84_fence_destroy(drm);
 	return ret;
......
@@ -25,6 +25,7 @@
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_fbcon.h"
+#include "nouveau_vmm.h"
 
 int
 nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
@@ -239,8 +240,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
 	OUT_RING (chan, info->fix.line_length);
 	OUT_RING (chan, info->var.xres_virtual);
 	OUT_RING (chan, info->var.yres_virtual);
-	OUT_RING (chan, upper_32_bits(fb->vma.offset));
-	OUT_RING (chan, lower_32_bits(fb->vma.offset));
+	OUT_RING (chan, upper_32_bits(fb->vma->addr));
+	OUT_RING (chan, lower_32_bits(fb->vma->addr));
 	BEGIN_NVC0(chan, NvSub2D, 0x0230, 10);
 	OUT_RING (chan, format);
 	OUT_RING (chan, 1);
@@ -250,8 +251,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
 	OUT_RING (chan, info->fix.line_length);
 	OUT_RING (chan, info->var.xres_virtual);
 	OUT_RING (chan, info->var.yres_virtual);
-	OUT_RING (chan, upper_32_bits(fb->vma.offset));
-	OUT_RING (chan, lower_32_bits(fb->vma.offset));
+	OUT_RING (chan, upper_32_bits(fb->vma->addr));
+	OUT_RING (chan, lower_32_bits(fb->vma->addr));
 	FIRE_RING (chan);
 	return 0;
......
@@ -2,4 +2,7 @@ nvif-y := nvif/object.o
 nvif-y += nvif/client.o
 nvif-y += nvif/device.o
 nvif-y += nvif/driver.o
+nvif-y += nvif/mem.o
+nvif-y += nvif/mmu.o
 nvif-y += nvif/notify.o
+nvif-y += nvif/vmm.o
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <nvif/mem.h>
#include <nvif/client.h>
#include <nvif/if000a.h>
void
nvif_mem_fini(struct nvif_mem *mem)
{
nvif_object_fini(&mem->object);
}
int
nvif_mem_init_type(struct nvif_mmu *mmu, s32 oclass, int type, u8 page,
u64 size, void *argv, u32 argc, struct nvif_mem *mem)
{
struct nvif_mem_v0 *args;
u8 stack[128];
int ret;
mem->object.client = NULL;
if (type < 0)
return -EINVAL;
if (sizeof(*args) + argc > sizeof(stack)) {
if (!(args = kmalloc(sizeof(*args) + argc, GFP_KERNEL)))
return -ENOMEM;
} else {
args = (void *)stack;
}
args->version = 0;
args->type = type;
args->page = page;
args->size = size;
memcpy(args->data, argv, argc);
ret = nvif_object_init(&mmu->object, 0, oclass, args,
sizeof(*args) + argc, &mem->object);
if (ret == 0) {
mem->type = mmu->type[type].type;
mem->page = args->page;
mem->addr = args->addr;
mem->size = args->size;
}
if (args != (void *)stack)
kfree(args);
return ret;
}
int
nvif_mem_init(struct nvif_mmu *mmu, s32 oclass, u8 type, u8 page,
u64 size, void *argv, u32 argc, struct nvif_mem *mem)
{
int ret = -EINVAL, i;
mem->object.client = NULL;
for (i = 0; ret && i < mmu->type_nr; i++) {
if ((mmu->type[i].type & type) == type) {
ret = nvif_mem_init_type(mmu, oclass, i, page, size,
argv, argc, mem);
}
}
return ret;
}
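nvif_mem_init() resolves a set of requested property flags to the first matching memory-type index, so callers describe what they need rather than hard-coding indices. A hedged usage sketch; the `NVIF_CLASS_MEM_GF100` class and the surrounding `mmu` handle are assumptions, not shown in this diff:

```c
struct nvif_mem mem;
int ret;

/* Ask for 4KiB of host-visible, coherent, CPU-mappable memory;
 * page = 0 lets the implementation pick a page size.
 */
ret = nvif_mem_init(&mmu, NVIF_CLASS_MEM_GF100,
		    NVIF_MEM_HOST | NVIF_MEM_COHERENT | NVIF_MEM_MAPPABLE,
		    0, 0x1000, NULL, 0, &mem);
if (ret == 0) {
	/* mem.addr, mem.size and mem.page now describe the allocation. */
	nvif_mem_fini(&mem);
}
```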
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <nvif/mmu.h>
#include <nvif/class.h>
#include <nvif/if0008.h>
void
nvif_mmu_fini(struct nvif_mmu *mmu)
{
kfree(mmu->kind);
kfree(mmu->type);
kfree(mmu->heap);
nvif_object_fini(&mmu->object);
}
int
nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu)
{
struct nvif_mmu_v0 args;
int ret, i;
args.version = 0;
mmu->heap = NULL;
mmu->type = NULL;
mmu->kind = NULL;
ret = nvif_object_init(parent, 0, oclass, &args, sizeof(args),
&mmu->object);
if (ret)
goto done;
mmu->dmabits = args.dmabits;
mmu->heap_nr = args.heap_nr;
mmu->type_nr = args.type_nr;
mmu->kind_nr = args.kind_nr;
mmu->heap = kmalloc(sizeof(*mmu->heap) * mmu->heap_nr, GFP_KERNEL);
mmu->type = kmalloc(sizeof(*mmu->type) * mmu->type_nr, GFP_KERNEL);
if (ret = -ENOMEM, !mmu->heap || !mmu->type)
goto done;
mmu->kind = kmalloc(sizeof(*mmu->kind) * mmu->kind_nr, GFP_KERNEL);
if (!mmu->kind && mmu->kind_nr)
goto done;
for (i = 0; i < mmu->heap_nr; i++) {
struct nvif_mmu_heap_v0 args = { .index = i };
ret = nvif_object_mthd(&mmu->object, NVIF_MMU_V0_HEAP,
&args, sizeof(args));
if (ret)
goto done;
mmu->heap[i].size = args.size;
}
for (i = 0; i < mmu->type_nr; i++) {
struct nvif_mmu_type_v0 args = { .index = i };
ret = nvif_object_mthd(&mmu->object, NVIF_MMU_V0_TYPE,
&args, sizeof(args));
if (ret)
goto done;
mmu->type[i].type = 0;
if (args.vram) mmu->type[i].type |= NVIF_MEM_VRAM;
if (args.host) mmu->type[i].type |= NVIF_MEM_HOST;
if (args.comp) mmu->type[i].type |= NVIF_MEM_COMP;
if (args.disp) mmu->type[i].type |= NVIF_MEM_DISP;
if (args.kind ) mmu->type[i].type |= NVIF_MEM_KIND;
if (args.mappable) mmu->type[i].type |= NVIF_MEM_MAPPABLE;
if (args.coherent) mmu->type[i].type |= NVIF_MEM_COHERENT;
if (args.uncached) mmu->type[i].type |= NVIF_MEM_UNCACHED;
mmu->type[i].heap = args.heap;
}
if (mmu->kind_nr) {
struct nvif_mmu_kind_v0 *kind;
u32 argc = sizeof(*kind) + sizeof(*kind->data) * mmu->kind_nr;
if (ret = -ENOMEM, !(kind = kmalloc(argc, GFP_KERNEL)))
goto done;
kind->version = 0;
kind->count = mmu->kind_nr;
ret = nvif_object_mthd(&mmu->object, NVIF_MMU_V0_KIND,
kind, argc);
if (ret == 0)
memcpy(mmu->kind, kind->data, kind->count);
kfree(kind);
}
done:
if (ret)
nvif_mmu_fini(mmu);
return ret;
}
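The nvif_mmu_type() helper used by nouveau_ttm_init() earlier in this diff is not part of this excerpt; presumably it is a small inline over the type[] array populated here. A sketch of the assumed matching logic:

```c
/* Assumed semantics: return the first memory-type index whose flags
 * contain every requested bit, or a negative value if none matches.
 */
static inline int
nvif_mmu_type(struct nvif_mmu *mmu, u8 mask)
{
	int i;
	for (i = 0; i < mmu->type_nr; i++) {
		if ((mmu->type[i].type & mask) == mask)
			return i;
	}
	return -EINVAL;
}
```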
@@ -166,46 +166,77 @@ nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size)
 }
 
 void
-nvif_object_unmap(struct nvif_object *object)
+nvif_object_unmap_handle(struct nvif_object *object)
+{
+	struct {
+		struct nvif_ioctl_v0 ioctl;
+		struct nvif_ioctl_unmap unmap;
+	} args = {
+		.ioctl.type = NVIF_IOCTL_V0_UNMAP,
+	};
+
+	nvif_object_ioctl(object, &args, sizeof(args), NULL);
+}
+
+int
+nvif_object_map_handle(struct nvif_object *object, void *argv, u32 argc,
+		       u64 *handle, u64 *length)
 {
-	if (object->map.size) {
-		struct nvif_client *client = object->client;
-		struct {
-			struct nvif_ioctl_v0 ioctl;
-			struct nvif_ioctl_unmap unmap;
-		} args = {
-			.ioctl.type = NVIF_IOCTL_V0_UNMAP,
-		};
+	struct {
+		struct nvif_ioctl_v0 ioctl;
+		struct nvif_ioctl_map_v0 map;
+	} *args;
+	u32 argn = sizeof(*args) + argc;
+	int ret, maptype;
+
+	if (!(args = kzalloc(argn, GFP_KERNEL)))
+		return -ENOMEM;
+	args->ioctl.type = NVIF_IOCTL_V0_MAP;
+	memcpy(args->map.data, argv, argc);
 
-		if (object->map.ptr) {
+	ret = nvif_object_ioctl(object, args, argn, NULL);
+	*handle = args->map.handle;
+	*length = args->map.length;
+	maptype = args->map.type;
+	kfree(args);
+	return ret ? ret : (maptype == NVIF_IOCTL_MAP_V0_IO);
+}
+
+void
+nvif_object_unmap(struct nvif_object *object)
+{
+	struct nvif_client *client = object->client;
+	if (object->map.ptr) {
+		if (object->map.size) {
 			client->driver->unmap(client, object->map.ptr,
 					      object->map.size);
-			object->map.ptr = NULL;
+			object->map.size = 0;
 		}
+		object->map.ptr = NULL;
-
-		nvif_object_ioctl(object, &args, sizeof(args), NULL);
-		object->map.size = 0;
+		nvif_object_unmap_handle(object);
 	}
 }
 
 int
-nvif_object_map(struct nvif_object *object)
+nvif_object_map(struct nvif_object *object, void *argv, u32 argc)
 {
 	struct nvif_client *client = object->client;
-	struct {
-		struct nvif_ioctl_v0 ioctl;
-		struct nvif_ioctl_map_v0 map;
-	} args = {
-		.ioctl.type = NVIF_IOCTL_V0_MAP,
-	};
-	int ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
-	if (ret == 0) {
-		object->map.size = args.map.length;
-		object->map.ptr = client->driver->map(client, args.map.handle,
-						      object->map.size);
-		if (ret = -ENOMEM, object->map.ptr)
+	u64 handle, length;
+	int ret = nvif_object_map_handle(object, argv, argc, &handle, &length);
+	if (ret >= 0) {
+		if (ret) {
+			object->map.ptr = client->driver->map(client,
+							      handle,
+							      length);
+			if (ret = -ENOMEM, object->map.ptr) {
+				object->map.size = length;
+				return 0;
+			}
+		} else {
+			object->map.ptr = (void *)(unsigned long)handle;
 			return 0;
-		nvif_object_unmap(object);
+		}
+		nvif_object_unmap_handle(object);
 	}
 	return ret;
 }
......
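Note the calling-convention change: nvif_object_map_handle() returns 1 when the handle is an IO address that still needs driver->map(), and 0 when it is already a usable virtual address. Callers that need no extra map arguments migrate mechanically, as in the display code earlier in this diff:

```c
/* Old: ret = nvif_object_map(&chan->user);
 * New: optional per-map arguments (none are needed here).
 */
ret = nvif_object_map(&chan->user, NULL, 0);
if (ret == 0) {
	/* object->map.ptr is valid until the object is unmapped. */
	nvif_object_unmap(&chan->user);
}
```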
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <nvif/vmm.h>
#include <nvif/mem.h>
#include <nvif/if000c.h>
int
nvif_vmm_unmap(struct nvif_vmm *vmm, u64 addr)
{
return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_UNMAP,
&(struct nvif_vmm_unmap_v0) { .addr = addr },
sizeof(struct nvif_vmm_unmap_v0));
}
int
nvif_vmm_map(struct nvif_vmm *vmm, u64 addr, u64 size, void *argv, u32 argc,
struct nvif_mem *mem, u64 offset)
{
struct nvif_vmm_map_v0 *args;
u8 stack[16];
int ret;
if (sizeof(*args) + argc > sizeof(stack)) {
if (!(args = kmalloc(sizeof(*args) + argc, GFP_KERNEL)))
return -ENOMEM;
} else {
args = (void *)stack;
}
args->version = 0;
args->addr = addr;
args->size = size;
args->memory = nvif_handle(&mem->object);
args->offset = offset;
memcpy(args->data, argv, argc);
ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_MAP,
args, sizeof(*args) + argc);
if (args != (void *)stack)
kfree(args);
return ret;
}
void
nvif_vmm_put(struct nvif_vmm *vmm, struct nvif_vma *vma)
{
if (vma->size) {
WARN_ON(nvif_object_mthd(&vmm->object, NVIF_VMM_V0_PUT,
&(struct nvif_vmm_put_v0) {
.addr = vma->addr,
}, sizeof(struct nvif_vmm_put_v0)));
vma->size = 0;
}
}
int
nvif_vmm_get(struct nvif_vmm *vmm, enum nvif_vmm_get type, bool sparse,
u8 page, u8 align, u64 size, struct nvif_vma *vma)
{
struct nvif_vmm_get_v0 args;
int ret;
args.version = vma->size = 0;
args.sparse = sparse;
args.page = page;
args.align = align;
args.size = size;
switch (type) {
case ADDR: args.type = NVIF_VMM_GET_V0_ADDR; break;
case PTES: args.type = NVIF_VMM_GET_V0_PTES; break;
case LAZY: args.type = NVIF_VMM_GET_V0_LAZY; break;
default:
WARN_ON(1);
return -EINVAL;
}
ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_GET,
&args, sizeof(args));
if (ret == 0) {
vma->addr = args.addr;
vma->size = args.size;
}
return ret;
}
void
nvif_vmm_fini(struct nvif_vmm *vmm)
{
kfree(vmm->page);
nvif_object_fini(&vmm->object);
}
int
nvif_vmm_init(struct nvif_mmu *mmu, s32 oclass, u64 addr, u64 size,
void *argv, u32 argc, struct nvif_vmm *vmm)
{
struct nvif_vmm_v0 *args;
u32 argn = sizeof(*args) + argc;
int ret = -ENOSYS, i;
vmm->object.client = NULL;
vmm->page = NULL;
if (!(args = kmalloc(argn, GFP_KERNEL)))
return -ENOMEM;
args->version = 0;
args->addr = addr;
args->size = size;
memcpy(args->data, argv, argc);
ret = nvif_object_init(&mmu->object, 0, oclass, args, argn,
&vmm->object);
if (ret)
goto done;
vmm->start = args->addr;
vmm->limit = args->size;
vmm->page_nr = args->page_nr;
vmm->page = kmalloc(sizeof(*vmm->page) * vmm->page_nr, GFP_KERNEL);
if (!vmm->page) {
ret = -ENOMEM;
goto done;
}
for (i = 0; i < vmm->page_nr; i++) {
struct nvif_vmm_page_v0 args = { .index = i };
ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_PAGE,
&args, sizeof(args));
if (ret)
break;
vmm->page[i].shift = args.shift;
vmm->page[i].sparse = args.sparse;
vmm->page[i].vram = args.vram;
vmm->page[i].host = args.host;
vmm->page[i].comp = args.comp;
}
done:
if (ret)
nvif_vmm_fini(vmm);
kfree(args);
return ret;
}
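End to end, the new VMM interface splits address-space allocation (get/put) from page-table population (map/unmap). A hedged sketch, assuming an initialised `mmu`, a populated `mem` from nvif_mem_init(), and the GF100 VMM class; the start/limit arguments mirror what nouveau_vmm_init() passes above:

```c
struct nvif_vmm vmm;
struct nvif_vma vma;
int ret;

ret = nvif_vmm_init(&mmu, NVIF_CLASS_VMM_GF100, PAGE_SIZE, 0,
		    NULL, 0, &vmm);
if (ret)
	return ret;

/* Reserve VA with page tables pre-allocated (PTES)... */
ret = nvif_vmm_get(&vmm, PTES, false, mem.page, 0, mem.size, &vma);
if (ret == 0) {
	/* ...point the range at the backing memory... */
	ret = nvif_vmm_map(&vmm, vma.addr, mem.size, NULL, 0, &mem, 0);
	if (ret == 0)
		nvif_vmm_unmap(&vmm, vma.addr);
	/* ...and release the address range again. */
	nvif_vmm_put(&vmm, &vma);
}
nvif_vmm_fini(&vmm);
```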
@@ -301,5 +301,7 @@ nvkm_client_new(const char *name, u64 device, const char *cfg,
 	client->debug = nvkm_dbgopt(dbg, "CLIENT");
 	client->objroot = RB_ROOT;
 	client->ntfy = ntfy;
+	INIT_LIST_HEAD(&client->umem);
+	spin_lock_init(&client->lock);
 	return 0;
 }
@@ -126,6 +126,15 @@ nvkm_engine_init(struct nvkm_subdev *subdev)
 	return ret;
 }
 
+static int
+nvkm_engine_preinit(struct nvkm_subdev *subdev)
+{
+	struct nvkm_engine *engine = nvkm_engine(subdev);
+	if (engine->func->preinit)
+		engine->func->preinit(engine);
+	return 0;
+}
+
 static void *
 nvkm_engine_dtor(struct nvkm_subdev *subdev)
 {
@@ -138,6 +147,7 @@ nvkm_engine_dtor(struct nvkm_subdev *subdev)
 static const struct nvkm_subdev_func
 nvkm_engine_func = {
 	.dtor = nvkm_engine_dtor,
+	.preinit = nvkm_engine_preinit,
 	.init = nvkm_engine_init,
 	.fini = nvkm_engine_fini,
 	.intr = nvkm_engine_intr,
......
@@ -42,6 +42,14 @@ nvkm_gpuobj_wr32_fast(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
 }
 
 /* accessor functions for gpuobjs allocated directly from instmem */
+static int
+nvkm_gpuobj_heap_map(struct nvkm_gpuobj *gpuobj, u64 offset,
+		     struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+		     void *argv, u32 argc)
+{
+	return nvkm_memory_map(gpuobj->memory, offset, vmm, vma, argv, argc);
+}
+
 static u32
 nvkm_gpuobj_heap_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
 {
@@ -67,6 +75,7 @@ nvkm_gpuobj_heap_fast = {
 	.release = nvkm_gpuobj_heap_release,
 	.rd32 = nvkm_gpuobj_rd32_fast,
 	.wr32 = nvkm_gpuobj_wr32_fast,
+	.map = nvkm_gpuobj_heap_map,
 };
 
 static const struct nvkm_gpuobj_func
@@ -74,6 +83,7 @@ nvkm_gpuobj_heap_slow = {
 	.release = nvkm_gpuobj_heap_release,
 	.rd32 = nvkm_gpuobj_heap_rd32,
 	.wr32 = nvkm_gpuobj_heap_wr32,
+	.map = nvkm_gpuobj_heap_map,
 };
 
 static void *
@@ -90,9 +100,19 @@ nvkm_gpuobj_heap_acquire(struct nvkm_gpuobj *gpuobj)
 static const struct nvkm_gpuobj_func
 nvkm_gpuobj_heap = {
 	.acquire = nvkm_gpuobj_heap_acquire,
+	.map = nvkm_gpuobj_heap_map,
 };
 
 /* accessor functions for gpuobjs sub-allocated from a parent gpuobj */
+static int
+nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, u64 offset,
+		struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+		void *argv, u32 argc)
+{
+	return nvkm_memory_map(gpuobj->parent, gpuobj->node->offset + offset,
+			       vmm, vma, argv, argc);
+}
+
 static u32
 nvkm_gpuobj_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
 {
@@ -118,6 +138,7 @@ nvkm_gpuobj_fast = {
 	.release = nvkm_gpuobj_release,
 	.rd32 = nvkm_gpuobj_rd32_fast,
 	.wr32 = nvkm_gpuobj_wr32_fast,
+	.map = nvkm_gpuobj_map,
 };
 
 static const struct nvkm_gpuobj_func
@@ -125,6 +146,7 @@ nvkm_gpuobj_slow = {
 	.release = nvkm_gpuobj_release,
 	.rd32 = nvkm_gpuobj_rd32,
 	.wr32 = nvkm_gpuobj_wr32,
+	.map = nvkm_gpuobj_map,
 };
 
 static void *
@@ -143,6 +165,7 @@ nvkm_gpuobj_acquire(struct nvkm_gpuobj *gpuobj)
 static const struct nvkm_gpuobj_func
 nvkm_gpuobj_func = {
 	.acquire = nvkm_gpuobj_acquire,
+	.map = nvkm_gpuobj_map,
 };
 
 static int
@@ -185,7 +208,7 @@ nvkm_gpuobj_ctor(struct nvkm_device *device, u32 size, int align, bool zero,
 		gpuobj->size = nvkm_memory_size(gpuobj->memory);
 	}
 
-	return nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
+	return nvkm_mm_init(&gpuobj->heap, 0, 0, gpuobj->size, 1);
 }
 
 void
@@ -196,7 +219,7 @@ nvkm_gpuobj_del(struct nvkm_gpuobj **pgpuobj)
 	if (gpuobj->parent)
 		nvkm_mm_free(&gpuobj->parent->heap, &gpuobj->node);
 	nvkm_mm_fini(&gpuobj->heap);
-	nvkm_memory_del(&gpuobj->memory);
+	nvkm_memory_unref(&gpuobj->memory);
 	kfree(*pgpuobj);
 	*pgpuobj = NULL;
 }
@@ -218,26 +241,6 @@ nvkm_gpuobj_new(struct nvkm_device *device, u32 size, int align, bool zero,
 	return ret;
 }
 
-int
-nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm,
-		u32 access, struct nvkm_vma *vma)
-{
-	struct nvkm_memory *memory = gpuobj->memory;
-	int ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma);
-	if (ret == 0)
-		nvkm_memory_map(memory, vma, 0);
-	return ret;
-}
-
-void
-nvkm_gpuobj_unmap(struct nvkm_vma *vma)
-{
-	if (vma->node) {
-		nvkm_vm_unmap(vma);
-		nvkm_vm_put(vma);
-	}
-}
-
 /* the below is basically only here to support sharing the paged dma object
  * for PCI(E)GART on <=nv4x chipsets, and should *not* be expected to work
  * anywhere else.
......
@@ -53,7 +53,7 @@ nvkm_ioctl_sclass(struct nvkm_client *client,
 	union {
 		struct nvif_ioctl_sclass_v0 v0;
 	} *args = data;
-	struct nvkm_oclass oclass;
+	struct nvkm_oclass oclass = { .client = client };
 	int ret = -ENOSYS, i = 0;
 
 	nvif_ioctl(object, "sclass size %d\n", size);
@@ -257,13 +257,19 @@ nvkm_ioctl_map(struct nvkm_client *client,
 	union {
 		struct nvif_ioctl_map_v0 v0;
 	} *args = data;
+	enum nvkm_object_map type;
 	int ret = -ENOSYS;
 
 	nvif_ioctl(object, "map size %d\n", size);
-	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
 		nvif_ioctl(object, "map vers %d\n", args->v0.version);
-		ret = nvkm_object_map(object, &args->v0.handle,
-				      &args->v0.length);
+		ret = nvkm_object_map(object, data, size, &type,
+				      &args->v0.handle,
+				      &args->v0.length);
+		if (type == NVKM_OBJECT_MAP_IO)
+			args->v0.type = NVIF_IOCTL_MAP_V0_IO;
+		else
+			args->v0.type = NVIF_IOCTL_MAP_V0_VA;
 	}
 
 	return ret;
@@ -281,6 +287,7 @@ nvkm_ioctl_unmap(struct nvkm_client *client,
 	nvif_ioctl(object, "unmap size %d\n", size);
 	if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
 		nvif_ioctl(object, "unmap\n");
+		ret = nvkm_object_unmap(object);
 	}
 
 	return ret;
......
@@ -22,27 +22,116 @@
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
 #include <core/memory.h>
+#include <core/mm.h>
+#include <subdev/fb.h>
 #include <subdev/instmem.h>
 
+void
+nvkm_memory_tags_put(struct nvkm_memory *memory, struct nvkm_device *device,
+		     struct nvkm_tags **ptags)
+{
+	struct nvkm_fb *fb = device->fb;
+	struct nvkm_tags *tags = *ptags;
+	if (tags) {
+		mutex_lock(&fb->subdev.mutex);
+		if (refcount_dec_and_test(&tags->refcount)) {
+			nvkm_mm_free(&fb->tags, &tags->mn);
+			kfree(memory->tags);
+			memory->tags = NULL;
+		}
+		mutex_unlock(&fb->subdev.mutex);
+		*ptags = NULL;
+	}
+}
+
+int
+nvkm_memory_tags_get(struct nvkm_memory *memory, struct nvkm_device *device,
+		     u32 nr, void (*clr)(struct nvkm_device *, u32, u32),
+		     struct nvkm_tags **ptags)
+{
+	struct nvkm_fb *fb = device->fb;
+	struct nvkm_tags *tags;
+
+	mutex_lock(&fb->subdev.mutex);
+	if ((tags = memory->tags)) {
+		/* If comptags exist for the memory, but a different amount
+		 * than requested, the buffer is being mapped with settings
+		 * that are incompatible with existing mappings.
+		 */
+		if (tags->mn && tags->mn->length != nr) {
+			mutex_unlock(&fb->subdev.mutex);
+			return -EINVAL;
+		}
+
+		refcount_inc(&tags->refcount);
+		*ptags = tags;
+		return 0;
+	}
+
+	if (!(tags = kmalloc(sizeof(*tags), GFP_KERNEL))) {
+		mutex_unlock(&fb->subdev.mutex);
+		return -ENOMEM;
+	}
+
+	if (!nvkm_mm_head(&fb->tags, 0, 1, nr, nr, 1, &tags->mn)) {
+		if (clr)
+			clr(device, tags->mn->offset, tags->mn->length);
+	} else {
+		/* Failure to allocate HW comptags is not an error, the
+		 * caller should fall back to an uncompressed map.
+		 *
+		 * As memory can be mapped in multiple places, we still
+		 * need to track the allocation failure and ensure that
+		 * any additional mappings remain uncompressed.
+		 *
+		 * This is handled by returning an empty nvkm_tags.
+		 */
+		tags->mn = NULL;
+	}
+
+	refcount_set(&tags->refcount, 1);
+	mutex_unlock(&fb->subdev.mutex);
+	*ptags = tags;
+	return 0;
+}
+
 void
 nvkm_memory_ctor(const struct nvkm_memory_func *func,
 		 struct nvkm_memory *memory)
 {
 	memory->func = func;
+	kref_init(&memory->kref);
+}
+
+static void
+nvkm_memory_del(struct kref *kref)
+{
+	struct nvkm_memory *memory = container_of(kref, typeof(*memory), kref);
+	if (!WARN_ON(!memory->func)) {
+		if (memory->func->dtor)
+			memory = memory->func->dtor(memory);
+		kfree(memory);
+	}
 }
 
 void
-nvkm_memory_del(struct nvkm_memory **pmemory)
+nvkm_memory_unref(struct nvkm_memory **pmemory)
 {
 	struct nvkm_memory *memory = *pmemory;
-	if (memory && !WARN_ON(!memory->func)) {
-		if (memory->func->dtor)
-			*pmemory = memory->func->dtor(memory);
-		kfree(*pmemory);
+	if (memory) {
+		kref_put(&memory->kref, nvkm_memory_del);
 		*pmemory = NULL;
 	}
 }
 
+struct nvkm_memory *
+nvkm_memory_ref(struct nvkm_memory *memory)
+{
+	if (memory)
+		kref_get(&memory->kref);
+	return memory;
+}
+
 int
 nvkm_memory_new(struct nvkm_device *device, enum nvkm_memory_target target,
 		u64 size, u32 align, bool zero,
......
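With the switch from nvkm_memory_del() to the kref-based nvkm_memory_ref()/nvkm_memory_unref() pair above, one allocation can back several mappings and is only destroyed on the last put. A small sketch of the assumed lifecycle (call sites are illustrative):

```c
/* A second owner takes its own reference on an existing allocation. */
struct nvkm_memory *mine = nvkm_memory_ref(memory);	/* kref_get */

nvkm_memory_unref(&mine);	/* drops one reference, NULLs the pointer */
nvkm_memory_unref(&memory);	/* last put runs ->dtor via nvkm_memory_del */
```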
@@ -237,7 +237,7 @@ nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
 }
 
 int
-nvkm_mm_init(struct nvkm_mm *mm, u32 offset, u32 length, u32 block)
+nvkm_mm_init(struct nvkm_mm *mm, u8 heap, u32 offset, u32 length, u32 block)
 {
 	struct nvkm_mm_node *node, *prev;
 	u32 next;
@@ -274,7 +274,8 @@ nvkm_mm_init(struct nvkm_mm *mm, u32 offset, u32 length, u32 block)
 	list_add_tail(&node->nl_entry, &mm->nodes);
 	list_add_tail(&node->fl_entry, &mm->free);
-	node->heap = ++mm->heap_nodes;
+	node->heap = heap;
+	mm->heap_nodes++;
 	return 0;
 }
......
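The new heap parameter makes the heap tag explicit (previously an auto-incremented counter), so nvkm_mm_head()/nvkm_mm_tail() callers can target a specific region by id. A one-line sketch of an updated call, with illustrative names:

```c
/* Tag this region as heap 1; offset/length/block keep their old
 * meaning from the previous signature.  The variable names here are
 * assumptions, not taken from this diff.
 */
ret = nvkm_mm_init(&mm, 1, rsvd_head, size - rsvd_head, block);
```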
@@ -102,10 +102,19 @@ nvkm_object_ntfy(struct nvkm_object *object, u32 mthd,
 }
 
 int
-nvkm_object_map(struct nvkm_object *object, u64 *addr, u32 *size)
+nvkm_object_map(struct nvkm_object *object, void *argv, u32 argc,
+		enum nvkm_object_map *type, u64 *addr, u64 *size)
 {
 	if (likely(object->func->map))
-		return object->func->map(object, addr, size);
+		return object->func->map(object, argv, argc, type, addr, size);
+	return -ENODEV;
+}
+
+int
+nvkm_object_unmap(struct nvkm_object *object)
+{
+	if (likely(object->func->unmap))
+		return object->func->unmap(object);
 	return -ENODEV;
 }
 
@@ -259,6 +268,7 @@ nvkm_object_dtor(struct nvkm_object *object)
 	}
 
 	nvif_debug(object, "destroy running...\n");
+	nvkm_object_unmap(object);
 	if (object->func->dtor)
 		data = object->func->dtor(object);
 	nvkm_engine_unref(&object->engine);
......
@@ -37,9 +37,17 @@ nvkm_oproxy_ntfy(struct nvkm_object *object, u32 mthd,
 }
 
 static int
-nvkm_oproxy_map(struct nvkm_object *object, u64 *addr, u32 *size)
+nvkm_oproxy_map(struct nvkm_object *object, void *argv, u32 argc,
+		enum nvkm_object_map *type, u64 *addr, u64 *size)
 {
-	return nvkm_object_map(nvkm_oproxy(object)->object, addr, size);
+	struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
+	return nvkm_object_map(oproxy->object, argv, argc, type, addr, size);
+}
+
+static int
+nvkm_oproxy_unmap(struct nvkm_object *object)
+{
+	return nvkm_object_unmap(nvkm_oproxy(object)->object);
 }
 
 static int
@@ -171,6 +179,7 @@ nvkm_oproxy_func = {
 	.mthd = nvkm_oproxy_mthd,
 	.ntfy = nvkm_oproxy_ntfy,
 	.map = nvkm_oproxy_map,
+	.unmap = nvkm_oproxy_unmap,
 	.rd08 = nvkm_oproxy_rd08,
 	.rd16 = nvkm_oproxy_rd16,
 	.rd32 = nvkm_oproxy_rd32,
......
@@ -21,6 +21,7 @@
  */
 #include <core/ramht.h>
 #include <core/engine.h>
+#include <core/object.h>
 
 static u32
 nvkm_ramht_hash(struct nvkm_ramht *ramht, int chid, u32 handle)
......
 #ifndef __NVKM_DEVICE_CTRL_H__
 #define __NVKM_DEVICE_CTRL_H__
 #define nvkm_control(p) container_of((p), struct nvkm_control, object)
-#include <core/device.h>
+#include <core/object.h>
 
 struct nvkm_control {
 	struct nvkm_object object;
......
 #ifndef __NV50_DISP_CHAN_H__
 #define __NV50_DISP_CHAN_H__
 #define nv50_disp_chan(p) container_of((p), struct nv50_disp_chan, object)
+#include <core/object.h>
 #include "nv50.h"
 
 struct nv50_disp_chan {
......
(Remaining file diffs collapsed in the original view.)