#ifndef __NVKM_VMM_H__
#define __NVKM_VMM_H__
#include "priv.h"
#include <core/memory.h>
enum nvkm_memory_target;

struct nvkm_vmm_pt {
	/* Some GPUs have a mapping level with dual page tables to
	 * support large and small pages in the same address range.
	 *
	 * We track the state of both page tables in one place, which
	 * is why there are multiple PT pointers/refcounts here.
	 */
	struct nvkm_mmu_pt *pt[2];
	u32 refs[2];

	/* Page size handled by this PT.
	 *
	 * The Tesla backend needs to know this when writing PDEs;
	 * it's otherwise unnecessary.
	 */
	u8 page;

	/* Entire page table sparse.
	 *
	 * Used to propagate sparseness to child page tables.
	 */
	bool sparse:1;

	/* Tracking for page directories.
	 *
	 * The array is indexed by PDE, and will either point to the
	 * child page table, or indicate the PDE is marked as sparse.
	 */
#define NVKM_VMM_PDE_INVALID(pde) IS_ERR_OR_NULL(pde)
#define NVKM_VMM_PDE_SPARSED(pde) IS_ERR(pde)
#define NVKM_VMM_PDE_SPARSE       ERR_PTR(-EBUSY)
	struct nvkm_vmm_pt **pde;

	/* Tracking for dual page tables.
	 *
	 * There's one entry for each LPTE, keeping track of whether
	 * there are valid SPTEs in the same address range.
	 *
	 * This information is used to manage LPTE state transitions.
	 */
#define NVKM_VMM_PTE_SPARSE 0x80
#define NVKM_VMM_PTE_VALID  0x40
#define NVKM_VMM_PTE_SPTES  0x3f
	u8 pte[];
};
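
/* Illustrative helpers (a sketch only; these names are hypothetical and
 * not part of the original API) showing how the PDE markers and dual-PT
 * state bytes above are meant to be read.
 */
static inline bool
nvkm_vmm_pde_sparse(const struct nvkm_vmm_pt *pgd, u32 pdei)
{
	/* ERR_PTR(-EBUSY) marks a sparse PDE; NULL is simply empty. */
	return NVKM_VMM_PDE_SPARSED(pgd->pde[pdei]);
}

static inline u8
nvkm_vmm_lpte_sptes(const struct nvkm_vmm_pt *pgt, u32 lptei)
{
	/* The low six bits count valid SPTEs in this LPTE's range. */
	return pgt->pte[lptei] & NVKM_VMM_PTE_SPTES;
}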

typedef void (*nvkm_vmm_pxe_func)(struct nvkm_vmm *,
				  struct nvkm_mmu_pt *, u32 ptei, u32 ptes);
typedef void (*nvkm_vmm_pde_func)(struct nvkm_vmm *,
				  struct nvkm_vmm_pt *, u32 pdei);
typedef void (*nvkm_vmm_pte_func)(struct nvkm_vmm *, struct nvkm_mmu_pt *,
				  u32 ptei, u32 ptes, struct nvkm_vmm_map *);

struct nvkm_vmm_desc_func {
	nvkm_vmm_pxe_func invalid;
	nvkm_vmm_pxe_func unmap;
	nvkm_vmm_pxe_func sparse;

	nvkm_vmm_pde_func pde;

	nvkm_vmm_pte_func mem;
	nvkm_vmm_pte_func dma;
	nvkm_vmm_pte_func sgl;
};

extern const struct nvkm_vmm_desc_func gf100_vmm_pgd;
void gf100_vmm_pgd_pde(struct nvkm_vmm *, struct nvkm_vmm_pt *, u32);
extern const struct nvkm_vmm_desc_func gf100_vmm_pgt;
void gf100_vmm_pgt_unmap(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32);
void gf100_vmm_pgt_mem(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32,
		       struct nvkm_vmm_map *);
void gf100_vmm_pgt_dma(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32,
		       struct nvkm_vmm_map *);
void gf100_vmm_pgt_sgl(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32,
		       struct nvkm_vmm_map *);
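
/* Sketch of how a backend is expected to wire the ops above into a
 * desc_func table (an assumption based on the declarations here; the
 * real definition lives in the gf100 backend source file):
 *
 *	const struct nvkm_vmm_desc_func
 *	gf100_vmm_pgt = {
 *		.unmap = gf100_vmm_pgt_unmap,
 *		.mem   = gf100_vmm_pgt_mem,
 *		.dma   = gf100_vmm_pgt_dma,
 *		.sgl   = gf100_vmm_pgt_sgl,
 *	};
 */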

void gk104_vmm_lpt_invalid(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32);

struct nvkm_vmm_desc {
	enum {
		PGD,
		PGT,
		SPT,
		LPT,
	} type;
	u8 bits;	/* VMA bits covered by PT. */
	u8 size;	/* Bytes-per-PTE. */
	u32 align;	/* PT address alignment. */
	const struct nvkm_vmm_desc_func *func;
};
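
/* Hypothetical helpers (a sketch, not original API) showing the geometry
 * implied by the descriptor fields: a PT at this level has 1 << bits
 * entries of 'size' bytes each.
 */
static inline u32
nvkm_vmm_desc_ptes(const struct nvkm_vmm_desc *desc)
{
	return 1U << desc->bits;	/* entries per PT */
}

static inline u32
nvkm_vmm_desc_bytes(const struct nvkm_vmm_desc *desc)
{
	return nvkm_vmm_desc_ptes(desc) * desc->size;	/* PT size in bytes */
}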

extern const struct nvkm_vmm_desc gk104_vmm_desc_16_12[];
extern const struct nvkm_vmm_desc gk104_vmm_desc_16_16[];
extern const struct nvkm_vmm_desc gk104_vmm_desc_17_12[];
extern const struct nvkm_vmm_desc gk104_vmm_desc_17_17[];

extern const struct nvkm_vmm_desc gm200_vmm_desc_16_12[];
extern const struct nvkm_vmm_desc gm200_vmm_desc_16_16[];
extern const struct nvkm_vmm_desc gm200_vmm_desc_17_12[];
extern const struct nvkm_vmm_desc gm200_vmm_desc_17_17[];

extern const struct nvkm_vmm_desc gp100_vmm_desc_12[];
extern const struct nvkm_vmm_desc gp100_vmm_desc_16[];

struct nvkm_vmm_page {
	u8 shift;
	const struct nvkm_vmm_desc *desc;
#define NVKM_VMM_PAGE_SPARSE                                               0x01
#define NVKM_VMM_PAGE_VRAM                                                 0x02
#define NVKM_VMM_PAGE_HOST                                                 0x04
#define NVKM_VMM_PAGE_COMP                                                 0x08
#define NVKM_VMM_PAGE_Sxxx                                (NVKM_VMM_PAGE_SPARSE)
#define NVKM_VMM_PAGE_xVxx                                  (NVKM_VMM_PAGE_VRAM)
#define NVKM_VMM_PAGE_SVxx             (NVKM_VMM_PAGE_Sxxx | NVKM_VMM_PAGE_VRAM)
#define NVKM_VMM_PAGE_xxHx                                  (NVKM_VMM_PAGE_HOST)
#define NVKM_VMM_PAGE_SxHx             (NVKM_VMM_PAGE_Sxxx | NVKM_VMM_PAGE_HOST)
#define NVKM_VMM_PAGE_xVHx             (NVKM_VMM_PAGE_xVxx | NVKM_VMM_PAGE_HOST)
#define NVKM_VMM_PAGE_SVHx             (NVKM_VMM_PAGE_SVxx | NVKM_VMM_PAGE_HOST)
#define NVKM_VMM_PAGE_xVxC             (NVKM_VMM_PAGE_xVxx | NVKM_VMM_PAGE_COMP)
#define NVKM_VMM_PAGE_SVxC             (NVKM_VMM_PAGE_SVxx | NVKM_VMM_PAGE_COMP)
#define NVKM_VMM_PAGE_xxHC             (NVKM_VMM_PAGE_xxHx | NVKM_VMM_PAGE_COMP)
#define NVKM_VMM_PAGE_SxHC             (NVKM_VMM_PAGE_SxHx | NVKM_VMM_PAGE_COMP)
	u8 type;
};
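
/* The NVKM_VMM_PAGE_* letter codes above spell out Sparse/Vram/Host/Comp
 * support, with 'x' marking an unsupported capability: e.g. SVxC means
 * sparse-capable, VRAM-backed, compressible, but not host-backed. A
 * hypothetical decode helper (sketch only):
 */
static inline bool
nvkm_vmm_page_comp(const struct nvkm_vmm_page *page)
{
	return (page->type & NVKM_VMM_PAGE_COMP) != 0;
}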

struct nvkm_vmm_func {
	int (*join)(struct nvkm_vmm *, struct nvkm_memory *inst);
	void (*part)(struct nvkm_vmm *, struct nvkm_memory *inst);

	int (*aper)(enum nvkm_memory_target);
	int (*valid)(struct nvkm_vmm *, void *argv, u32 argc,
		     struct nvkm_vmm_map *);
	void (*flush)(struct nvkm_vmm *, int depth);

	u64 page_block;
	const struct nvkm_vmm_page page[];
};
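
/* Sketch of a page[] lookup (assuming, as the backends arrange, that the
 * array is terminated by an entry with a zero shift; the helper name is
 * hypothetical):
 */
static inline const struct nvkm_vmm_page *
nvkm_vmm_page_find(const struct nvkm_vmm_func *func, u8 shift)
{
	const struct nvkm_vmm_page *page;

	for (page = func->page; page->shift; page++) {
		if (page->shift == shift)
			return page;
	}
	return NULL;
}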

struct nvkm_vmm_join {
	struct nvkm_memory *inst;
	struct list_head head;
};

int nvkm_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *,
		  u32 pd_header, u64 addr, u64 size, struct lock_class_key *,
		  const char *name, struct nvkm_vmm **);
int nvkm_vmm_ctor(const struct nvkm_vmm_func *, struct nvkm_mmu *,
		  u32 pd_header, u64 addr, u64 size, struct lock_class_key *,
		  const char *name, struct nvkm_vmm *);
void nvkm_vmm_dtor(struct nvkm_vmm *);
void nvkm_vmm_ptes_put(struct nvkm_vmm *, const struct nvkm_vmm_page *,
		       u64 addr, u64 size);
int nvkm_vmm_ptes_get(struct nvkm_vmm *, const struct nvkm_vmm_page *,
		      u64 addr, u64 size);
void nvkm_vmm_ptes_map(struct nvkm_vmm *, const struct nvkm_vmm_page *,
		       u64 addr, u64 size, struct nvkm_vmm_map *,
		       nvkm_vmm_pte_func);
void nvkm_vmm_ptes_unmap(struct nvkm_vmm *, const struct nvkm_vmm_page *,
			 u64 addr, u64 size, bool sparse);
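
/* Lifecycle sketch (an assumption from the declarations above): callers
 * reference the PTEs covering a range before writing them, and drop the
 * references once the range has been unmapped. pte_func would be one of
 * the nvkm_vmm_pte_func backends, e.g. a desc_func's ->mem:
 *
 *	ret = nvkm_vmm_ptes_get(vmm, page, addr, size);
 *	if (ret)
 *		return ret;
 *	nvkm_vmm_ptes_map(vmm, page, addr, size, map, pte_func);
 *	...
 *	nvkm_vmm_ptes_unmap(vmm, page, addr, size, false);
 *	nvkm_vmm_ptes_put(vmm, page, addr, size);
 */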

int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32,
		  u64, u64, void *, u32, struct lock_class_key *,
		  const char *, struct nvkm_vmm **);
int nv04_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);

int gf100_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
		   struct nvkm_mmu *, u64, u64, void *, u32,
		   struct lock_class_key *, const char *, struct nvkm_vmm **);
int gf100_vmm_join_(struct nvkm_vmm *, struct nvkm_memory *, u64 base);
int gf100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
void gf100_vmm_part(struct nvkm_vmm *, struct nvkm_memory *);
int gf100_vmm_aper(enum nvkm_memory_target);
int gf100_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
void gf100_vmm_flush_(struct nvkm_vmm *, int);
void gf100_vmm_flush(struct nvkm_vmm *, int);

int gk20a_vmm_aper(enum nvkm_memory_target);

int gm200_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
		   struct nvkm_mmu *, u64, u64, void *, u32,
		   struct lock_class_key *, const char *, struct nvkm_vmm **);
int gm200_vmm_join_(struct nvkm_vmm *, struct nvkm_memory *, u64 base);
int gm200_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);

int gp100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);

int nv04_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
		 struct lock_class_key *, const char *, struct nvkm_vmm **);
int nv41_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
		 struct lock_class_key *, const char *, struct nvkm_vmm **);
int nv44_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
		 struct lock_class_key *, const char *, struct nvkm_vmm **);
int nv50_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
		 struct lock_class_key *, const char *, struct nvkm_vmm **);
int g84_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
		struct lock_class_key *, const char *, struct nvkm_vmm **);
int gf100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
		  struct lock_class_key *, const char *, struct nvkm_vmm **);
int gk104_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
		  struct lock_class_key *, const char *, struct nvkm_vmm **);
int gk20a_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
		  struct lock_class_key *, const char *, struct nvkm_vmm **);
int gm200_vmm_new_fixed(struct nvkm_mmu *, u64, u64, void *, u32,
			struct lock_class_key *, const char *,
			struct nvkm_vmm **);
int gm200_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
		  struct lock_class_key *, const char *,
		  struct nvkm_vmm **);
int gm20b_vmm_new_fixed(struct nvkm_mmu *, u64, u64, void *, u32,
			struct lock_class_key *, const char *,
			struct nvkm_vmm **);
int gm20b_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
		  struct lock_class_key *, const char *,
		  struct nvkm_vmm **);
int gp100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
		  struct lock_class_key *, const char *,
		  struct nvkm_vmm **);
int gp10b_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
		  struct lock_class_key *, const char *,
		  struct nvkm_vmm **);

#define VMM_PRINT(l,v,p,f,a...) do {                                           \
	struct nvkm_vmm *_vmm = (v);                                           \
	if (CONFIG_NOUVEAU_DEBUG >= (l) && _vmm->debug >= (l)) {               \
		nvkm_printk_(&_vmm->mmu->subdev, 0, p, "%s: "f"\n",            \
			     _vmm->name, ##a);                                 \
	}                                                                      \
} while(0)
#define VMM_DEBUG(v,f,a...) VMM_PRINT(NV_DBG_DEBUG, (v), info, f, ##a)
#define VMM_TRACE(v,f,a...) VMM_PRINT(NV_DBG_TRACE, (v), info, f, ##a)
#define VMM_SPAM(v,f,a...)  VMM_PRINT(NV_DBG_SPAM , (v),  dbg, f, ##a)
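
/* Usage sketch: VMM_DEBUG(vmm, "%016llx: not mapped", addr) prints through
 * the owning MMU's subdev, prefixed with the VMM's name, but only when both
 * the compile-time (CONFIG_NOUVEAU_DEBUG) and per-VMM runtime debug levels
 * are at least the requested level. */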

#define VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL,BASE,SIZE,NEXT) do {            \
	nvkm_kmap((PT)->memory);                                               \
	while (PTEN) {                                                         \
		u64 _ptes = ((SIZE) - MAP->off) >> MAP->page->shift;           \
		u64 _addr = ((BASE) + MAP->off);                               \
                                                                               \
		if (_ptes > PTEN) {                                            \
			MAP->off += PTEN << MAP->page->shift;                  \
			_ptes = PTEN;                                          \
		} else {                                                       \
			MAP->off = 0;                                          \
			NEXT;                                                  \
		}                                                              \
                                                                               \
		VMM_SPAM(VMM, "ITER %08x %08x PTE(s)", PTEI, (u32)_ptes);      \
                                                                               \
		FILL(VMM, PT, PTEI, _ptes, MAP, _addr);                        \
		PTEI += _ptes;                                                 \
		PTEN -= _ptes;                                                 \
	}                                                                      \
	nvkm_done((PT)->memory);                                               \
} while(0)
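
/* The FILL argument is invoked as FILL(vmm, pt, ptei, ptes, map, addr).
 * A sketch of the expected shape, writing one PTE per page (hypothetical
 * backend; the PTE layout here is made up for illustration):
 *
 *	static void
 *	example_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
 *			    u32 ptei, u32 ptes, struct nvkm_vmm_map *map,
 *			    u64 addr)
 *	{
 *		while (ptes--) {
 *			VMM_WO064(pt, vmm, ptei++ * 8, addr | 1);
 *			addr += 1ULL << map->page->shift;
 *		}
 *	}
 */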

#define VMM_MAP_ITER_MEM(VMM,PT,PTEI,PTEN,MAP,FILL)                            \
	VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL,                                \
		     ((u64)MAP->mem->offset << NVKM_RAM_MM_SHIFT),             \
		     ((u64)MAP->mem->length << NVKM_RAM_MM_SHIFT),             \
		     (MAP->mem = MAP->mem->next))
#define VMM_MAP_ITER_DMA(VMM,PT,PTEI,PTEN,MAP,FILL)                            \
	VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL,                                \
		     *MAP->dma, PAGE_SIZE, MAP->dma++)
#define VMM_MAP_ITER_SGL(VMM,PT,PTEI,PTEN,MAP,FILL)                            \
	VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL,                                \
		     sg_dma_address(MAP->sgl), sg_dma_len(MAP->sgl),           \
		     (MAP->sgl = sg_next(MAP->sgl)))

#define VMM_FO(m,o,d,c,b) nvkm_fo##b((m)->memory, (o), (d), (c))
#define VMM_WO(m,o,d,c,b) nvkm_wo##b((m)->memory, (o), (d))
#define VMM_XO(m,v,o,d,c,b,fn,f,a...) do {                                     \
	const u32 _pteo = (o); u##b _data = (d);                               \
	VMM_SPAM((v), "   %010llx "f, (m)->addr + _pteo, _data, ##a);          \
	VMM_##fn((m), (m)->base + _pteo, _data, (c), b);                       \
} while(0)

#define VMM_WO032(m,v,o,d) VMM_XO((m),(v),(o),(d),  1, 32, WO, "%08x")
#define VMM_FO032(m,v,o,d,c)                                                   \
	VMM_XO((m),(v),(o),(d),(c), 32, FO, "%08x %08x", (c))

#define VMM_WO064(m,v,o,d) VMM_XO((m),(v),(o),(d),  1, 64, WO, "%016llx")
#define VMM_FO064(m,v,o,d,c)                                                   \
	VMM_XO((m),(v),(o),(d),(c), 64, FO, "%016llx %08x", (c))
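
/* E.g. VMM_WO064(pt, vmm, ptei * 8, data) writes a single 64-bit PTE and
 * logs it at SPAM level, while VMM_FO064(pt, vmm, ptei * 8, data, ptes)
 * fills 'ptes' consecutive entries with the same value. Writes go through
 * pt->memory, so callers bracket them with nvkm_kmap()/nvkm_done() (as
 * VMM_MAP_ITER and VMM_FO128 do). */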

#define VMM_XO128(m,v,o,lo,hi,c,f,a...) do {                                   \
	u32 _pteo = (o), _ptes = (c);                                          \
	const u64 _addr = (m)->addr + _pteo;                                   \
	VMM_SPAM((v), "   %010llx %016llx%016llx"f, _addr, (hi), (lo), ##a);   \
	while (_ptes--) {                                                      \
		nvkm_wo64((m)->memory, (m)->base + _pteo + 0, (lo));           \
		nvkm_wo64((m)->memory, (m)->base + _pteo + 8, (hi));           \
		_pteo += 0x10;                                                 \
	}                                                                      \
} while(0)

#define VMM_WO128(m,v,o,lo,hi) VMM_XO128((m),(v),(o),(lo),(hi), 1, "")
#define VMM_FO128(m,v,o,lo,hi,c) do {                                          \
	nvkm_kmap((m)->memory);                                                \
	VMM_XO128((m),(v),(o),(lo),(hi),(c), " %08x", (c));                    \
	nvkm_done((m)->memory);                                                \
} while(0)
#endif