#include "drmP.h"
#include "nouveau_drv.h"
#include <linux/pagemap.h>
#include <linux/slab.h>

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)

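/*
 * Per-backend state for the TTM SG-DMA backend: the PCI bus addresses
 * of the currently populated pages, and the GART offset the backend
 * was last bound at.
 */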
struct nouveau_sgdma_be {
	struct ttm_backend backend;
	struct drm_device *dev;

	dma_addr_t *pages;
	unsigned nr_pages;

	u64 offset;
	bool bound;
};

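/*
 * TTM "populate" hook: DMA-map every backing page through the PCI
 * layer and record the resulting bus addresses for a later bind().
 */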
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
		       struct page **pages, struct page *dummy_read_page)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;

	NV_DEBUG(nvbe->dev, "num_pages = %lu\n", num_pages);

	if (nvbe->pages)
		return -EINVAL;

	nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
	if (!nvbe->pages)
		return -ENOMEM;

	nvbe->nr_pages = 0;
	while (num_pages--) {
		nvbe->pages[nvbe->nr_pages] =
			pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev,
					  nvbe->pages[nvbe->nr_pages])) {
			be->func->clear(be);
			return -EFAULT;
		}

		nvbe->nr_pages++;
	}

	return 0;
}

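/*
 * TTM "clear" hook: undo populate() -- unbind if still bound, then
 * unmap and free the page address list.
 */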
static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev;

	if (nvbe && nvbe->pages) {
		dev = nvbe->dev;
		NV_DEBUG(dev, "\n");

		if (nvbe->bound)
			be->func->unbind(be);

		while (nvbe->nr_pages--) {
			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
		kfree(nvbe->pages);
		nvbe->pages = NULL;
		nvbe->nr_pages = 0;
	}
}

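/*
 * TTM "bind" hook for pre-NV50 chips: write the bus address of every
 * populated page into the ctxdma's page table, starting at the GART
 * offset TTM chose for this buffer.
 */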
static int
nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);

	nvbe->offset = mem->start << PAGE_SHIFT;
	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
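	/*
	 * PTEs 0 and 1 hold the two-word ctxdma header (hence the "+ 2"
	 * above); write one 32-bit entry per 4KiB GART page, with the
	 * low flag bits (0x3) marking the entry valid.
	 */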
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = nvbe->pages[i];
		uint32_t offset_l = lower_32_bits(dma_offset);

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
			nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}

	nvbe->bound = true;
	return 0;
}

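/*
 * TTM "unbind" hook for pre-NV50 chips: zero the PTEs written by
 * nouveau_sgdma_bind().
 */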
static int
nouveau_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "\n");

	if (!nvbe->bound)
		return 0;

	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
	for (i = 0; i < nvbe->nr_pages; i++) {
		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
			nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
	}

	nvbe->bound = false;
	return 0;
}

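/* TTM "destroy" hook: release any remaining mappings, then free the backend. */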
static void
nouveau_sgdma_destroy(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

	if (be) {
		NV_DEBUG(nvbe->dev, "\n");

		/* nvbe is the same pointer as be, no second NULL check needed */
		if (nvbe->pages)
			be->func->clear(be);
		kfree(nvbe);
	}
}

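/*
 * On NV50+ the GART is a window in the channel VM rather than a
 * ctxdma, so binding is just a scatterlist map into that window.
 */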
static int
nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;

	nvbe->offset = mem->start << PAGE_SHIFT;

	nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
			  nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
	nvbe->bound = true;
	return 0;
}

static int
nv50_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;

	if (!nvbe->bound)
		return 0;

	nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
			    nvbe->nr_pages << PAGE_SHIFT);
	nvbe->bound = false;
	return 0;
}

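/* Backend methods for pre-NV50 (ctxdma-based) GART. */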
static struct ttm_backend_func nouveau_sgdma_backend = {
	.populate		= nouveau_sgdma_populate,
	.clear			= nouveau_sgdma_clear,
	.bind			= nouveau_sgdma_bind,
	.unbind			= nouveau_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

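/* Backend methods for NV50+ (VM-based) GART. */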
static struct ttm_backend_func nv50_sgdma_backend = {
	.populate		= nouveau_sgdma_populate,
	.clear			= nouveau_sgdma_clear,
	.bind			= nv50_sgdma_bind,
	.unbind			= nv50_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

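/*
 * Allocate a TTM backend instance, wired to the method table that
 * matches the chipset generation.
 */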
struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_sgdma_be *nvbe;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = dev;

	if (dev_priv->card_type < NV_50)
		nvbe->backend.func = &nouveau_sgdma_backend;
	else
		nvbe->backend.func = &nv50_sgdma_backend;
	return &nvbe->backend;
}

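/*
 * Set up the GART at load time: pre-NV50 chips get a ctxdma object
 * holding a page table in instance memory; NV50+ chips get a 512MiB
 * window carved out of the channel VM.
 */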
int
nouveau_sgdma_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	uint32_t aper_size, obj_size;
	int i, ret;

	if (dev_priv->card_type < NV_50) {
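		/*
		 * The page table costs 4 bytes of instance memory per 4KiB
		 * page, so when little RAMIN is reserved, settle for a
		 * smaller aperture.
		 */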
		if (dev_priv->ramin_rsvd_vram < 2 * 1024 * 1024)
			aper_size = 64 * 1024 * 1024;
		else
			aper_size = 512 * 1024 * 1024;

		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
		obj_size += 8; /* ctxdma header */

		ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
					 NVOBJ_FLAG_ZERO_ALLOC |
					 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
		if (ret) {
			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
			return ret;
		}

		nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
				   (1 << 12) /* PT present */ |
				   (0 << 13) /* PT *not* linear */ |
				   (0 << 14) /* RW */ |
				   (2 << 16) /* PCI */);
		nv_wo32(gpuobj, 4, aper_size - 1);
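		/* Start with every PTE invalid. */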
		for (i = 2; i < 2 + (aper_size >> 12); i++)
			nv_wo32(gpuobj, i * 4, 0x00000000);

		dev_priv->gart_info.sg_ctxdma = gpuobj;
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
	} else
	if (dev_priv->chan_vm) {
		ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024,
				     12, NV_MEM_ACCESS_RW,
				     &dev_priv->gart_info.vma);
		if (ret)
			return ret;

		dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
		dev_priv->gart_info.aper_size = 512 * 1024 * 1024;
	}

	dev_priv->gart_info.type      = NOUVEAU_GART_SGDMA;
	return 0;
}

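/* Release the GART resources created by nouveau_sgdma_init(). */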
void
nouveau_sgdma_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
	nouveau_vm_put(&dev_priv->gart_info.vma);
}

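/*
 * Translate a byte offset inside the GART aperture back to the bus
 * address of the backing page by reading the ctxdma PTE.  Only
 * meaningful on pre-NV50 chips, where the ctxdma page table exists.
 */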
uint32_t
nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;

	BUG_ON(dev_priv->card_type >= NV_50);

	return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
		(offset & NV_CTXDMA_PAGE_MASK);
}