/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_ramht.h"

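/* Set up the software-side state of a channel's push buffer before any
 * commands are submitted.  On NV50 the upper half of the buffer is
 * reserved as the indirect buffer (IB) ring of 8-byte entries, so
 * dma.max only covers the lower half; on earlier cards the whole
 * buffer is usable, minus two reserved dwords.
 */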
void
nouveau_dma_pre_init(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_bo *pushbuf = chan->pushbuf_bo;

	if (dev_priv->card_type == NV_50) {
		const int ib_size = pushbuf->bo.mem.size / 2;

		chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
		chan->dma.ib_max = (ib_size / 8) - 1;
		chan->dma.ib_put = 0;
		chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;

		chan->dma.max = (pushbuf->bo.mem.size - ib_size) >> 2;
	} else {
		chan->dma.max  = (pushbuf->bo.mem.size >> 2) - 2;
	}

	chan->dma.put  = 0;
	chan->dma.cur  = chan->dma.put;
	chan->dma.free = chan->dma.max - chan->dma.cur;
}

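/* Perform the one-time setup a new channel needs before it can be used
 * for command submission: create and bind the M2MF object used for
 * buffer moves and the NV_SW object used for synchronisation, allocate
 * M2MF's notifier, map the push buffer and emit the initial binding
 * commands.
 */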
int
nouveau_dma_init(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *obj = NULL;
	int ret, i;

	/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
	ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ?
				    0x0039 : 0x5039, &obj);
	if (ret)
		return ret;

	ret = nouveau_ramht_insert(chan, NvM2MF, obj);
	nouveau_gpuobj_ref(NULL, &obj);
	if (ret)
		return ret;

	/* Create an NV_SW object for various sync purposes */
	ret = nouveau_gpuobj_sw_new(chan, NV_SW, &obj);
	if (ret)
		return ret;

	ret = nouveau_ramht_insert(chan, NvSw, obj);
	nouveau_gpuobj_ref(NULL, &obj);
	if (ret)
		return ret;

	/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
	ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy);
	if (ret)
		return ret;

	/* Map push buffer */
	ret = nouveau_bo_map(chan->pushbuf_bo);
	if (ret)
		return ret;

	/* Insert NOPS for NOUVEAU_DMA_SKIPS */
	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
	if (ret)
		return ret;

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING(chan, 0);

	/* Initialise NV_MEMORY_TO_MEMORY_FORMAT */
	ret = RING_SPACE(chan, 4);
	if (ret)
		return ret;
	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
	OUT_RING(chan, NvM2MF);
	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
	OUT_RING(chan, NvNotify0);

	/* Initialise NV_SW */
	ret = RING_SPACE(chan, 2);
	if (ret)
		return ret;
	BEGIN_RING(chan, NvSubSw, 0, 1);
	OUT_RING(chan, NvSw);

	/* Sit back and pray the channel works... */
	FIRE_RING(chan);

	return 0;
}

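/* Copy nr_dwords of command data into the push buffer at the current
 * position.  The push buffer may be mapped as I/O memory, hence the
 * memcpy_toio() path.
 */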
void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&chan->pushbuf_bo->kmap, &is_iomem);
	mem = &mem[chan->dma.cur];
	if (is_iomem)
		memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4);
	else
		memcpy(mem, data, nr_dwords * 4);
	chan->dma.cur += nr_dwords;
}

/* Fetch and adjust GPU GET pointer
 *
 * Returns:
 *  value >= 0, the adjusted GET pointer
 *  -EINVAL if GET pointer currently outside main push buffer
 *  -EBUSY if timeout exceeded
 */
static inline int
READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
{
	uint32_t val;

	val = nvchan_rd32(chan, chan->user_get);

	/* Reset the counter as long as GET is still advancing; this
	 * avoids misdetecting a GPU lockup when the GPU is simply
	 * processing a long-running operation.
	 */
	if (val != *prev_get) {
		*prev_get = val;
		*timeout = 0;
	}

	if ((++*timeout & 0xff) == 0) {
		DRM_UDELAY(1);
		if (*timeout > 100000)
			return -EBUSY;
	}

	if (val < chan->pushbuf_base ||
	    val > chan->pushbuf_base + (chan->dma.max << 2))
		return -EINVAL;

	return (val - chan->pushbuf_base) >> 2;
}

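/* Write an IB entry pointing at an external command buffer (GPU address
 * bo->bo.offset + delta) and ring the doorbell so PFIFO starts fetching
 * from it.
 */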
void
nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
	      int delta, int length)
{
	struct nouveau_bo *pb = chan->pushbuf_bo;
	uint64_t offset = bo->bo.offset + delta;
	int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;

	BUG_ON(chan->dma.ib_free < 1);
	nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
	nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);

	chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;

	DRM_MEMORYBARRIER();
	/* Flush writes. */
	nouveau_bo_rd32(pb, 0);

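	/* 0x8c holds the IB PUT pointer in the channel's control area */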
	nvchan_wr32(chan, 0x8c, chan->dma.ib_put);
	chan->dma.ib_free--;
}

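/* Spin until at least "count" entries are free in the IB ring, using
 * the same advancing-GET heuristic as READ_GET() to tell a busy GPU
 * apart from a locked-up one.
 */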
static int
nv50_dma_push_wait(struct nouveau_channel *chan, int count)
{
	uint32_t cnt = 0, prev_get = 0;

	while (chan->dma.ib_free < count) {
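		/* 0x88 holds the GPU's IB GET pointer */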
		uint32_t get = nvchan_rd32(chan, 0x88);
		if (get != prev_get) {
			prev_get = get;
			cnt = 0;
		}

		if ((++cnt & 0xff) == 0) {
			DRM_UDELAY(1);
			if (cnt > 100000)
				return -EBUSY;
		}

		chan->dma.ib_free = get - chan->dma.ib_put;
		if (chan->dma.ib_free <= 0)
			chan->dma.ib_free += chan->dma.ib_max + 1;
	}

	return 0;
}

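/* Wait for space for "slots" IB entries (plus one spare) and "count"
 * dwords in the main push buffer.
 */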
static int
nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
{
	uint32_t cnt = 0, prev_get = 0;
	int ret;

	ret = nv50_dma_push_wait(chan, slots + 1);
	if (unlikely(ret))
		return ret;

	while (chan->dma.free < count) {
		int get = READ_GET(chan, &prev_get, &cnt);
		if (unlikely(get < 0)) {
			if (get == -EINVAL)
				continue;

			return get;
		}

		if (get <= chan->dma.cur) {
			chan->dma.free = chan->dma.max - chan->dma.cur;
			if (chan->dma.free >= count)
				break;

			FIRE_RING(chan);
			do {
				get = READ_GET(chan, &prev_get, &cnt);
				if (unlikely(get < 0)) {
					if (get == -EINVAL)
						continue;
					return get;
				}
			} while (get == 0);
			chan->dma.cur = 0;
			chan->dma.put = 0;
		}

		chan->dma.free = get - chan->dma.cur - 1;
	}

	return 0;
}

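/* Ensure at least "size" dwords of push buffer space are available
 * before returning, spinning on the GPU's GET pointer if necessary.
 * Channels with an IB ring are handled by nv50_dma_wait(), which also
 * reserves "slots" IB entries.
 */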
int
nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
{
	uint32_t prev_get = 0, cnt = 0;
	int get;

	if (chan->dma.ib_max)
		return nv50_dma_wait(chan, slots, size);

	while (chan->dma.free < size) {
		get = READ_GET(chan, &prev_get, &cnt);
		if (unlikely(get == -EBUSY))
			return -EBUSY;

		/* Loop until we have a usable GET pointer.  The value
		 * we read from the GPU may be outside the main ring if
		 * PFIFO is processing a buffer called from the main ring;
		 * discard these values until something sensible is seen.
		 *
		 * GET is also discarded while the GPU is fetching from
		 * the SKIPS area, so the code below doesn't have to deal
		 * with some fun corner cases.
		 */
		if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS)
			continue;

		if (get <= chan->dma.cur) {
			/* The engine is fetching behind us, or is completely
			 * idle (GET == PUT), so we have free space up until
			 * the end of the push buffer.
			 *
			 * We can only hit this path once per call: after
			 * looping back to the beginning of the push buffer,
			 * we'll hit the fetching-ahead-of-us path from that
			 * point on.
			 *
			 * The *one* exception to that rule is if we read
			 * GET == PUT, in which case the conditional below
			 * will always succeed and break us out of the wait
			 * loop.
			 */
			chan->dma.free = chan->dma.max - chan->dma.cur;
			if (chan->dma.free >= size)
				break;

			/* Not enough space left at the end of the push buffer;
			 * instruct the GPU to jump back to the start right
			 * after processing the currently pending commands.
			 */
			OUT_RING(chan, chan->pushbuf_base | 0x20000000);

			/* Wait for GET to depart from the skips area.
			 * This prevents writing GET == PUT and triggering
			 * a race that makes us think the GPU is idle
			 * when it's not.
			 */
			do {
				get = READ_GET(chan, &prev_get, &cnt);
				if (unlikely(get == -EBUSY))
					return -EBUSY;
				if (unlikely(get == -EINVAL))
					continue;
			} while (get <= NOUVEAU_DMA_SKIPS);
			WRITE_PUT(NOUVEAU_DMA_SKIPS);

			/* we're now submitting commands at the start of
			 * the push buffer.
			 */
			chan->dma.cur  =
			chan->dma.put  = NOUVEAU_DMA_SKIPS;
		}

		/* The engine is fetching ahead of us, so we have space up
		 * until the current GET pointer.  The "- 1" ensures there's
		 * space left to emit a jump back to the beginning of the
		 * push buffer if we require it.  We can never get GET == PUT
		 * here, so this is safe.
		 */
		chan->dma.free = get - chan->dma.cur - 1;
	}

	return 0;
}