/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_ramht.h"

void
nouveau_dma_init(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_bo *pushbuf = chan->pushbuf_bo;

	if (dev_priv->card_type >= NV_50) {
		const int ib_size = pushbuf->bo.mem.size / 2;

		chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
		chan->dma.ib_max = (ib_size / 8) - 1;
		chan->dma.ib_put = 0;
		chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;

		chan->dma.max = (pushbuf->bo.mem.size - ib_size) >> 2;
	} else {
		chan->dma.max  = (pushbuf->bo.mem.size >> 2) - 2;
	}

	chan->dma.put  = 0;
	chan->dma.cur  = chan->dma.put;
	chan->dma.free = chan->dma.max - chan->dma.cur;
}
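
/*
 * Illustrative layout, not part of the original file: with a 64 KiB
 * push buffer on NV50+, the split computed above works out to
 *
 *	ib_size = 65536 / 2     = 32768 bytes (upper half, IB entries)
 *	ib_base = 32768 >> 2    = 8192  (dword offset where the IB starts)
 *	ib_max  = 32768 / 8 - 1 = 4095  (IB entries are 8 bytes each)
 *	dma.max = 32768 >> 2    = 8192  dwords available for command data
 *
 * i.e. the lower half of the buffer holds command dwords and the upper
 * half holds the indirect-buffer (IB) entries that point at them.
 */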

void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&chan->pushbuf_bo->kmap, &is_iomem);
	mem = &mem[chan->dma.cur];
	if (is_iomem)
		memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4);
	else
		memcpy(mem, data, nr_dwords * 4);
	chan->dma.cur += nr_dwords;
}
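
/*
 * Usage sketch (illustrative, not from the original file): callers are
 * expected to reserve ring space before copying.  Assuming the
 * RING_SPACE() and FIRE_RING() helpers declared in nouveau_dma.h:
 *
 *	u32 data[] = { 0x00000000, 0x12345678 };
 *
 *	if (RING_SPACE(chan, ARRAY_SIZE(data)) == 0) {
 *		OUT_RINGp(chan, data, ARRAY_SIZE(data));
 *		FIRE_RING(chan);
 *	}
 */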

/* Fetch and adjust GPU GET pointer
 *
 * Returns:
 *  value >= 0, the adjusted GET pointer
 *  -EINVAL if GET pointer currently outside main push buffer
 *  -EBUSY if timeout exceeded
 */
static inline int
READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
{
	uint64_t val;

	val = nvchan_rd32(chan, chan->user_get);
	if (chan->user_get_hi)
		val |= (uint64_t)nvchan_rd32(chan, chan->user_get_hi) << 32;

	/* reset counter as long as GET is still advancing; this is
	 * to avoid misdetecting a GPU lockup if the GPU happens to
	 * just be processing an operation that takes a long time
	 */
	if (val != *prev_get) {
		*prev_get = val;
		*timeout = 0;
	}

	if ((++*timeout & 0xff) == 0) {
		DRM_UDELAY(1);
		if (*timeout > 100000)
			return -EBUSY;
	}

	if (val < chan->pushbuf_base ||
	    val > chan->pushbuf_base + (chan->dma.max << 2))
		return -EINVAL;

	return (val - chan->pushbuf_base) >> 2;
}
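
/*
 * Worked example (illustrative): if pushbuf_base is 0x10000 and the
 * GPU reports GET = 0x10100, the value returned is
 * (0x10100 - 0x10000) >> 2 = 64, i.e. the dword index the GPU will
 * fetch next, relative to the start of the push buffer.
 */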

void
nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
	      int delta, int length)
{
	struct nouveau_bo *pb = chan->pushbuf_bo;
	struct nouveau_vma *vma;
	int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
	u64 offset;

	vma = nouveau_bo_vma_find(bo, chan->vm);
	BUG_ON(!vma);
	offset = vma->offset + delta;

	BUG_ON(chan->dma.ib_free < 1);
	nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
	nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);

	chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;

	DRM_MEMORYBARRIER();
	/* Flush writes. */
	nouveau_bo_rd32(pb, 0);

	nvchan_wr32(chan, 0x8c, chan->dma.ib_put);
	chan->dma.ib_free--;
}
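
/*
 * IB entry encoding, as written above (example values are illustrative):
 * each entry is two dwords.  For offset = 0x120001000 and length = 0x100,
 *
 *	word0 = lower_32_bits(offset)                = 0x20001000
 *	word1 = upper_32_bits(offset) | length << 8  = 0x00010001
 *
 * i.e. the low byte of word1 carries the high address bits and the
 * length is shifted into the bits above it, as the `| length << 8`
 * implies.
 */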

static int
nv50_dma_push_wait(struct nouveau_channel *chan, int count)
{
	uint32_t cnt = 0, prev_get = 0;

	while (chan->dma.ib_free < count) {
		uint32_t get = nvchan_rd32(chan, 0x88);
		if (get != prev_get) {
			prev_get = get;
			cnt = 0;
		}

		if ((++cnt & 0xff) == 0) {
			DRM_UDELAY(1);
			if (cnt > 100000)
				return -EBUSY;
		}

		chan->dma.ib_free = get - chan->dma.ib_put;
		if (chan->dma.ib_free <= 0)
			chan->dma.ib_free += chan->dma.ib_max;
	}

	return 0;
}
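
/*
 * Worked example (illustrative): with ib_max = 4095, ib_put = 4000 and
 * a hardware GET of 100 read back from register 0x88, the free count
 * comes out negative (100 - 4000 = -3900) and is wrapped by adding
 * ib_max, leaving 195 free IB entries.
 */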

static int
nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
{
	uint64_t prev_get = 0;
	int ret, cnt = 0;

	ret = nv50_dma_push_wait(chan, slots + 1);
	if (unlikely(ret))
		return ret;

	while (chan->dma.free < count) {
		int get = READ_GET(chan, &prev_get, &cnt);
		if (unlikely(get < 0)) {
			if (get == -EINVAL)
				continue;

			return get;
		}

		if (get <= chan->dma.cur) {
			chan->dma.free = chan->dma.max - chan->dma.cur;
			if (chan->dma.free >= count)
				break;

			FIRE_RING(chan);
			do {
				get = READ_GET(chan, &prev_get, &cnt);
				if (unlikely(get < 0)) {
					if (get == -EINVAL)
						continue;
					return get;
				}
			} while (get == 0);
			chan->dma.cur = 0;
			chan->dma.put = 0;
		}

		chan->dma.free = get - chan->dma.cur - 1;
	}

	return 0;
}

int
nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
{
	uint64_t prev_get = 0;
	int cnt = 0, get;

	if (chan->dma.ib_max)
		return nv50_dma_wait(chan, slots, size);

	while (chan->dma.free < size) {
		get = READ_GET(chan, &prev_get, &cnt);
		if (unlikely(get == -EBUSY))
			return -EBUSY;

		/* loop until we have a usable GET pointer.  the value
		 * we read from the GPU may be outside the main ring if
		 * PFIFO is processing a buffer called from the main ring;
		 * discard these values until something sensible is seen.
		 *
		 * the other case where we discard GET is while the GPU is
		 * fetching from the SKIPS area, so the code below doesn't
		 * have to deal with some fun corner cases.
		 */
		if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS)
			continue;

		if (get <= chan->dma.cur) {
			/* engine is fetching behind us, or is completely
			 * idle (GET == PUT), so we have free space up until
			 * the end of the push buffer
			 *
			 * we can only hit that path once per call: after
			 * looping back to the beginning of the push buffer,
			 * we'll hit the fetching-ahead-of-us path from that
			 * point on.
			 *
			 * the *one* exception to that rule is if we read
			 * GET==PUT, in which case the below conditional will
			 * always succeed and break us out of the wait loop.
			 */
			chan->dma.free = chan->dma.max - chan->dma.cur;
			if (chan->dma.free >= size)
				break;

			/* not enough space left at the end of the push buffer,
			 * instruct the GPU to jump back to the start right
			 * after processing the currently pending commands.
			 */
			OUT_RING(chan, chan->pushbuf_base | 0x20000000);

			/* wait for GET to depart from the skips area.
			 * this prevents writing GET==PUT and triggering a
			 * race that makes us think the GPU is idle when
			 * it's not.
			 */
			do {
				get = READ_GET(chan, &prev_get, &cnt);
				if (unlikely(get == -EBUSY))
					return -EBUSY;
				if (unlikely(get == -EINVAL))
					continue;
			} while (get <= NOUVEAU_DMA_SKIPS);
			WRITE_PUT(NOUVEAU_DMA_SKIPS);

			/* we're now submitting commands at the start of
			 * the push buffer.
			 */
			chan->dma.cur  =
			chan->dma.put  = NOUVEAU_DMA_SKIPS;
		}

		/* the engine is fetching ahead of us; we have space up until
		 * the current GET pointer.  the "- 1" ensures there's space
		 * left to emit a jump back to the beginning of the push
		 * buffer if we require it.  we can never get GET == PUT
		 * here, so this is safe.
		 */
		chan->dma.free = get - chan->dma.cur - 1;
	}

	return 0;
}
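
/*
 * Illustrative call sequence (not part of the original file): the
 * RING_SPACE() helper in nouveau_dma.h reserves space through this
 * function, so a typical submission looks like:
 *
 *	if (RING_SPACE(chan, 2) == 0) {
 *		BEGIN_RING(chan, subc, mthd, 1);  // header dword
 *		OUT_RING(chan, value);            // one data dword
 *		FIRE_RING(chan);                  // kick PUT / the IB
 *	}
 *
 * where subc, mthd and value are placeholders for a real subchannel,
 * method and argument.
 */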