/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
 *
 * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Thomas Hellstrom.
 *    Partially based on code obtained from Digeo Inc.
 */


/*
 * Unmaps the DMA mappings.
 * FIXME: Is this a NoOp on x86? Also
 * FIXME: What happens if this one is called and a pending blit has previously done
 * the same DMA mappings?
 */

#include "drmP.h"
#include "via_drm.h"
#include "via_drv.h"
#include "via_dmablit.h"

#include <linux/pagemap.h>

#define VIA_PGDN(x)	     (((unsigned long)(x)) & PAGE_MASK)
#define VIA_PGOFF(x)	    (((unsigned long)(x)) & ~PAGE_MASK)
#define VIA_PFN(x)	      ((unsigned long)(x) >> PAGE_SHIFT)
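
/*
 * E.g. with 4 KiB pages: VIA_PGDN(0x12345) == 0x12000,
 * VIA_PGOFF(0x12345) == 0x345 and VIA_PFN(0x12345) == 0x12.
 */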

typedef struct _drm_via_descriptor {
	uint32_t mem_addr;	/* Bus address of the system-memory chunk */
	uint32_t dev_addr;	/* Frame-buffer (device) address */
	uint32_t size;		/* Transfer size in bytes */
	uint32_t next;		/* Bus address of the next descriptor */
} drm_via_descriptor_t;


/*
 * Unmap a DMA mapping.
 */



static void
via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	int num_desc = vsg->num_desc;
	unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
	unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
	drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
		descriptor_this_page;
	dma_addr_t next = vsg->chain_start;

	while (num_desc--) {
		if (descriptor_this_page-- == 0) {
			cur_descriptor_page--;
			descriptor_this_page = vsg->descriptors_per_page - 1;
			desc_ptr = vsg->desc_pages[cur_descriptor_page] +
				descriptor_this_page;
		}
		dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
		dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
		next = (dma_addr_t) desc_ptr->next;
		desc_ptr--;
	}
}

/*
 * If mode == 0, count how many descriptors are needed.
 * If mode == 1, map the DMA pages for the device, and also build and map the
 * descriptor chain itself.
 * The hardware processes the descriptors in the reverse of the order in which
 * they are built, because we are not allowed to update the 'next' field of a
 * descriptor that is already mapped without a syncing call.
 */
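
/*
 * For illustration: a chain of three descriptors ends up linked back to
 * front, since each descriptor's 'next' field must be filled in before the
 * descriptor itself is mapped:
 *
 *	vsg->chain_start -> desc[2] -> desc[1] -> desc[0] -> VIA_DMA_DPR_EC
 */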

static void
via_map_blit_for_device(struct pci_dev *pdev,
		   const drm_via_dmablit_t *xfer,
		   drm_via_sg_info_t *vsg,
		   int mode)
{
	unsigned cur_descriptor_page = 0;
	unsigned num_descriptors_this_page = 0;
	unsigned char *mem_addr = xfer->mem_addr;
	unsigned char *cur_mem;
	unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
	uint32_t fb_addr = xfer->fb_addr;
	uint32_t cur_fb;
	unsigned long line_len;
	unsigned remaining_len;
	int num_desc = 0;
	int cur_line;
	dma_addr_t next = 0 | VIA_DMA_DPR_EC;
	drm_via_descriptor_t *desc_ptr = NULL;

	if (mode == 1)
		desc_ptr = vsg->desc_pages[cur_descriptor_page];

	for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {

		line_len = xfer->line_length;
		cur_fb = fb_addr;
		cur_mem = mem_addr;

		while (line_len > 0) {
			remaining_len = min(PAGE_SIZE - VIA_PGOFF(cur_mem), line_len);
			line_len -= remaining_len;

			if (mode == 1) {
				desc_ptr->mem_addr =
					dma_map_page(&pdev->dev,
						     vsg->pages[VIA_PFN(cur_mem) -
								VIA_PFN(first_addr)],
						     VIA_PGOFF(cur_mem), remaining_len,
						     vsg->direction);
				desc_ptr->dev_addr = cur_fb;

				desc_ptr->size = remaining_len;
				desc_ptr->next = (uint32_t) next;
				/* This descriptor's bus address becomes the
				 * 'next' link of the one built after it. */
				next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
						      DMA_TO_DEVICE);
				desc_ptr++;
				if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
					num_descriptors_this_page = 0;
					desc_ptr = vsg->desc_pages[++cur_descriptor_page];
				}
			}
			num_desc++;
			cur_mem += remaining_len;
			cur_fb += remaining_len;
		}
		mem_addr += xfer->mem_stride;
		fb_addr += xfer->fb_stride;
	}

	if (mode == 1) {
		vsg->chain_start = next;
		vsg->state = dr_via_device_mapped;
	}
	vsg->num_desc = num_desc;
}

/*
 * Free up all resources for a blit. It is usable even if the
 * blit info has only been partially built, as long as the state enum is
 * consistent with the actual state of the used resources.
 */

static void
via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	struct page *page;
	int i;

	switch (vsg->state) {
	case dr_via_device_mapped:
		via_unmap_blit_from_device(pdev, vsg);
		/* fall through */
	case dr_via_desc_pages_alloc:
		for (i = 0; i < vsg->num_desc_pages; ++i) {
			if (vsg->desc_pages[i] != NULL)
				free_page((unsigned long)vsg->desc_pages[i]);
		}
		kfree(vsg->desc_pages);
		/* fall through */
	case dr_via_pages_locked:
		for (i = 0; i < vsg->num_pages; ++i) {
			if (NULL != (page = vsg->pages[i])) {
				if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
					SetPageDirty(page);
				page_cache_release(page);
			}
		}
		/* fall through */
	case dr_via_pages_alloc:
		vfree(vsg->pages);
		/* fall through */
	default:
		vsg->state = dr_via_sg_init;
	}
	vfree(vsg->bounce_buffer);
	vsg->bounce_buffer = NULL;
	vsg->free_on_sequence = 0;
}

/*
 * Fire a blit engine.
 */

static void
via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	/* Clear the address registers and ack any stale status bits. */
	VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
		  VIA_DMA_CSR_DE);
	/* Chained mode with transfer-done IRQ; point the engine at the chain. */
	VIA_WRITE(VIA_PCI_DMA_MR0  + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
	VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
	DRM_WRITEMEMORYBARRIER();
	/* Start the transfer and flush the posted register writes. */
	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
	VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04);
}

/*
 * Obtain a page pointer array and lock all pages into system memory. A segmentation violation will
 * occur here if the calling user does not have access to the submitted address.
 */

static int
via_lock_all_dma_pages(drm_via_sg_info_t *vsg,  drm_via_dmablit_t *xfer)
{
	int ret;
	unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
	/* Number of pages spanned by the (possibly strided) user buffer. */
	vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
		first_pfn + 1;

	if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
		return -ENOMEM;
	memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
	down_read(&current->mm->mmap_sem);
	/*
	 * Fault in and pin the user pages; write access is requested only
	 * when the engine will be writing to them (DMA_FROM_DEVICE).
	 */
	ret = get_user_pages(current, current->mm,
			     (unsigned long)xfer->mem_addr,
			     vsg->num_pages,
			     (vsg->direction == DMA_FROM_DEVICE),
			     0, vsg->pages, NULL);

	up_read(&current->mm->mmap_sem);
	if (ret != vsg->num_pages) {
		if (ret < 0)
			return ret;
		vsg->state = dr_via_pages_locked;
		return -EINVAL;
	}
	vsg->state = dr_via_pages_locked;
	DRM_DEBUG("DMA pages locked\n");
	return 0;
}

/*
 * Allocate DMA-capable memory for the blit descriptor chain, and an array that keeps track of the
 * pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
 * quite large for some blits, and the pages don't need to be contiguous.
 */
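
/*
 * With 4 KiB pages and the 16-byte descriptor above, this works out to
 * PAGE_SIZE / sizeof(drm_via_descriptor_t) = 4096 / 16 = 256 descriptors
 * per descriptor page.
 */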

static int
via_alloc_desc_pages(drm_via_sg_info_t *vsg)
{
	int i;

	vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
	vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
		vsg->descriptors_per_page;

	if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
		return -ENOMEM;

	vsg->state = dr_via_desc_pages_alloc;
	for (i = 0; i < vsg->num_desc_pages; ++i) {
		if (NULL == (vsg->desc_pages[i] =
			     (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
			return -ENOMEM;
	}
	DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
		  vsg->num_desc);
	return 0;
}

static void
via_abort_dmablit(struct drm_device *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
}

static void
via_dmablit_engine_off(struct drm_device *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
}



/*
 * The dmablit part of the IRQ handler. Trying to do only reasonably fast things here.
 * The rest, like unmapping and freeing memory for done blits, is done in a separate
 * workqueue task. Basically the task of the interrupt handler is to submit a new blit
 * to the engine, while the workqueue task takes care of the processing associated with
 * the old blit.
 */

void
via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	int cur;
	int done_transfer;
	unsigned long irqsave = 0;
	uint32_t status = 0;

	DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
		  engine, from_irq, (unsigned long) blitq);

	if (from_irq) {
		spin_lock(&blitq->blit_lock);
	} else {
		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	/*
	 * A blit is done either when the engine raises transfer-done, or,
	 * if the blit is being aborted, when the engine is no longer enabled.
	 */
	done_transfer = blitq->is_active &&
		((status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
	done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));

	cur = blitq->cur;
	if (done_transfer) {

		blitq->blits[cur]->aborted = blitq->aborting;
		blitq->done_blit_handle++;
		DRM_WAKEUP(blitq->blit_queue + cur);

		cur++;
		if (cur >= VIA_NUM_BLIT_SLOTS)
			cur = 0;
		blitq->cur = cur;

		/*
		 * Clear transfer done flag.
		 */

		VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04,  VIA_DMA_CSR_TD);

		blitq->is_active = 0;
		blitq->aborting = 0;
		schedule_work(&blitq->wq);

	} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {

		/*
		 * Abort transfer after one second.
		 */

		via_abort_dmablit(dev, engine);
		blitq->aborting = 1;
		blitq->end = jiffies + DRM_HZ;
	}
	if (!blitq->is_active) {
		if (blitq->num_outstanding) {
			via_fire_dmablit(dev, blitq->blits[cur], engine);
			blitq->is_active = 1;
			blitq->cur = cur;
			blitq->num_outstanding--;
			blitq->end = jiffies + DRM_HZ;
			if (!timer_pending(&blitq->poll_timer))
				mod_timer(&blitq->poll_timer, jiffies + 1);
		} else {
			if (timer_pending(&blitq->poll_timer)) {
				del_timer(&blitq->poll_timer);
			}
			via_dmablit_engine_off(dev, engine);
		}
	}

	if (from_irq) {
		spin_unlock(&blitq->blit_lock);
	} else {
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	}
}



/*
 * Check whether this blit is still active, performing necessary locking.
 */

static int
via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
{
	unsigned long irqsave;
	uint32_t slot;
	int active;

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	/*
	 * Allow for handle wraparounds. Handles increase monotonically, so
	 * with unsigned arithmetic a blit is still active exactly when its
	 * handle lies in the window (done_blit_handle, cur_blit_handle];
	 * the (1 << 23) slack keeps the comparisons valid across wraparound.
	 */

	active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
		((blitq->cur_blit_handle - handle) <= (1 << 23));

	if (queue && active) {
		slot = handle - blitq->done_blit_handle + blitq->cur - 1;
		if (slot >= VIA_NUM_BLIT_SLOTS) {
			slot -= VIA_NUM_BLIT_SLOTS;
		}
		*queue = blitq->blit_queue + slot;
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return active;
}
/*
 * Sync. Wait up to three seconds for the blit to be performed.
 */

static int
via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
{

	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	wait_queue_head_t *queue;
	int ret = 0;

	if (via_dmablit_active(blitq, engine, handle, &queue)) {
		DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
			    !via_dmablit_active(blitq, engine, handle, NULL));
	}
	DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
		  handle, engine, ret);
	return ret;
}


/*
 * A timer that regularly polls the blit engine in cases where we don't have interrupts:
 * a) Broken hardware (typically those that don't have any video capture facility).
 * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
 * The timer and hardware IRQs can and do work in parallel. If the hardware has
 * IRQs, they merely shorten the latency somewhat.
 */



static void
via_dmablit_timer(unsigned long data)
{
	drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
	struct drm_device *dev = blitq->dev;
	int engine = (int)
		(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);

	DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
		  (unsigned long) jiffies);

	via_dmablit_handler(dev, engine, 0);

	if (!timer_pending(&blitq->poll_timer)) {
		mod_timer(&blitq->poll_timer, jiffies + 1);

		/*
		 * Rerun handler to delete timer if engines are off, and
		 * to shorten abort latency. This is a little nasty.
		 */

		via_dmablit_handler(dev, engine, 0);

	}
}




/*
 * Workqueue task that frees data and mappings associated with a blit.
 * Also wakes up waiting processes. Each of these tasks handles one
 * blit engine only and is not necessarily run for each interrupt.
 */


static void
via_dmablit_workqueue(struct work_struct *work)
{
	drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
	struct drm_device *dev = blitq->dev;
	unsigned long irqsave;
	drm_via_sg_info_t *cur_sg;
	int cur_released;

	DRM_DEBUG("Workqueue task called for blit engine %ld\n", (unsigned long)
		  (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	while (blitq->serviced != blitq->cur) {

		cur_released = blitq->serviced++;

		DRM_DEBUG("Releasing blit slot %d\n", cur_released);

		if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
			blitq->serviced = 0;

		cur_sg = blitq->blits[cur_released];
		blitq->num_free++;

		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		DRM_WAKEUP(&blitq->busy_queue);

		via_free_sg_info(dev->pdev, cur_sg);
		kfree(cur_sg);

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}

/*
 * Init all blit engines. Currently we use two, but some hardware has four.
 */


void
via_init_dmablit(struct drm_device *dev)
{
	int i, j;
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq;

	pci_set_master(dev->pdev);

	for (i = 0; i < VIA_NUM_BLIT_ENGINES; ++i) {
		blitq = dev_priv->blit_queues + i;
		blitq->dev = dev;
		blitq->cur_blit_handle = 0;
		blitq->done_blit_handle = 0;
		blitq->head = 0;
		blitq->cur = 0;
		blitq->serviced = 0;
		blitq->num_free = VIA_NUM_BLIT_SLOTS - 1;
		blitq->num_outstanding = 0;
		blitq->is_active = 0;
		blitq->aborting = 0;
		spin_lock_init(&blitq->blit_lock);
		for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j) {
			DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
		}
		DRM_INIT_WAITQUEUE(&blitq->busy_queue);
		INIT_WORK(&blitq->wq, via_dmablit_workqueue);
		setup_timer(&blitq->poll_timer, via_dmablit_timer,
				(unsigned long)blitq);
	}
}

/*
 * Build all info and do all mappings required for a blit.
 */
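
/*
 * The flow below: validate the transfer, pin the user pages, run
 * via_map_blit_for_device() with mode == 0 to count descriptors, allocate
 * the descriptor pages, then run it again with mode == 1 to build and map
 * the chain.
 */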

static int
via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	int draw = xfer->to_fb;
	int ret = 0;

	vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	vsg->bounce_buffer = NULL;

	vsg->state = dr_via_sg_init;

	if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
		DRM_ERROR("Zero size bitblt.\n");
		return -EINVAL;
	}

	/*
	 * The check below is a driver limitation, not a hardware one. We
	 * don't want to lock unused pages, and don't want to incorporate the
	 * extra logic of avoiding them. Make sure there are none.
	 * (Not a big limitation anyway.)
	 */

	if ((xfer->mem_stride - xfer->line_length) > 2*PAGE_SIZE) {
		DRM_ERROR("Too large system memory stride. Stride: %d, "
			  "Length: %d\n", xfer->mem_stride, xfer->line_length);
		return -EINVAL;
	}
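
	/*
	 * If the transfer is contiguous in both system memory and on the
	 * frame-buffer side, fold it into a single long line; descriptors
	 * then break only at page boundaries instead of at every line.
	 */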

	if ((xfer->mem_stride == xfer->line_length) &&
	   (xfer->fb_stride == xfer->line_length)) {
		xfer->mem_stride *= xfer->num_lines;
		xfer->line_length = xfer->mem_stride;
		xfer->fb_stride = xfer->mem_stride;
		xfer->num_lines = 1;
	}

	/*
	 * Don't lock an arbitrarily large number of pages, since that causes a
	 * DoS security hole.
	 */

	if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
		DRM_ERROR("Too large PCI DMA bitblt.\n");
		return -EINVAL;
	}

	/*
	 * We allow a negative fb stride to allow flipping of images during
	 * transfer.
	 */

	if (xfer->mem_stride < xfer->line_length ||
		abs(xfer->fb_stride) < xfer->line_length) {
		DRM_ERROR("Invalid frame-buffer / memory stride.\n");
		return -EINVAL;
	}

	/*
	 * A hardware bug seems to be worked around if system memory addresses start on
	 * 16-byte boundaries. This seems a bit restrictive, however. VIA has been
	 * contacted about this, but meanwhile we impose the following restrictions:
	 */

#ifdef VIA_BUGFREE
	if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#else
	if ((((unsigned long)xfer->mem_addr & 15) ||
	      ((unsigned long)xfer->fb_addr & 3)) ||
	   ((xfer->num_lines > 1) &&
	   ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#endif

	if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
		DRM_ERROR("Could not lock DMA pages.\n");
		via_free_sg_info(dev->pdev, vsg);
		return ret;
	}

	via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
	if (0 != (ret = via_alloc_desc_pages(vsg))) {
		DRM_ERROR("Could not allocate DMA descriptor pages.\n");
		via_free_sg_info(dev->pdev, vsg);
		return ret;
	}
	via_map_blit_for_device(dev->pdev, xfer, vsg, 1);
	return 0;
}

/*
 * Reserve one free slot in the blit queue. Will wait for one second for one
 * to become available. Otherwise -EBUSY is returned.
 */

static int
via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
{
	int ret = 0;
	unsigned long irqsave;

	DRM_DEBUG("Num free is %d\n", blitq->num_free);
	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	while (blitq->num_free == 0) {
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
		if (ret) {
			return (-EINTR == ret) ? -EAGAIN : ret;
		}
		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}
	blitq->num_free--;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return 0;
}

/*
 * Hand back a free slot if we changed our mind.
 */

static void
via_dmablit_release_slot(drm_via_blitq_t *blitq)
{
	unsigned long irqsave;

	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	blitq->num_free++;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	DRM_WAKEUP(&blitq->busy_queue);
}

/*
 * Grab a free slot. Build blit info and queue a blit.
 */


static int
via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_sg_info_t *vsg;
	drm_via_blitq_t *blitq;
	int ret;
	int engine;
	unsigned long irqsave;

	if (dev_priv == NULL) {
		DRM_ERROR("Called without initialization.\n");
		return -EINVAL;
	}

	engine = (xfer->to_fb) ? 0 : 1;
	blitq = dev_priv->blit_queues + engine;
	if (0 != (ret = via_dmablit_grab_slot(blitq, engine))) {
		return ret;
	}
	if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
		via_dmablit_release_slot(blitq);
		return -ENOMEM;
	}
	if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
		via_dmablit_release_slot(blitq);
		kfree(vsg);
		return ret;
	}
	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	blitq->blits[blitq->head++] = vsg;
	if (blitq->head >= VIA_NUM_BLIT_SLOTS)
		blitq->head = 0;
	blitq->num_outstanding++;
	/* The sync handle is the queue's monotonically increasing blit count. */
	xfer->sync.sync_handle = ++blitq->cur_blit_handle;

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	xfer->sync.engine = engine;

	via_dmablit_handler(dev, engine, 0);

	return 0;
}

/*
 * Sync on a previously submitted blit. Note that the X server uses signals extensively, and
 * that there is a high probability that this IOCTL will be interrupted by a signal. In that
 * case it returns with -EAGAIN for the signal to be delivered.
 * The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
 */

int
via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_blitsync_t *sync = data;
	int err;

	if (sync->engine >= VIA_NUM_BLIT_ENGINES)
		return -EINVAL;

	err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);

	if (-EINTR == err)
		err = -EAGAIN;

	return err;
}

/*
 * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
 * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
 * be reissued. See the above IOCTL code.
 */

int
via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_dmablit_t *xfer = data;
	int err;

	err = via_dmablit(dev, xfer);

	return err;
}
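
/*
 * A sketch of the expected userspace usage, assuming libdrm's
 * drmCommandWriteRead() and the DRM_VIA_DMA_BLIT / DRM_VIA_BLIT_SYNC
 * command numbers from via_drm.h (error handling elided):
 *
 *	drm_via_dmablit_t xfer = { .. blit parameters .. };
 *	while (drmCommandWriteRead(fd, DRM_VIA_DMA_BLIT,
 *				   &xfer, sizeof(xfer)) == -EAGAIN)
 *		;
 *	drm_via_blitsync_t sync = xfer.sync;
 *	while (drmCommandWriteRead(fd, DRM_VIA_BLIT_SYNC,
 *				   &sync, sizeof(sync)) == -EAGAIN)
 *		;
 */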