vsp1_dl.c 16.2 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17
/*
 * vsp1_dl.c  --  R-Car VSP1 Display List
 *
 * Copyright (C) 2015 Renesas Electronics Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/slab.h>
18
#include <linux/workqueue.h>
19 20 21 22

#include "vsp1.h"
#include "vsp1_dl.h"

23
#define VSP1_DL_NUM_ENTRIES		256
24 25
#define VSP1_DL_NUM_LISTS		3

26 27 28
#define VSP1_DLH_INT_ENABLE		(1 << 1)
#define VSP1_DLH_AUTO_START		(1 << 0)

29 30 31 32 33
/* One entry of the display list header: address and size of one DL body. */
struct vsp1_dl_header_list {
	u32 num_bytes;
	u32 addr;
} __attribute__((__packed__));

34 35
/*
 * Display list header as consumed by the hardware in header mode. It
 * references up to 8 display list bodies and an optional next header for
 * chaining. Layout is hardware-defined, hence the packed attribute.
 */
struct vsp1_dl_header {
	u32 num_lists;
	struct vsp1_dl_header_list lists[8];
	u32 next_header;
	u32 flags;
} __attribute__((__packed__));

41 42 43 44 45
/* A single display list entry: one register address/value write. */
struct vsp1_dl_entry {
	u32 addr;
	u32 data;
} __attribute__((__packed__));

46 47 48 49 50 51 52 53 54 55
/**
 * struct vsp1_dl_body - Display list body
 * @list: entry in the display list list of bodies
 * @vsp1: the VSP1 device
 * @entries: array of entries
 * @dma: DMA address of the entries
 * @size: size of the DMA memory in bytes
 * @num_entries: number of stored entries
 */
struct vsp1_dl_body {
	struct list_head list;
	struct vsp1_device *vsp1;

	struct vsp1_dl_entry *entries;
	dma_addr_t dma;
	size_t size;

	unsigned int num_entries;
};
65

66 67 68 69 70 71 72 73 74 75 76
/**
 * struct vsp1_dl_list - Display list
 * @list: entry in the display list manager lists
 * @dlm: the display list manager
 * @header: display list header, NULL for headerless lists
 * @dma: DMA address for the header
 * @body0: first display list body
 * @fragments: list of extra display list bodies
 */
struct vsp1_dl_list {
	struct list_head list;
	struct vsp1_dl_manager *dlm;

	struct vsp1_dl_header *header;
	dma_addr_t dma;

	struct vsp1_dl_body body0;
	struct list_head fragments;
};

86 87 88 89 90
/* Display list operation mode: hardware-chained header or headerless. */
enum vsp1_dl_mode {
	VSP1_DL_MODE_HEADER,
	VSP1_DL_MODE_HEADERLESS,
};

91 92
/**
 * struct vsp1_dl_manager - Display List manager
 * @index: index of the related WPF
 * @mode: display list operation mode (header or headerless)
 * @vsp1: the VSP1 device
 * @lock: protects the free, active, queued, pending and gc_fragments lists
 * @free: array of all free display lists
 * @active: list currently being processed (loaded) by hardware
 * @queued: list queued to the hardware (written to the DL registers)
 * @pending: list waiting to be queued to the hardware
 * @gc_work: fragments garbage collector work struct
 * @gc_fragments: array of display list fragments waiting to be freed
 */
struct vsp1_dl_manager {
	unsigned int index;
	enum vsp1_dl_mode mode;
	struct vsp1_device *vsp1;

	spinlock_t lock;
	struct list_head free;
	struct vsp1_dl_list *active;
	struct vsp1_dl_list *queued;
	struct vsp1_dl_list *pending;

	struct work_struct gc_work;
	struct list_head gc_fragments;
};

119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222
/* -----------------------------------------------------------------------------
 * Display List Body Management
 */

/*
 * Set up a display list body and allocate the DMA memory backing its entry
 * array, plus @extra_size trailing bytes (used for an optional header). The
 * body object must have been zero-initialized by the caller.
 *
 * Return 0 on success or -ENOMEM if the DMA allocation fails.
 */
static int vsp1_dl_body_init(struct vsp1_device *vsp1,
			     struct vsp1_dl_body *dlb, unsigned int num_entries,
			     size_t extra_size)
{
	dlb->vsp1 = vsp1;
	dlb->size = num_entries * sizeof(*dlb->entries) + extra_size;

	dlb->entries = dma_alloc_wc(vsp1->dev, dlb->size, &dlb->dma,
				    GFP_KERNEL);

	return dlb->entries ? 0 : -ENOMEM;
}

/*
 * Clean up a display list body and free its DMA memory.
 */
static void vsp1_dl_body_cleanup(struct vsp1_dl_body *dlb)
{
	dma_free_wc(dlb->vsp1->dev, dlb->size, dlb->entries, dlb->dma);
}

/**
 * vsp1_dl_fragment_alloc - Allocate a display list fragment
 * @vsp1: The VSP1 device
 * @num_entries: The maximum number of entries that the fragment can contain
 *
 * Allocate a display list fragment sized for @num_entries register writes.
 *
 * Return a pointer to the fragment on success or NULL if either the fragment
 * object or its DMA memory can't be allocated.
 */
struct vsp1_dl_body *vsp1_dl_fragment_alloc(struct vsp1_device *vsp1,
					    unsigned int num_entries)
{
	struct vsp1_dl_body *dlb;

	dlb = kzalloc(sizeof(*dlb), GFP_KERNEL);
	if (!dlb)
		return NULL;

	if (vsp1_dl_body_init(vsp1, dlb, num_entries, 0) < 0) {
		kfree(dlb);
		dlb = NULL;
	}

	return dlb;
}

/**
 * vsp1_dl_fragment_free - Free a display list fragment
 * @dlb: The fragment
 *
 * Release the fragment object and its DMA memory.
 *
 * Only call this for fragments that were never added to a display list: a
 * list takes ownership of its fragments and frees them itself. Manual free
 * typically happens at cleanup time for fragments that were allocated but
 * never used.
 *
 * Passing a NULL pointer is safe and performs no operation.
 */
void vsp1_dl_fragment_free(struct vsp1_dl_body *dlb)
{
	if (dlb) {
		vsp1_dl_body_cleanup(dlb);
		kfree(dlb);
	}
}

/**
 * vsp1_dl_fragment_write - Write a register to a display list fragment
 * @dlb: The fragment
 * @reg: The register address
 * @data: The register value
 *
 * Append one register/value pair to the fragment. No bounds check is
 * performed; callers must not exceed the entry count requested when the
 * fragment was allocated with vsp1_dl_fragment_alloc().
 */
void vsp1_dl_fragment_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
{
	struct vsp1_dl_entry *entry = &dlb->entries[dlb->num_entries++];

	entry->addr = reg;
	entry->data = data;
}

223 224 225 226
/* -----------------------------------------------------------------------------
 * Display List Transaction Management
 */

227
static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
228
{
229
	struct vsp1_dl_list *dl;
230
	size_t header_size;
231
	int ret;
232

233 234 235
	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;
236

237
	INIT_LIST_HEAD(&dl->fragments);
238 239
	dl->dlm = dlm;

240 241 242 243 244 245 246 247 248 249 250 251
	/* Initialize the display list body and allocate DMA memory for the body
	 * and the optional header. Both are allocated together to avoid memory
	 * fragmentation, with the header located right after the body in
	 * memory.
	 */
	header_size = dlm->mode == VSP1_DL_MODE_HEADER
		    ? ALIGN(sizeof(struct vsp1_dl_header), 8)
		    : 0;

	ret = vsp1_dl_body_init(dlm->vsp1, &dl->body0, VSP1_DL_NUM_ENTRIES,
				header_size);
	if (ret < 0) {
252 253 254
		kfree(dl);
		return NULL;
	}
255

256
	if (dlm->mode == VSP1_DL_MODE_HEADER) {
257 258 259 260 261 262
		size_t header_offset = VSP1_DL_NUM_ENTRIES
				     * sizeof(*dl->body0.entries);

		dl->header = ((void *)dl->body0.entries) + header_offset;
		dl->dma = dl->body0.dma + header_offset;

263
		memset(dl->header, 0, sizeof(*dl->header));
264
		dl->header->lists[0].addr = dl->body0.dma;
265 266 267
		dl->header->flags = VSP1_DLH_INT_ENABLE;
	}

268 269
	return dl;
}
270

271 272
static void vsp1_dl_list_free(struct vsp1_dl_list *dl)
{
273
	vsp1_dl_body_cleanup(&dl->body0);
274
	list_splice_init(&dl->fragments, &dl->dlm->gc_fragments);
275
	kfree(dl);
276 277
}

278 279 280 281 282 283 284 285 286
/**
 * vsp1_dl_list_get - Get a free display list
 * @dlm: The display list manager
 *
 * Get a display list from the pool of free lists and return it.
 *
 * This function must be called without the display list manager lock held.
 */
struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
287
{
288
	struct vsp1_dl_list *dl = NULL;
289 290
	unsigned long flags;

291
	spin_lock_irqsave(&dlm->lock, flags);
292

293 294 295
	if (!list_empty(&dlm->free)) {
		dl = list_first_entry(&dlm->free, struct vsp1_dl_list, list);
		list_del(&dl->list);
296 297
	}

298 299 300 301
	spin_unlock_irqrestore(&dlm->lock, flags);

	return dl;
}
302

303 304 305 306 307 308
/* This function must be called with the display list manager lock held.*/
static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	if (!dl)
		return;

309 310 311 312 313 314 315 316 317 318
	/* We can't free fragments here as DMA memory can only be freed in
	 * interruptible context. Move all fragments to the display list
	 * manager's list of fragments to be freed, they will be
	 * garbage-collected by the work queue.
	 */
	if (!list_empty(&dl->fragments)) {
		list_splice_init(&dl->fragments, &dl->dlm->gc_fragments);
		schedule_work(&dl->dlm->gc_work);
	}

319
	dl->body0.num_entries = 0;
320 321 322 323

	list_add_tail(&dl->list, &dl->dlm->free);
}

324 325 326 327 328 329 330 331 332 333 334
/**
 * vsp1_dl_list_put - Release a display list
 * @dl: The display list
 *
 * Release the display list and return it to the pool of free lists.
 *
 * Passing a NULL pointer to this function is safe, in that case no operation
 * will be performed.
 */
void vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
335 336
	unsigned long flags;

337 338
	if (!dl)
		return;
339

340 341 342
	spin_lock_irqsave(&dl->dlm->lock, flags);
	__vsp1_dl_list_put(dl);
	spin_unlock_irqrestore(&dl->dlm->lock, flags);
343 344
}

345 346 347 348 349 350 351 352 353
/**
 * vsp1_dl_list_write - Write a register to the display list
 * @dl: The display list
 * @reg: The register address
 * @data: The register value
 *
 * Write the given register and value to the display list. Up to 256 registers
 * can be written per display list.
 */
354
void vsp1_dl_list_write(struct vsp1_dl_list *dl, u32 reg, u32 data)
355
{
356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383
	vsp1_dl_fragment_write(&dl->body0, reg, data);
}

/**
 * vsp1_dl_list_add_fragment - Add a fragment to the display list
 * @dl: The display list
 * @dlb: The fragment
 *
 * Attach a display list body as a fragment of the display list. The hardware
 * processes fragment registers after the main body's registers, in the order
 * the fragments were added.
 *
 * Ownership of the fragment transfers to the list: the caller must neither
 * touch it afterwards nor free it with vsp1_dl_fragment_free().
 *
 * Fragments are only usable for display lists in header mode.
 *
 * Return 0 on success or -EINVAL when the list operates in headerless mode.
 */
int vsp1_dl_list_add_fragment(struct vsp1_dl_list *dl,
			      struct vsp1_dl_body *dlb)
{
	/* Multi-body lists are only available in header mode. */
	if (dl->dlm->mode != VSP1_DL_MODE_HEADER)
		return -EINVAL;

	list_add_tail(&dlb->list, &dl->fragments);
	return 0;
}

386
void vsp1_dl_list_commit(struct vsp1_dl_list *dl)
387
{
388 389
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_device *vsp1 = dlm->vsp1;
390 391 392
	unsigned long flags;
	bool update;

393
	spin_lock_irqsave(&dlm->lock, flags);
394

395
	if (dl->dlm->mode == VSP1_DL_MODE_HEADER) {
396 397 398 399 400 401 402 403 404 405
		struct vsp1_dl_header_list *hdr = dl->header->lists;
		struct vsp1_dl_body *dlb;
		unsigned int num_lists = 0;

		/* Fill the header with the display list bodies addresses and
		 * sizes. The address of the first body has already been filled
		 * when the display list was allocated.
		 *
		 * In header mode the caller guarantees that the hardware is
		 * idle at this point.
406
		 */
407 408 409 410 411 412 413 414 415 416 417 418 419
		hdr->num_bytes = dl->body0.num_entries
			       * sizeof(*dl->header->lists);

		list_for_each_entry(dlb, &dl->fragments, list) {
			num_lists++;
			hdr++;

			hdr->addr = dlb->dma;
			hdr->num_bytes = dlb->num_entries
				       * sizeof(*dl->header->lists);
		}

		dl->header->num_lists = num_lists;
420 421 422 423 424 425
		vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);

		dlm->active = dl;
		goto done;
	}

426 427 428 429 430 431 432
	/* Once the UPD bit has been set the hardware can start processing the
	 * display list at any time and we can't touch the address and size
	 * registers. In that case mark the update as pending, it will be
	 * queued up to the hardware by the frame end interrupt handler.
	 */
	update = !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE) & VI6_DL_BODY_SIZE_UPD);
	if (update) {
433
		__vsp1_dl_list_put(dlm->pending);
434
		dlm->pending = dl;
435 436 437 438 439 440 441
		goto done;
	}

	/* Program the hardware with the display list body address and size.
	 * The UPD bit will be cleared by the device when the display list is
	 * processed.
	 */
442
	vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma);
443
	vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
444
		   (dl->body0.num_entries * sizeof(*dl->header->lists)));
445

446
	__vsp1_dl_list_put(dlm->queued);
447
	dlm->queued = dl;
448 449

done:
450
	spin_unlock_irqrestore(&dlm->lock, flags);
451 452 453
}

/* -----------------------------------------------------------------------------
454
 * Display List Manager
455 456
 */

457 458
/* Interrupt Handling */
void vsp1_dlm_irq_display_start(struct vsp1_dl_manager *dlm)
459
{
460
	spin_lock(&dlm->lock);
461 462 463 464 465

	/* The display start interrupt signals the end of the display list
	 * processing by the device. The active display list, if any, won't be
	 * accessed anymore and can be reused.
	 */
466
	__vsp1_dl_list_put(dlm->active);
467
	dlm->active = NULL;
468

469
	spin_unlock(&dlm->lock);
470 471
}

472
void vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
473
{
474
	struct vsp1_device *vsp1 = dlm->vsp1;
475

476 477
	spin_lock(&dlm->lock);

478
	__vsp1_dl_list_put(dlm->active);
479
	dlm->active = NULL;
480

481 482 483 484 485 486 487
	/* Header mode is used for mem-to-mem pipelines only. We don't need to
	 * perform any operation as there can't be any new display list queued
	 * in that case.
	 */
	if (dlm->mode == VSP1_DL_MODE_HEADER)
		goto done;

488 489 490 491 492 493 494 495 496 497 498
	/* The UPD bit set indicates that the commit operation raced with the
	 * interrupt and occurred after the frame end event and UPD clear but
	 * before interrupt processing. The hardware hasn't taken the update
	 * into account yet, we'll thus skip one frame and retry.
	 */
	if (vsp1_read(vsp1, VI6_DL_BODY_SIZE) & VI6_DL_BODY_SIZE_UPD)
		goto done;

	/* The device starts processing the queued display list right after the
	 * frame end interrupt. The display list thus becomes active.
	 */
499 500 501
	if (dlm->queued) {
		dlm->active = dlm->queued;
		dlm->queued = NULL;
502 503 504 505 506
	}

	/* Now that the UPD bit has been cleared we can queue the next display
	 * list to the hardware if one has been prepared.
	 */
507 508
	if (dlm->pending) {
		struct vsp1_dl_list *dl = dlm->pending;
509

510
		vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma);
511
		vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
512 513
			   (dl->body0.num_entries *
			    sizeof(*dl->header->lists)));
514

515 516
		dlm->queued = dl;
		dlm->pending = NULL;
517 518 519
	}

done:
520
	spin_unlock(&dlm->lock);
521 522
}

523 524
/* Hardware Setup */
void vsp1_dlm_setup(struct vsp1_device *vsp1)
525
{
526 527 528
	u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT)
		 | VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
		 | VI6_DL_CTRL_DLE;
529

530 531
	/* The DRM pipeline operates with display lists in Continuous Frame
	 * Mode, all other pipelines use manual start.
532 533
	 */
	if (vsp1->drm)
534
		ctrl |= VI6_DL_CTRL_CFM0 | VI6_DL_CTRL_NH0;
535 536 537 538 539

	vsp1_write(vsp1, VI6_DL_CTRL, ctrl);
	vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
}

540 541
/*
 * Reset the display list manager state: return the active, queued and pending
 * lists to the free pool. The caller must guarantee the hardware is no longer
 * processing display lists.
 */
void vsp1_dlm_reset(struct vsp1_dl_manager *dlm)
{
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	__vsp1_dl_list_put(dlm->active);
	__vsp1_dl_list_put(dlm->queued);
	__vsp1_dl_list_put(dlm->pending);

	/* Clear the state pointers while still holding the lock so no other
	 * context can observe stale references to lists that have just been
	 * returned to the free pool.
	 */
	dlm->active = NULL;
	dlm->queued = NULL;
	dlm->pending = NULL;

	spin_unlock_irqrestore(&dlm->lock, flags);
}
556

557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590
/*
 * Free all fragments awaiting to be garbage-collected.
 *
 * This function must be called without the display list manager lock held.
 */
static void vsp1_dlm_fragments_free(struct vsp1_dl_manager *dlm)
{
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	while (!list_empty(&dlm->gc_fragments)) {
		struct vsp1_dl_body *dlb;

		dlb = list_first_entry(&dlm->gc_fragments, struct vsp1_dl_body,
				       list);
		list_del(&dlb->list);

		/* Drop the lock around the actual free: releasing DMA memory
		 * may sleep and must not happen under a spinlock. The entry
		 * has already been unlinked, so this is safe.
		 */
		spin_unlock_irqrestore(&dlm->lock, flags);
		vsp1_dl_fragment_free(dlb);
		spin_lock_irqsave(&dlm->lock, flags);
	}

	spin_unlock_irqrestore(&dlm->lock, flags);
}

/* Work queue callback: free all fragments queued for garbage collection. */
static void vsp1_dlm_garbage_collect(struct work_struct *work)
{
	struct vsp1_dl_manager *dlm =
		container_of(work, struct vsp1_dl_manager, gc_work);

	vsp1_dlm_fragments_free(dlm);
}

591
struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
592
					unsigned int index,
593
					unsigned int prealloc)
594
{
595
	struct vsp1_dl_manager *dlm;
596 597
	unsigned int i;

598 599 600 601
	dlm = devm_kzalloc(vsp1->dev, sizeof(*dlm), GFP_KERNEL);
	if (!dlm)
		return NULL;

602 603 604
	dlm->index = index;
	dlm->mode = index == 0 && !vsp1->info->uapi
		  ? VSP1_DL_MODE_HEADERLESS : VSP1_DL_MODE_HEADER;
605
	dlm->vsp1 = vsp1;
606

607 608
	spin_lock_init(&dlm->lock);
	INIT_LIST_HEAD(&dlm->free);
609 610
	INIT_LIST_HEAD(&dlm->gc_fragments);
	INIT_WORK(&dlm->gc_work, vsp1_dlm_garbage_collect);
611

612 613
	for (i = 0; i < prealloc; ++i) {
		struct vsp1_dl_list *dl;
614

615 616
		dl = vsp1_dl_list_alloc(dlm);
		if (!dl)
617
			return NULL;
618

619
		list_add_tail(&dl->list, &dlm->free);
620 621
	}

622
	return dlm;
623 624
}

625
void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm)
626
{
627 628
	struct vsp1_dl_list *dl, *next;

629 630 631
	if (!dlm)
		return;

632 633
	cancel_work_sync(&dlm->gc_work);

634 635 636 637
	list_for_each_entry_safe(dl, next, &dlm->free, list) {
		list_del(&dl->list);
		vsp1_dl_list_free(dl);
	}
638 639

	vsp1_dlm_fragments_free(dlm);
640
}