// SPDX-License-Identifier: GPL-2.0+
/*
 * vsp1_dl.c  --  R-Car VSP1 Display List
 *
 * Copyright (C) 2015 Renesas Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "vsp1.h"
#include "vsp1_dl.h"

#define VSP1_DL_NUM_ENTRIES		256

#define VSP1_DLH_INT_ENABLE		(1 << 1)
#define VSP1_DLH_AUTO_START		(1 << 0)

#define VSP1_DLH_EXT_PRE_CMD_EXEC	(1 << 9)
#define VSP1_DLH_EXT_POST_CMD_EXEC	(1 << 8)

struct vsp1_dl_header_list {
	u32 num_bytes;
	u32 addr;
} __packed;

struct vsp1_dl_header {
	u32 num_lists;
	struct vsp1_dl_header_list lists[8];
	u32 next_header;
	u32 flags;
} __packed;

/**
 * struct vsp1_dl_ext_header - Extended display list header
 * @padding: padding zero bytes for alignment
 * @pre_ext_dl_num_cmd: number of pre-extended command bodies to parse
 * @flags: enables or disables execution of the pre and post command
 * @pre_ext_dl_plist: start address of pre-extended display list bodies
 * @post_ext_dl_num_cmd: number of post-extended command bodies to parse
 * @post_ext_dl_plist: start address of post-extended display list bodies
 */
struct vsp1_dl_ext_header {
	u32 padding;

	/*
	 * The datasheet represents flags as stored before pre_ext_dl_num_cmd,
	 * expecting 32-bit accesses. The flags are appropriate to the whole
	 * header, not just the pre_ext command, and thus warrant being
	 * separated out. Due to byte ordering, and representing as 16 bit
	 * values here, the flags must be positioned after the
	 * pre_ext_dl_num_cmd.
	 */
	u16 pre_ext_dl_num_cmd;
	u16 flags;
	u32 pre_ext_dl_plist;

	u32 post_ext_dl_num_cmd;
	u32 post_ext_dl_plist;
} __packed;

struct vsp1_dl_header_extended {
	struct vsp1_dl_header header;
	struct vsp1_dl_ext_header ext;
} __packed;

struct vsp1_dl_entry {
	u32 addr;
	u32 data;
} __packed;

/**
 * struct vsp1_pre_ext_dl_body - Pre Extended Display List Body
 * @opcode: Extended display list command operation code
 * @flags: Pre-extended command flags. These are specific to each command
 * @address_set: Source address set pointer. Must have 16-byte alignment
 * @reserved: Zero bits for alignment.
 */
struct vsp1_pre_ext_dl_body {
	u32 opcode;
	u32 flags;
	u32 address_set;
	u32 reserved;
} __packed;

/**
 * struct vsp1_dl_body - Display list body
 * @list: entry in the display list's list of bodies
 * @free: entry in the pool free body list
 * @refcnt: reference tracking for the body
 * @pool: pool to which this body belongs
 * @entries: array of entries
 * @dma: DMA address of the entries
 * @size: size of the DMA memory in bytes
 * @num_entries: number of stored entries
 * @max_entries: number of entries available
 */
struct vsp1_dl_body {
	struct list_head list;
	struct list_head free;

	refcount_t refcnt;

	struct vsp1_dl_body_pool *pool;

	struct vsp1_dl_entry *entries;
	dma_addr_t dma;
	size_t size;

	unsigned int num_entries;
	unsigned int max_entries;
};

/**
 * struct vsp1_dl_body_pool - display list body pool
 * @dma: DMA address of the entries
 * @size: size of the full DMA memory pool in bytes
 * @mem: CPU memory pointer for the pool
 * @bodies: Array of DLB structures for the pool
 * @free: List of free DLB entries
 * @lock: Protects the free list
 * @vsp1: the VSP1 device
 */
struct vsp1_dl_body_pool {
	/* DMA allocation */
	dma_addr_t dma;
	size_t size;
	void *mem;

	/* Body management */
	struct vsp1_dl_body *bodies;
	struct list_head free;
	spinlock_t lock;

	struct vsp1_device *vsp1;
};

/**
 * struct vsp1_dl_cmd_pool - Display List commands pool
 * @dma: DMA address of the entries
 * @size: size of the full DMA memory pool in bytes
 * @mem: CPU memory pointer for the pool
 * @cmds: Array of command structures for the pool
 * @free: Free pool entries
 * @lock: Protects the free list
 * @vsp1: the VSP1 device
 */
struct vsp1_dl_cmd_pool {
	/* DMA allocation */
	dma_addr_t dma;
	size_t size;
	void *mem;

	struct vsp1_dl_ext_cmd *cmds;
	struct list_head free;

	spinlock_t lock;

	struct vsp1_device *vsp1;
};

/**
 * struct vsp1_dl_list - Display list
 * @list: entry in the display list manager lists
 * @dlm: the display list manager
 * @header: display list header
 * @extension: extended display list header. NULL for normal lists
 * @dma: DMA address for the header
 * @body0: first display list body
 * @bodies: list of extra display list bodies
 * @pre_cmd: pre command to be issued through extended dl header
 * @post_cmd: post command to be issued through extended dl header
 * @has_chain: if true, indicates that there's a partition chain
 * @chain: entry in the display list partition chain
 * @flags: display list flags, a combination of VSP1_DL_FRAME_END_*
 */
struct vsp1_dl_list {
	struct list_head list;
	struct vsp1_dl_manager *dlm;

	struct vsp1_dl_header *header;
	struct vsp1_dl_ext_header *extension;
	dma_addr_t dma;

	struct vsp1_dl_body *body0;
	struct list_head bodies;

	struct vsp1_dl_ext_cmd *pre_cmd;
	struct vsp1_dl_ext_cmd *post_cmd;

	bool has_chain;
	struct list_head chain;

	unsigned int flags;
};

/**
 * struct vsp1_dl_manager - Display List manager
 * @index: index of the related WPF
 * @singleshot: execute the display list in single-shot mode
 * @vsp1: the VSP1 device
 * @lock: protects the free, active, queued, and pending lists
 * @free: array of all free display lists
 * @active: list currently being processed (loaded) by hardware
 * @queued: list queued to the hardware (written to the DL registers)
 * @pending: list waiting to be queued to the hardware
 * @pool: body pool for the display list bodies
 * @cmdpool: commands pool for extended display list
 */
struct vsp1_dl_manager {
	unsigned int index;
	bool singleshot;
	struct vsp1_device *vsp1;

	spinlock_t lock;
	struct list_head free;
	struct vsp1_dl_list *active;
	struct vsp1_dl_list *queued;
	struct vsp1_dl_list *pending;

	struct vsp1_dl_body_pool *pool;
	struct vsp1_dl_cmd_pool *cmdpool;
};

/* -----------------------------------------------------------------------------
 * Display List Body Management
 */

/**
 * vsp1_dl_body_pool_create - Create a pool of bodies from a single allocation
 * @vsp1: The VSP1 device
 * @num_bodies: The number of bodies to allocate
 * @num_entries: The maximum number of entries that a body can contain
 * @extra_size: Extra allocation provided for the bodies
 *
 * Allocate a pool of display list bodies each with enough memory to contain the
 * requested number of entries plus the @extra_size.
 *
 * Return a pointer to a pool on success or NULL if memory can't be allocated.
 */
struct vsp1_dl_body_pool *
vsp1_dl_body_pool_create(struct vsp1_device *vsp1, unsigned int num_bodies,
			 unsigned int num_entries, size_t extra_size)
{
	struct vsp1_dl_body_pool *pool;
	size_t dlb_size;
	unsigned int i;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->vsp1 = vsp1;

	/*
	 * TODO: 'extra_size' is only used by vsp1_dlm_create(), to allocate
	 * extra memory for the display list header. We need only one header per
	 * display list, not per display list body, thus this allocation is
	 * extraneous and should be reworked in the future.
	 */
	dlb_size = num_entries * sizeof(struct vsp1_dl_entry) + extra_size;
	pool->size = dlb_size * num_bodies;

	pool->bodies = kcalloc(num_bodies, sizeof(*pool->bodies), GFP_KERNEL);
	if (!pool->bodies) {
		kfree(pool);
		return NULL;
	}

	pool->mem = dma_alloc_wc(vsp1->bus_master, pool->size, &pool->dma,
				 GFP_KERNEL);
	if (!pool->mem) {
		kfree(pool->bodies);
		kfree(pool);
		return NULL;
	}

	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free);

	for (i = 0; i < num_bodies; ++i) {
		struct vsp1_dl_body *dlb = &pool->bodies[i];

		dlb->pool = pool;
		dlb->max_entries = num_entries;

		dlb->dma = pool->dma + i * dlb_size;
		dlb->entries = pool->mem + i * dlb_size;

		list_add_tail(&dlb->free, &pool->free);
	}

	return pool;
}
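
/*
 * Usage sketch (illustrative only, not part of the driver): a caller could
 * create a small standalone pool, take a body from it, and release everything
 * again. The pool size and the reg/value names are arbitrary placeholders.
 *
 *	struct vsp1_dl_body_pool *pool;
 *	struct vsp1_dl_body *dlb;
 *
 *	pool = vsp1_dl_body_pool_create(vsp1, 8, VSP1_DL_NUM_ENTRIES, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *
 *	dlb = vsp1_dl_body_get(pool);
 *	if (dlb) {
 *		vsp1_dl_body_write(dlb, reg, value);
 *		vsp1_dl_body_put(dlb);
 *	}
 *
 *	vsp1_dl_body_pool_destroy(pool);
 */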

/**
 * vsp1_dl_body_pool_destroy - Release a body pool
 * @pool: The body pool
 *
 * Release all components of a pool allocation.
 */
void vsp1_dl_body_pool_destroy(struct vsp1_dl_body_pool *pool)
{
	if (!pool)
		return;

	if (pool->mem)
		dma_free_wc(pool->vsp1->bus_master, pool->size, pool->mem,
			    pool->dma);

	kfree(pool->bodies);
	kfree(pool);
}

/**
 * vsp1_dl_body_get - Obtain a body from a pool
 * @pool: The body pool
 *
 * Obtain a body from the pool without blocking.
 *
 * Returns a display list body or NULL if there are none available.
 */
struct vsp1_dl_body *vsp1_dl_body_get(struct vsp1_dl_body_pool *pool)
{
	struct vsp1_dl_body *dlb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);

	if (!list_empty(&pool->free)) {
		dlb = list_first_entry(&pool->free, struct vsp1_dl_body, free);
		list_del(&dlb->free);
		refcount_set(&dlb->refcnt, 1);
	}

	spin_unlock_irqrestore(&pool->lock, flags);

	return dlb;
}

/**
 * vsp1_dl_body_put - Return a body back to its pool
 * @dlb: The display list body
 *
 * Return a body back to the pool, and reset the num_entries to clear the list.
 */
void vsp1_dl_body_put(struct vsp1_dl_body *dlb)
{
	unsigned long flags;

	if (!dlb)
		return;

	if (!refcount_dec_and_test(&dlb->refcnt))
		return;

	dlb->num_entries = 0;

	spin_lock_irqsave(&dlb->pool->lock, flags);
	list_add_tail(&dlb->free, &dlb->pool->free);
	spin_unlock_irqrestore(&dlb->pool->lock, flags);
}

/**
 * vsp1_dl_body_write - Write a register to a display list body
 * @dlb: The body
 * @reg: The register address
 * @data: The register value
 *
 * Write the given register and value to the display list body. The maximum
 * number of entries that can be written in a body is specified when the body
 * is allocated by vsp1_dl_body_pool_create().
 */
void vsp1_dl_body_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
{
	if (WARN_ONCE(dlb->num_entries >= dlb->max_entries,
		      "DLB size exceeded (max %u)", dlb->max_entries))
		return;

	dlb->entries[dlb->num_entries].addr = reg;
	dlb->entries[dlb->num_entries].data = data;
	dlb->num_entries++;
}
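
/*
 * Usage sketch (illustrative): entries accumulate in the body until it is
 * reset, so a configure routine simply emits one write per register. The
 * register names below are placeholders standing in for real VI6_* defines.
 *
 *	vsp1_dl_body_write(dlb, reg_a, val_a);
 *	vsp1_dl_body_write(dlb, reg_b, val_b);
 *
 * Writes beyond max_entries are dropped with a one-time warning, so the pool
 * must be created with enough entries for the largest configuration.
 */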

/* -----------------------------------------------------------------------------
 * Display List Extended Command Management
 */

enum vsp1_extcmd_type {
	VSP1_EXTCMD_AUTODISP,
	VSP1_EXTCMD_AUTOFLD,
};

struct vsp1_extended_command_info {
	u16 opcode;
	size_t body_size;
};

static const struct vsp1_extended_command_info vsp1_extended_commands[] = {
	[VSP1_EXTCMD_AUTODISP] = { 0x02, 96 },
	[VSP1_EXTCMD_AUTOFLD]  = { 0x03, 160 },
};

/**
 * vsp1_dl_cmd_pool_create - Create a pool of commands from a single allocation
 * @vsp1: The VSP1 device
 * @type: The command pool type
 * @num_cmds: The number of commands to allocate
 *
 * Allocate a pool of commands each with enough memory to contain the private
 * data of each command. The allocation sizes are dependent upon the command
 * type.
 *
 * Return a pointer to the pool on success or NULL if memory can't be allocated.
 */
static struct vsp1_dl_cmd_pool *
vsp1_dl_cmd_pool_create(struct vsp1_device *vsp1, enum vsp1_extcmd_type type,
			unsigned int num_cmds)
{
	struct vsp1_dl_cmd_pool *pool;
	unsigned int i;
	size_t cmd_size;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->vsp1 = vsp1;

	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free);

	pool->cmds = kcalloc(num_cmds, sizeof(*pool->cmds), GFP_KERNEL);
	if (!pool->cmds) {
		kfree(pool);
		return NULL;
	}

	cmd_size = sizeof(struct vsp1_pre_ext_dl_body) +
		   vsp1_extended_commands[type].body_size;
	cmd_size = ALIGN(cmd_size, 16);

	pool->size = cmd_size * num_cmds;
	pool->mem = dma_alloc_wc(vsp1->bus_master, pool->size, &pool->dma,
				 GFP_KERNEL);
	if (!pool->mem) {
		kfree(pool->cmds);
		kfree(pool);
		return NULL;
	}

	for (i = 0; i < num_cmds; ++i) {
		struct vsp1_dl_ext_cmd *cmd = &pool->cmds[i];
		size_t cmd_offset = i * cmd_size;
		/* data_offset must be 16 byte aligned for DMA. */
		size_t data_offset = sizeof(struct vsp1_pre_ext_dl_body) +
				     cmd_offset;

		cmd->pool = pool;
		cmd->opcode = vsp1_extended_commands[type].opcode;

		/*
		 * TODO: Auto-disp can utilise more than one extended body
		 * command per cmd.
		 */
		cmd->num_cmds = 1;
		cmd->cmds = pool->mem + cmd_offset;
		cmd->cmd_dma = pool->dma + cmd_offset;

		cmd->data = pool->mem + data_offset;
		cmd->data_dma = pool->dma + data_offset;

		list_add_tail(&cmd->free, &pool->free);
	}

	return pool;
}

static
struct vsp1_dl_ext_cmd *vsp1_dl_ext_cmd_get(struct vsp1_dl_cmd_pool *pool)
{
	struct vsp1_dl_ext_cmd *cmd = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);

	if (!list_empty(&pool->free)) {
		cmd = list_first_entry(&pool->free, struct vsp1_dl_ext_cmd,
				       free);
		list_del(&cmd->free);
	}

	spin_unlock_irqrestore(&pool->lock, flags);

	return cmd;
}

static void vsp1_dl_ext_cmd_put(struct vsp1_dl_ext_cmd *cmd)
{
	unsigned long flags;

	if (!cmd)
		return;

	/* Reset flags, these mark data usage. */
	cmd->flags = 0;

	spin_lock_irqsave(&cmd->pool->lock, flags);
	list_add_tail(&cmd->free, &cmd->pool->free);
	spin_unlock_irqrestore(&cmd->pool->lock, flags);
}

static void vsp1_dl_ext_cmd_pool_destroy(struct vsp1_dl_cmd_pool *pool)
{
	if (!pool)
		return;

	if (pool->mem)
		dma_free_wc(pool->vsp1->bus_master, pool->size, pool->mem,
			    pool->dma);

	kfree(pool->cmds);
	kfree(pool);
}

struct vsp1_dl_ext_cmd *vsp1_dl_get_pre_cmd(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	if (dl->pre_cmd)
		return dl->pre_cmd;

	dl->pre_cmd = vsp1_dl_ext_cmd_get(dlm->cmdpool);

	return dl->pre_cmd;
}
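
/*
 * Usage sketch (illustrative): an entity needing an extended pre command
 * (AUTOFLD for interlaced operation) fetches it once per display list and
 * fills in the command-specific data buffer. fill_autofld_data() is a
 * hypothetical helper, not part of this driver.
 *
 *	struct vsp1_dl_ext_cmd *cmd = vsp1_dl_get_pre_cmd(dl);
 *
 *	if (cmd)
 *		fill_autofld_data(cmd->data);
 *
 * The command header itself is filled when the display list is committed, in
 * vsp1_dl_list_fill_header().
 */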

/* ----------------------------------------------------------------------------
 * Display List Transaction Management
 */

static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl;
	size_t header_offset;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	INIT_LIST_HEAD(&dl->bodies);
	dl->dlm = dlm;

	/* Get a default body for our list. */
	dl->body0 = vsp1_dl_body_get(dlm->pool);
	if (!dl->body0) {
		kfree(dl);
		return NULL;
	}

	header_offset = dl->body0->max_entries * sizeof(*dl->body0->entries);

	dl->header = ((void *)dl->body0->entries) + header_offset;
	dl->dma = dl->body0->dma + header_offset;

	memset(dl->header, 0, sizeof(*dl->header));
	dl->header->lists[0].addr = dl->body0->dma;

	return dl;
}

static void vsp1_dl_list_bodies_put(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_body *dlb, *tmp;

	list_for_each_entry_safe(dlb, tmp, &dl->bodies, list) {
		list_del(&dlb->list);
		vsp1_dl_body_put(dlb);
	}
}

static void vsp1_dl_list_free(struct vsp1_dl_list *dl)
{
	vsp1_dl_body_put(dl->body0);
	vsp1_dl_list_bodies_put(dl);

	kfree(dl);
}

/**
 * vsp1_dl_list_get - Get a free display list
 * @dlm: The display list manager
 *
 * Get a display list from the pool of free lists and return it.
 *
 * This function must be called without the display list manager lock held.
 */
struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	if (!list_empty(&dlm->free)) {
		dl = list_first_entry(&dlm->free, struct vsp1_dl_list, list);
		list_del(&dl->list);

		/*
		 * The display list chain must be initialised to ensure every
		 * display list can assert list_empty() if it is not in a chain.
		 */
		INIT_LIST_HEAD(&dl->chain);
	}

	spin_unlock_irqrestore(&dlm->lock, flags);

	return dl;
}

/* This function must be called with the display list manager lock held. */
static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_list *dl_next;

	if (!dl)
		return;

	/*
	 * Release any linked display-lists which were chained for a single
	 * hardware operation.
	 */
	if (dl->has_chain) {
		list_for_each_entry(dl_next, &dl->chain, chain)
			__vsp1_dl_list_put(dl_next);
	}

	dl->has_chain = false;

	vsp1_dl_list_bodies_put(dl);

	vsp1_dl_ext_cmd_put(dl->pre_cmd);
	vsp1_dl_ext_cmd_put(dl->post_cmd);

	dl->pre_cmd = NULL;
	dl->post_cmd = NULL;

	/*
	 * body0 is reused as an optimisation as presently every display list
	 * has at least one body, thus we reinitialise the entries list.
	 */
	dl->body0->num_entries = 0;

	list_add_tail(&dl->list, &dl->dlm->free);
}

/**
 * vsp1_dl_list_put - Release a display list
 * @dl: The display list
 *
 * Release the display list and return it to the pool of free lists.
 *
 * Passing a NULL pointer to this function is safe, in that case no operation
 * will be performed.
 */
void vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	unsigned long flags;

	if (!dl)
		return;

	spin_lock_irqsave(&dl->dlm->lock, flags);
	__vsp1_dl_list_put(dl);
	spin_unlock_irqrestore(&dl->dlm->lock, flags);
}

/**
 * vsp1_dl_list_get_body0 - Obtain the default body for the display list
 * @dl: The display list
 *
 * Obtain a pointer to the internal display list body allowing this to be passed
 * directly to configure operations.
 */
struct vsp1_dl_body *vsp1_dl_list_get_body0(struct vsp1_dl_list *dl)
{
	return dl->body0;
}

/**
 * vsp1_dl_list_add_body - Add a body to the display list
 * @dl: The display list
 * @dlb: The body
 *
 * Add a display list body to a display list. Registers contained in bodies are
 * processed after registers contained in the main display list, in the order in
 * which bodies are added.
 *
 * Adding a body to a display list passes ownership of the body to the list. The
 * caller retains its reference to the body when adding it to the display list,
 * but is not allowed to add new entries to the body.
 *
 * The reference must be explicitly released by a call to vsp1_dl_body_put()
 * when the body isn't needed anymore.
 */
int vsp1_dl_list_add_body(struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb)
{
	refcount_inc(&dlb->refcnt);

	list_add_tail(&dlb->list, &dl->bodies);

	return 0;
}
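
/*
 * Usage sketch (illustrative): a pipeline entity typically takes a body from
 * the manager's pool, fills it with register writes, attaches it to the
 * display list and then drops its own reference. reg/value are placeholders.
 *
 *	struct vsp1_dl_body *dlb = vsp1_dlm_dl_body_get(dlm);
 *
 *	if (!dlb)
 *		return -ENOMEM;
 *
 *	vsp1_dl_body_write(dlb, reg, value);
 *	vsp1_dl_list_add_body(dl, dlb);
 *	vsp1_dl_body_put(dlb);
 */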

/**
 * vsp1_dl_list_add_chain - Add a display list to a chain
 * @head: The head display list
 * @dl: The new display list
 *
 * Add a display list to an existing display list chain. The chained lists
 * will be automatically processed by the hardware without intervention from
 * the CPU. A display list end interrupt will only complete after the last
 * display list in the chain has completed processing.
 *
 * Adding a display list to a chain passes ownership of the display list to
 * the head display list item. The chain is released when the head dl item is
 * put back with __vsp1_dl_list_put().
 */
int vsp1_dl_list_add_chain(struct vsp1_dl_list *head,
			   struct vsp1_dl_list *dl)
{
	head->has_chain = true;
	list_add_tail(&dl->chain, &head->chain);
	return 0;
}
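
/*
 * Usage sketch (illustrative): in single-shot mode a frame split into
 * partitions uses one display list per partition, all chained behind the
 * head list before the head is committed. Error handling is omitted here.
 *
 *	struct vsp1_dl_list *head = vsp1_dl_list_get(dlm);
 *	struct vsp1_dl_list *part = vsp1_dl_list_get(dlm);
 *
 *	vsp1_dl_list_add_chain(head, part);
 *	vsp1_dl_list_commit(head, 0);
 */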

static void vsp1_dl_ext_cmd_fill_header(struct vsp1_dl_ext_cmd *cmd)
{
	cmd->cmds[0].opcode = cmd->opcode;
	cmd->cmds[0].flags = cmd->flags;
	cmd->cmds[0].address_set = cmd->data_dma;
	cmd->cmds[0].reserved = 0;
}

static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_dl_header_list *hdr = dl->header->lists;
	struct vsp1_dl_body *dlb;
	unsigned int num_lists = 0;

	/*
	 * Fill the header with the display list bodies addresses and sizes. The
	 * address of the first body has already been filled when the display
	 * list was allocated.
	 */

	hdr->num_bytes = dl->body0->num_entries
		       * sizeof(*dl->header->lists);

	list_for_each_entry(dlb, &dl->bodies, list) {
		num_lists++;
		hdr++;

		hdr->addr = dlb->dma;
		hdr->num_bytes = dlb->num_entries
			       * sizeof(*dl->header->lists);
	}

	dl->header->num_lists = num_lists;
	dl->header->flags = 0;

	/*
	 * Enable the interrupt for the end of each frame. In continuous mode
	 * chained lists are used with one list per frame, so enable the
	 * interrupt for each list. In singleshot mode chained lists are used
	 * to partition a single frame, so enable the interrupt for the last
	 * list only.
	 */
	if (!dlm->singleshot || is_last)
		dl->header->flags |= VSP1_DLH_INT_ENABLE;

	/*
	 * In continuous mode enable auto-start for all lists, as the VSP must
	 * loop on the same list until a new one is queued. In singleshot mode
	 * enable auto-start for all lists but the last to chain processing of
	 * partitions without software intervention.
	 */
	if (!dlm->singleshot || !is_last)
		dl->header->flags |= VSP1_DLH_AUTO_START;

	if (!is_last) {
		/*
		 * If this is not the last display list in the chain, queue the
		 * next item for automatic processing by the hardware.
		 */
		struct vsp1_dl_list *next = list_next_entry(dl, chain);

		dl->header->next_header = next->dma;
	} else if (!dlm->singleshot) {
		/*
		 * If the display list manager works in continuous mode, the VSP
		 * should loop over the display list continuously until
		 * instructed to do otherwise.
		 */
		dl->header->next_header = dl->dma;
	}

	if (!dl->extension)
		return;

	dl->extension->flags = 0;

	if (dl->pre_cmd) {
		dl->extension->pre_ext_dl_plist = dl->pre_cmd->cmd_dma;
		dl->extension->pre_ext_dl_num_cmd = dl->pre_cmd->num_cmds;
		dl->extension->flags |= VSP1_DLH_EXT_PRE_CMD_EXEC;

		vsp1_dl_ext_cmd_fill_header(dl->pre_cmd);
	}

	if (dl->post_cmd) {
		dl->extension->post_ext_dl_plist = dl->post_cmd->cmd_dma;
		dl->extension->post_ext_dl_num_cmd = dl->post_cmd->num_cmds;
		dl->extension->flags |= VSP1_DLH_EXT_POST_CMD_EXEC;

		vsp1_dl_ext_cmd_fill_header(dl->post_cmd);
	}
}

static bool vsp1_dl_list_hw_update_pending(struct vsp1_dl_manager *dlm)
{
	struct vsp1_device *vsp1 = dlm->vsp1;

	if (!dlm->queued)
		return false;

	/*
	 * Check whether the VSP1 has taken the update. The hardware indicates
	 * this by clearing the UPDHDR bit in the CMD register.
	 */
	return !!(vsp1_read(vsp1, VI6_CMD(dlm->index)) & VI6_CMD_UPDHDR);
}

static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_device *vsp1 = dlm->vsp1;

	/*
	 * Program the display list header address. If the hardware is idle
	 * (single-shot mode or first frame in continuous mode) it will then be
	 * started independently. If the hardware is operating, the
	 * VI6_DL_HDR_REF_ADDR register will be updated with the display list
	 * address.
	 */
	vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);
}

static void vsp1_dl_list_commit_continuous(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	/*
	 * If a previous display list has been queued to the hardware but not
	 * processed yet, the VSP can start processing it at any time. In that
	 * case we can't replace the queued list by the new one, as we could
	 * race with the hardware. We thus mark the update as pending; it will
	 * be queued to the hardware by the frame end interrupt handler.
	 *
	 * If a display list is already pending we simply drop it as the new
	 * display list is assumed to contain a more recent configuration. It is
	 * an error if the already pending list has the
	 * VSP1_DL_FRAME_END_INTERNAL flag set, as there is then a process
	 * waiting for that list to complete. This shouldn't happen as the
	 * waiting process should perform proper locking, but warn just in
	 * case.
	 */
	if (vsp1_dl_list_hw_update_pending(dlm)) {
		WARN_ON(dlm->pending &&
			(dlm->pending->flags & VSP1_DL_FRAME_END_INTERNAL));
		__vsp1_dl_list_put(dlm->pending);
		dlm->pending = dl;
		return;
	}

	/*
	 * Pass the new display list to the hardware and mark it as queued. It
	 * will become active when the hardware starts processing it.
	 */
	vsp1_dl_list_hw_enqueue(dl);

	__vsp1_dl_list_put(dlm->queued);
	dlm->queued = dl;
}

static void vsp1_dl_list_commit_singleshot(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	/*
	 * When working in single-shot mode, the caller guarantees that the
	 * hardware is idle at this point. Just commit the head display list
	 * to hardware. Chained lists will be started automatically.
	 */
	vsp1_dl_list_hw_enqueue(dl);

	dlm->active = dl;
}

void vsp1_dl_list_commit(struct vsp1_dl_list *dl, unsigned int dl_flags)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_dl_list *dl_next;
	unsigned long flags;

	/* Fill the header for the head and chained display lists. */
	vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));

	list_for_each_entry(dl_next, &dl->chain, chain) {
		bool last = list_is_last(&dl_next->chain, &dl->chain);

		vsp1_dl_list_fill_header(dl_next, last);
	}

	dl->flags = dl_flags & ~VSP1_DL_FRAME_END_COMPLETED;

	spin_lock_irqsave(&dlm->lock, flags);

	if (dlm->singleshot)
		vsp1_dl_list_commit_singleshot(dl);
	else
		vsp1_dl_list_commit_continuous(dl);

	spin_unlock_irqrestore(&dlm->lock, flags);
}
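
/*
 * Usage sketch (illustrative): a typical frame submission gets a free list,
 * writes registers into its default body and commits it. reg/value are
 * placeholders and error handling is omitted.
 *
 *	struct vsp1_dl_list *dl = vsp1_dl_list_get(dlm);
 *	struct vsp1_dl_body *dlb = vsp1_dl_list_get_body0(dl);
 *
 *	vsp1_dl_body_write(dlb, reg, value);
 *	vsp1_dl_list_commit(dl, 0);
 *
 * The committed list is returned to the free pool by the manager once it has
 * been superseded or has completed, so the caller does not put it explicitly.
 */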

/* -----------------------------------------------------------------------------
 * Display List Manager
 */

/**
 * vsp1_dlm_irq_frame_end - Display list handler for the frame end interrupt
 * @dlm: the display list manager
 *
 * Return a set of flags that indicates display list completion status.
 *
 * The VSP1_DL_FRAME_END_COMPLETED flag indicates that the previous display list
 * has completed at frame end. If the flag is not returned display list
 * completion has been delayed by one frame because the display list commit
 * raced with the frame end interrupt. The function always returns with the flag
 * set in single-shot mode as display list processing is then not continuous and
 * races never occur.
 *
 * The following flags are only supported for continuous mode.
 *
 * The VSP1_DL_FRAME_END_INTERNAL flag indicates that the display list that just
 * became active had been queued with the internal notification flag.
 *
 * The VSP1_DL_FRAME_END_WRITEBACK flag indicates that the previously active
 * display list had been queued with the writeback flag.
 */
unsigned int vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
{
	struct vsp1_device *vsp1 = dlm->vsp1;
	u32 status = vsp1_read(vsp1, VI6_STATUS);
	unsigned int flags = 0;

	spin_lock(&dlm->lock);

	/*
	 * The mem-to-mem pipelines work in single-shot mode. No new display
	 * list can be queued, so we don't have to do anything.
	 */
	if (dlm->singleshot) {
		__vsp1_dl_list_put(dlm->active);
		dlm->active = NULL;
		flags |= VSP1_DL_FRAME_END_COMPLETED;
		goto done;
	}

	/*
	 * If the commit operation raced with the interrupt and occurred after
	 * the frame end event but before interrupt processing, the hardware
	 * hasn't taken the update into account yet. We have to skip one frame
	 * and retry.
	 */
	if (vsp1_dl_list_hw_update_pending(dlm))
		goto done;

	/*
	 * Progressive streams report only TOP fields. If we have a BOTTOM
	 * field, we are interlaced, and expect the frame to complete on the
	 * next frame end interrupt.
	 */
	if (status & VI6_STATUS_FLD_STD(dlm->index))
		goto done;

	/*
	 * If the active display list has the writeback flag set, the frame
	 * completion marks the end of the writeback capture. Return the
	 * VSP1_DL_FRAME_END_WRITEBACK flag and reset the display list's
	 * writeback flag.
	 */
	if (dlm->active && (dlm->active->flags & VSP1_DL_FRAME_END_WRITEBACK)) {
		flags |= VSP1_DL_FRAME_END_WRITEBACK;
		dlm->active->flags &= ~VSP1_DL_FRAME_END_WRITEBACK;
	}

	/*
	 * The device starts processing the queued display list right after the
	 * frame end interrupt. The display list thus becomes active.
	 */
	if (dlm->queued) {
		if (dlm->queued->flags & VSP1_DL_FRAME_END_INTERNAL)
			flags |= VSP1_DL_FRAME_END_INTERNAL;
		dlm->queued->flags &= ~VSP1_DL_FRAME_END_INTERNAL;

		__vsp1_dl_list_put(dlm->active);
		dlm->active = dlm->queued;
		dlm->queued = NULL;
		flags |= VSP1_DL_FRAME_END_COMPLETED;
	}

	/*
	 * Now that the VSP has started processing the queued display list, we
	 * can queue the pending display list to the hardware if one has been
	 * prepared.
	 */
	if (dlm->pending) {
		vsp1_dl_list_hw_enqueue(dlm->pending);
		dlm->queued = dlm->pending;
		dlm->pending = NULL;
	}

done:
	spin_unlock(&dlm->lock);

	return flags;
}
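
/*
 * Usage sketch (illustrative): the device frame end handler forwards the
 * returned flags to the pipeline completion logic. pipe_frame_end() is a
 * hypothetical hook, not a function of this driver.
 *
 *	unsigned int flags = vsp1_dlm_irq_frame_end(dlm);
 *
 *	if (flags & VSP1_DL_FRAME_END_COMPLETED)
 *		pipe_frame_end(pipe, flags);
 */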

/* Hardware Setup */
void vsp1_dlm_setup(struct vsp1_device *vsp1)
{
	unsigned int i;
	u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT)
		 | VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
		 | VI6_DL_CTRL_DLE;
	u32 ext_dl = (0x02 << VI6_DL_EXT_CTRL_POLINT_SHIFT)
		   | VI6_DL_EXT_CTRL_DLPRI | VI6_DL_EXT_CTRL_EXT;

	if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL)) {
		for (i = 0; i < vsp1->info->wpf_count; ++i)
			vsp1_write(vsp1, VI6_DL_EXT_CTRL(i), ext_dl);
	}

	vsp1_write(vsp1, VI6_DL_CTRL, ctrl);
	vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
}

void vsp1_dlm_reset(struct vsp1_dl_manager *dlm)
{
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	__vsp1_dl_list_put(dlm->active);
	__vsp1_dl_list_put(dlm->queued);
	__vsp1_dl_list_put(dlm->pending);

	spin_unlock_irqrestore(&dlm->lock, flags);

	dlm->active = NULL;
	dlm->queued = NULL;
	dlm->pending = NULL;
}

struct vsp1_dl_body *vsp1_dlm_dl_body_get(struct vsp1_dl_manager *dlm)
{
	return vsp1_dl_body_get(dlm->pool);
}

struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
					unsigned int index,
					unsigned int prealloc)
{
	struct vsp1_dl_manager *dlm;
	size_t header_size;
	unsigned int i;

	dlm = devm_kzalloc(vsp1->dev, sizeof(*dlm), GFP_KERNEL);
	if (!dlm)
		return NULL;

	dlm->index = index;
	dlm->singleshot = vsp1->info->uapi;
	dlm->vsp1 = vsp1;

	spin_lock_init(&dlm->lock);
	INIT_LIST_HEAD(&dlm->free);

	/*
	 * Initialize the display list body and allocate DMA memory for the body
	 * and the header. Both are allocated together to avoid memory
	 * fragmentation, with the header located right after the body in
	 * memory. An extra body is allocated on top of the prealloc to account
	 * for the cached body used by the vsp1_pipeline object.
	 */
	header_size = vsp1_feature(vsp1, VSP1_HAS_EXT_DL) ?
			sizeof(struct vsp1_dl_header_extended) :
			sizeof(struct vsp1_dl_header);

	header_size = ALIGN(header_size, 8);

	dlm->pool = vsp1_dl_body_pool_create(vsp1, prealloc + 1,
					     VSP1_DL_NUM_ENTRIES, header_size);
	if (!dlm->pool)
		return NULL;

	for (i = 0; i < prealloc; ++i) {
		struct vsp1_dl_list *dl;

		dl = vsp1_dl_list_alloc(dlm);
		if (!dl) {
			vsp1_dlm_destroy(dlm);
			return NULL;
		}

		/* The extended header immediately follows the header. */
		if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL))
			dl->extension = (void *)dl->header
				      + sizeof(*dl->header);

		list_add_tail(&dl->list, &dlm->free);
	}

	if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL)) {
		dlm->cmdpool = vsp1_dl_cmd_pool_create(vsp1,
					VSP1_EXTCMD_AUTOFLD, prealloc);
		if (!dlm->cmdpool) {
			vsp1_dlm_destroy(dlm);
			return NULL;
		}
	}

	return dlm;
}
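
/*
 * Usage sketch (illustrative): each WPF owns one display list manager,
 * created at probe or stream setup time and destroyed with the WPF. The
 * field names and the prealloc count below are arbitrary examples.
 *
 *	wpf->dlm = vsp1_dlm_create(vsp1, wpf->entity.index, 4);
 *	if (!wpf->dlm)
 *		return -ENOMEM;
 *
 *	...
 *
 *	vsp1_dlm_destroy(wpf->dlm);
 */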

void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl, *next;

	if (!dlm)
		return;

	list_for_each_entry_safe(dl, next, &dlm->free, list) {
		list_del(&dl->list);
		vsp1_dl_list_free(dl);
	}

	vsp1_dl_body_pool_destroy(dlm->pool);
	vsp1_dl_ext_cmd_pool_destroy(dlm->cmdpool);
}