/*
 * vsp1_dl.c  --  R-Car VSP1 Display List
 *
 * Copyright (C) 2015 Renesas Electronics Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "vsp1.h"
#include "vsp1_dl.h"

#define VSP1_DL_NUM_ENTRIES		256

#define VSP1_DLH_INT_ENABLE		(1 << 1)
#define VSP1_DLH_AUTO_START		(1 << 0)

struct vsp1_dl_header_list {
	u32 num_bytes;
	u32 addr;
} __attribute__((__packed__));

struct vsp1_dl_header {
	u32 num_lists;
	struct vsp1_dl_header_list lists[8];
	u32 next_header;
	u32 flags;
} __attribute__((__packed__));

struct vsp1_dl_entry {
	u32 addr;
	u32 data;
} __attribute__((__packed__));

/**
 * struct vsp1_dl_body - Display list body
 * @list: entry in the display list's list of bodies
 * @vsp1: the VSP1 device
 * @entries: array of entries
 * @dma: DMA address of the entries
 * @size: size of the DMA memory in bytes
 * @num_entries: number of stored entries
 */
struct vsp1_dl_body {
	struct list_head list;
	struct vsp1_device *vsp1;

	struct vsp1_dl_entry *entries;
	dma_addr_t dma;
	size_t size;

	unsigned int num_entries;
};

/**
 * struct vsp1_dl_list - Display list
 * @list: entry in the display list manager's lists
 * @dlm: the display list manager
 * @header: display list header, NULL for headerless lists
 * @dma: DMA address for the header
 * @body0: first display list body
 * @fragments: list of extra display list bodies
 * @has_chain: true if the display list heads a partition chain
 * @chain: entry in the display list partition chain
 */
struct vsp1_dl_list {
	struct list_head list;
	struct vsp1_dl_manager *dlm;

	struct vsp1_dl_header *header;
	dma_addr_t dma;

	struct vsp1_dl_body body0;
	struct list_head fragments;

	bool has_chain;
	struct list_head chain;
};

enum vsp1_dl_mode {
	VSP1_DL_MODE_HEADER,
	VSP1_DL_MODE_HEADERLESS,
};

/**
 * struct vsp1_dl_manager - Display List manager
 * @index: index of the related WPF
 * @mode: display list operation mode (header or headerless)
 * @vsp1: the VSP1 device
 * @lock: protects the free, active, queued, pending and gc_fragments lists
 * @free: array of all free display lists
 * @active: list currently being processed (loaded) by hardware
 * @queued: list queued to the hardware (written to the DL registers)
 * @pending: list waiting to be queued to the hardware
 * @gc_work: fragments garbage collector work struct
 * @gc_fragments: array of display list fragments waiting to be freed
 */
struct vsp1_dl_manager {
	unsigned int index;
	enum vsp1_dl_mode mode;
	struct vsp1_device *vsp1;

	spinlock_t lock;
	struct list_head free;
	struct vsp1_dl_list *active;
	struct vsp1_dl_list *queued;
	struct vsp1_dl_list *pending;

	struct work_struct gc_work;
	struct list_head gc_fragments;
};

/* -----------------------------------------------------------------------------
 * Display List Body Management
 */

/*
 * Initialize a display list body object and allocate DMA memory for the body
 * data. The display list body object is expected to have been initialized to
 * 0 when allocated.
 */
static int vsp1_dl_body_init(struct vsp1_device *vsp1,
			     struct vsp1_dl_body *dlb, unsigned int num_entries,
			     size_t extra_size)
{
	size_t size = num_entries * sizeof(*dlb->entries) + extra_size;

	dlb->vsp1 = vsp1;
	dlb->size = size;

	dlb->entries = dma_alloc_wc(vsp1->dev, dlb->size, &dlb->dma,
				    GFP_KERNEL);
	if (!dlb->entries)
		return -ENOMEM;

	return 0;
}

/*
 * Clean up a display list body and free the allocated DMA memory.
 */
static void vsp1_dl_body_cleanup(struct vsp1_dl_body *dlb)
{
	dma_free_wc(dlb->vsp1->dev, dlb->size, dlb->entries, dlb->dma);
}

/**
 * vsp1_dl_fragment_alloc - Allocate a display list fragment
 * @vsp1: The VSP1 device
 * @num_entries: The maximum number of entries that the fragment can contain
 *
 * Allocate a display list fragment with enough memory to contain the requested
 * number of entries.
 *
 * Return a pointer to a fragment on success or NULL if memory can't be
 * allocated.
 */
struct vsp1_dl_body *vsp1_dl_fragment_alloc(struct vsp1_device *vsp1,
					    unsigned int num_entries)
{
	struct vsp1_dl_body *dlb;
	int ret;

	dlb = kzalloc(sizeof(*dlb), GFP_KERNEL);
	if (!dlb)
		return NULL;

	ret = vsp1_dl_body_init(vsp1, dlb, num_entries, 0);
	if (ret < 0) {
		kfree(dlb);
		return NULL;
	}

	return dlb;
}

/**
 * vsp1_dl_fragment_free - Free a display list fragment
 * @dlb: The fragment
 *
 * Free the given display list fragment and the associated DMA memory.
 *
 * Fragments must only be freed explicitly if they are not added to a display
 * list, as the display list will take ownership of them and free them
 * otherwise. Manual free typically happens at cleanup time for fragments that
 * have been allocated but not used.
 *
 * Passing a NULL pointer to this function is safe, in that case no operation
 * will be performed.
 */
void vsp1_dl_fragment_free(struct vsp1_dl_body *dlb)
{
	if (!dlb)
		return;

	vsp1_dl_body_cleanup(dlb);
	kfree(dlb);
}

/**
 * vsp1_dl_fragment_write - Write a register to a display list fragment
 * @dlb: The fragment
 * @reg: The register address
 * @data: The register value
 *
 * Write the given register and value to the display list fragment. The maximum
 * number of entries that can be written in a fragment is specified when the
 * fragment is allocated by vsp1_dl_fragment_alloc().
 */
void vsp1_dl_fragment_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
{
	dlb->entries[dlb->num_entries].addr = reg;
	dlb->entries[dlb->num_entries].data = data;
	dlb->num_entries++;
}
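
/*
 * Illustrative sketch, not part of the driver: typical fragment usage, from
 * allocation through register writes to handing ownership to a display list.
 * It relies only on the declarations in vsp1_dl.h; the register offsets
 * below are placeholders, not real VSP1 registers.
 */
static void __maybe_unused vsp1_dl_fragment_usage_sketch(struct vsp1_device *vsp1,
							 struct vsp1_dl_list *dl)
{
	struct vsp1_dl_body *dlb;

	/* Room for two register writes. */
	dlb = vsp1_dl_fragment_alloc(vsp1, 2);
	if (!dlb)
		return;

	vsp1_dl_fragment_write(dlb, 0x0100, 0x00000001);	/* placeholder */
	vsp1_dl_fragment_write(dlb, 0x0104, 0x00000000);	/* placeholder */

	/*
	 * On success the list owns the fragment. Free it manually only if
	 * adding it failed (e.g. for a headerless display list).
	 */
	if (vsp1_dl_list_add_fragment(dl, dlb) < 0)
		vsp1_dl_fragment_free(dlb);
}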

/* -----------------------------------------------------------------------------
 * Display List Transaction Management
 */

static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl;
	size_t header_size;
	int ret;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	INIT_LIST_HEAD(&dl->fragments);
	dl->dlm = dlm;

	/* Initialize the display list body and allocate DMA memory for the body
	 * and the optional header. Both are allocated together to avoid memory
	 * fragmentation, with the header located right after the body in
	 * memory.
	 */
	header_size = dlm->mode == VSP1_DL_MODE_HEADER
		    ? ALIGN(sizeof(struct vsp1_dl_header), 8)
		    : 0;

	ret = vsp1_dl_body_init(dlm->vsp1, &dl->body0, VSP1_DL_NUM_ENTRIES,
				header_size);
	if (ret < 0) {
		kfree(dl);
		return NULL;
	}
	if (dlm->mode == VSP1_DL_MODE_HEADER) {
		size_t header_offset = VSP1_DL_NUM_ENTRIES
				     * sizeof(*dl->body0.entries);

		dl->header = ((void *)dl->body0.entries) + header_offset;
		dl->dma = dl->body0.dma + header_offset;

		memset(dl->header, 0, sizeof(*dl->header));
		dl->header->lists[0].addr = dl->body0.dma;
	}

	return dl;
}
static void vsp1_dl_list_free(struct vsp1_dl_list *dl)
{
	vsp1_dl_body_cleanup(&dl->body0);
	list_splice_init(&dl->fragments, &dl->dlm->gc_fragments);
	kfree(dl);
}

/**
 * vsp1_dl_list_get - Get a free display list
 * @dlm: The display list manager
 *
 * Get a display list from the pool of free lists and return it.
 *
 * This function must be called without the display list manager lock held.
 */
struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);
	if (!list_empty(&dlm->free)) {
		dl = list_first_entry(&dlm->free, struct vsp1_dl_list, list);
		list_del(&dl->list);
		/*
		 * The display list chain must be initialised to ensure every
		 * display list can assert list_empty() if it is not in a chain.
		 */
		INIT_LIST_HEAD(&dl->chain);
	}

	spin_unlock_irqrestore(&dlm->lock, flags);

	return dl;
}
/* This function must be called with the display list manager lock held. */
static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_list *dl_child;

	if (!dl)
		return;

	/*
	 * Release any linked display-lists which were chained for a single
	 * hardware operation.
	 */
	if (dl->has_chain) {
		list_for_each_entry(dl_child, &dl->chain, chain)
			__vsp1_dl_list_put(dl_child);
	}

	dl->has_chain = false;

	/*
	 * We can't free fragments here, as DMA coherent memory can only be
	 * freed in process context. Move all fragments to the display list
	 * manager's list of fragments to be freed; they will be
	 * garbage-collected by the work queue.
	 */
	if (!list_empty(&dl->fragments)) {
		list_splice_init(&dl->fragments, &dl->dlm->gc_fragments);
		schedule_work(&dl->dlm->gc_work);
	}

	dl->body0.num_entries = 0;

	list_add_tail(&dl->list, &dl->dlm->free);
}

/**
 * vsp1_dl_list_put - Release a display list
 * @dl: The display list
 *
 * Release the display list and return it to the pool of free lists.
 *
 * Passing a NULL pointer to this function is safe, in that case no operation
 * will be performed.
 */
void vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	unsigned long flags;

	if (!dl)
		return;
	spin_lock_irqsave(&dl->dlm->lock, flags);
	__vsp1_dl_list_put(dl);
	spin_unlock_irqrestore(&dl->dlm->lock, flags);
}
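
/*
 * Illustrative sketch, not part of the driver: the usual display list
 * lifecycle. A list is obtained from the manager's free pool, filled with
 * register writes and committed; in the normal flow the interrupt handlers
 * recycle it, so vsp1_dl_list_put() is only called directly on error paths.
 * The register offset below is a placeholder.
 */
static void __maybe_unused vsp1_dl_list_lifecycle_sketch(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl;

	dl = vsp1_dl_list_get(dlm);
	if (!dl)
		return;

	vsp1_dl_list_write(dl, 0x0100, 0x00000001);	/* placeholder */

	/* Ownership passes to the manager; the IRQ handlers recycle it. */
	vsp1_dl_list_commit(dl);
}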

/**
 * vsp1_dl_list_write - Write a register to the display list
 * @dl: The display list
 * @reg: The register address
 * @data: The register value
 *
 * Write the given register and value to the display list. Up to 256 registers
 * can be written per display list.
 */
void vsp1_dl_list_write(struct vsp1_dl_list *dl, u32 reg, u32 data)
{
	vsp1_dl_fragment_write(&dl->body0, reg, data);
}

/**
 * vsp1_dl_list_add_fragment - Add a fragment to the display list
 * @dl: The display list
 * @dlb: The fragment
 *
 * Add a display list body as a fragment to a display list. Registers contained
 * in fragments are processed after registers contained in the main display
 * list, in the order in which fragments are added.
 *
 * Adding a fragment to a display list passes ownership of the fragment to the
 * list. The caller must not touch the fragment after this call, and must not
 * free it explicitly with vsp1_dl_fragment_free().
 *
 * Fragments are only usable for display lists in header mode. Attempts to
 * add a fragment to a headerless display list will return an error.
 */
int vsp1_dl_list_add_fragment(struct vsp1_dl_list *dl,
			      struct vsp1_dl_body *dlb)
{
	/* Multi-body lists are only available in header mode. */
	if (dl->dlm->mode != VSP1_DL_MODE_HEADER)
		return -EINVAL;

	list_add_tail(&dlb->list, &dl->fragments);
	return 0;
}

/**
 * vsp1_dl_list_add_chain - Add a display list to a chain
 * @head: The head display list
 * @dl: The new display list
 *
 * Add a display list to an existing display list chain. The chained lists
 * will be automatically processed by the hardware without intervention from
 * the CPU. A display list end interrupt will only complete after the last
 * display list in the chain has completed processing.
 *
 * Adding a display list to a chain passes ownership of the display list to
 * the head display list item. The chain is released when the head dl item is
 * put back with __vsp1_dl_list_put().
 *
 * Chained display lists are only usable in header mode. Attempts to add a
 * display list to a chain in headerless mode will return an error.
 */
int vsp1_dl_list_add_chain(struct vsp1_dl_list *head,
			   struct vsp1_dl_list *dl)
{
	/* Chained lists are only available in header mode. */
	if (head->dlm->mode != VSP1_DL_MODE_HEADER)
		return -EINVAL;

	head->has_chain = true;
	list_add_tail(&dl->chain, &head->chain);
	return 0;
}
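
/*
 * Illustrative sketch, not part of the driver: building a two-partition
 * chain in header mode. The head takes ownership of the chained list, so a
 * single vsp1_dl_list_put() on the head releases both.
 */
static int __maybe_unused vsp1_dl_chain_sketch(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *head;
	struct vsp1_dl_list *dl;
	int ret;

	head = vsp1_dl_list_get(dlm);
	dl = vsp1_dl_list_get(dlm);
	if (!head || !dl) {
		vsp1_dl_list_put(head);
		vsp1_dl_list_put(dl);
		return -ENOMEM;
	}

	ret = vsp1_dl_list_add_chain(head, dl);
	if (ret < 0) {
		vsp1_dl_list_put(dl);
		vsp1_dl_list_put(head);
		return ret;
	}

	/*
	 * Committing the head programs the whole chain; the hardware
	 * auto-starts the chained list and raises a single frame end
	 * interrupt when the last list completes.
	 */
	vsp1_dl_list_commit(head);
	return 0;
}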

static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
{
	struct vsp1_dl_header_list *hdr = dl->header->lists;
	struct vsp1_dl_body *dlb;
	unsigned int num_lists = 0;

	/*
	 * Fill the header with the display list bodies' addresses and sizes.
	 * The address of the first body has already been filled when the
	 * display list was allocated.
	 */

	hdr->num_bytes = dl->body0.num_entries
		       * sizeof(*dl->header->lists);

	list_for_each_entry(dlb, &dl->fragments, list) {
		num_lists++;
		hdr++;

		hdr->addr = dlb->dma;
		hdr->num_bytes = dlb->num_entries
			       * sizeof(*dl->header->lists);
	}

	dl->header->num_lists = num_lists;

	/*
	 * If this display list's chain is not empty, we are on a chain, and
	 * the next item in the chain is the display list that the hardware
	 * should automatically queue after this one.
	 */
	if (!list_empty(&dl->chain) && !is_last) {
		struct vsp1_dl_list *next = list_next_entry(dl, chain);

		dl->header->next_header = next->dma;
		dl->header->flags = VSP1_DLH_AUTO_START;
	} else {
		dl->header->flags = VSP1_DLH_INT_ENABLE;
	}
}

void vsp1_dl_list_commit(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_device *vsp1 = dlm->vsp1;
	unsigned long flags;
	bool update;

	spin_lock_irqsave(&dlm->lock, flags);
	if (dl->dlm->mode == VSP1_DL_MODE_HEADER) {
		struct vsp1_dl_list *dl_child;

		/*
		 * In header mode the caller guarantees that the hardware is
		 * idle at this point.
		 */

		/* Fill the header for the head and chained display lists. */
		vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));
		list_for_each_entry(dl_child, &dl->chain, chain) {
			bool last = list_is_last(&dl_child->chain, &dl->chain);

			vsp1_dl_list_fill_header(dl_child, last);
		}

		/*
		 * Commit the head display list to hardware. Chained headers
		 * will auto-start.
		 */
		vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);

		dlm->active = dl;
		goto done;
	}

	/* Once the UPD bit has been set the hardware can start processing the
	 * display list at any time and we can't touch the address and size
	 * registers. In that case mark the update as pending, it will be
	 * queued up to the hardware by the frame end interrupt handler.
	 */
	update = !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE) & VI6_DL_BODY_SIZE_UPD);
	if (update) {
		__vsp1_dl_list_put(dlm->pending);
		dlm->pending = dl;
		goto done;
	}

	/* Program the hardware with the display list body address and size.
	 * The UPD bit will be cleared by the device when the display list is
	 * processed.
	 */
	vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma);
	vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
		   (dl->body0.num_entries * sizeof(*dl->header->lists)));

	__vsp1_dl_list_put(dlm->queued);
	dlm->queued = dl;

done:
	spin_unlock_irqrestore(&dlm->lock, flags);
}

/* -----------------------------------------------------------------------------
 * Display List Manager
 */

/* Interrupt Handling */
void vsp1_dlm_irq_display_start(struct vsp1_dl_manager *dlm)
{
	spin_lock(&dlm->lock);

	/* The display start interrupt signals the end of the display list
	 * processing by the device. The active display list, if any, won't be
	 * accessed anymore and can be reused.
	 */
	__vsp1_dl_list_put(dlm->active);
	dlm->active = NULL;
	spin_unlock(&dlm->lock);
}

void vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
{
	struct vsp1_device *vsp1 = dlm->vsp1;

	spin_lock(&dlm->lock);

	__vsp1_dl_list_put(dlm->active);
	dlm->active = NULL;

	/* Header mode is used for mem-to-mem pipelines only. We don't need to
	 * perform any operation as there can't be any new display list queued
	 * in that case.
	 */
	if (dlm->mode == VSP1_DL_MODE_HEADER)
		goto done;

	/* The UPD bit set indicates that the commit operation raced with the
	 * interrupt and occurred after the frame end event and UPD clear but
	 * before interrupt processing. The hardware hasn't taken the update
	 * into account yet, we'll thus skip one frame and retry.
	 */
	if (vsp1_read(vsp1, VI6_DL_BODY_SIZE) & VI6_DL_BODY_SIZE_UPD)
		goto done;

	/* The device starts processing the queued display list right after the
	 * frame end interrupt. The display list thus becomes active.
	 */
	if (dlm->queued) {
		dlm->active = dlm->queued;
		dlm->queued = NULL;
	}

	/* Now that the UPD bit has been cleared we can queue the next display
	 * list to the hardware if one has been prepared.
	 */
	if (dlm->pending) {
		struct vsp1_dl_list *dl = dlm->pending;
		vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma);
		vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
			   (dl->body0.num_entries *
			    sizeof(*dl->header->lists)));

		dlm->queued = dl;
		dlm->pending = NULL;
	}

done:
	spin_unlock(&dlm->lock);
}

/* Hardware Setup */
void vsp1_dlm_setup(struct vsp1_device *vsp1)
{
	u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT)
		 | VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
		 | VI6_DL_CTRL_DLE;
	/* The DRM pipeline operates with display lists in Continuous Frame
	 * Mode; all other pipelines use manual start.
	 */
	if (vsp1->drm)
		ctrl |= VI6_DL_CTRL_CFM0 | VI6_DL_CTRL_NH0;

	vsp1_write(vsp1, VI6_DL_CTRL, ctrl);
	vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
}

void vsp1_dlm_reset(struct vsp1_dl_manager *dlm)
{
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	__vsp1_dl_list_put(dlm->active);
	__vsp1_dl_list_put(dlm->queued);
	__vsp1_dl_list_put(dlm->pending);

	spin_unlock_irqrestore(&dlm->lock, flags);

	dlm->active = NULL;
	dlm->queued = NULL;
	dlm->pending = NULL;
}
/*
 * Free all fragments awaiting to be garbage-collected.
 *
 * This function must be called without the display list manager lock held.
 */
static void vsp1_dlm_fragments_free(struct vsp1_dl_manager *dlm)
{
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	while (!list_empty(&dlm->gc_fragments)) {
		struct vsp1_dl_body *dlb;

		dlb = list_first_entry(&dlm->gc_fragments, struct vsp1_dl_body,
				       list);
		list_del(&dlb->list);

		spin_unlock_irqrestore(&dlm->lock, flags);
		vsp1_dl_fragment_free(dlb);
		spin_lock_irqsave(&dlm->lock, flags);
	}

	spin_unlock_irqrestore(&dlm->lock, flags);
}

static void vsp1_dlm_garbage_collect(struct work_struct *work)
{
	struct vsp1_dl_manager *dlm =
		container_of(work, struct vsp1_dl_manager, gc_work);

	vsp1_dlm_fragments_free(dlm);
}

struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
					unsigned int index,
					unsigned int prealloc)
{
	struct vsp1_dl_manager *dlm;
	unsigned int i;

	dlm = devm_kzalloc(vsp1->dev, sizeof(*dlm), GFP_KERNEL);
	if (!dlm)
		return NULL;

	dlm->index = index;
	dlm->mode = index == 0 && !vsp1->info->uapi
		  ? VSP1_DL_MODE_HEADERLESS : VSP1_DL_MODE_HEADER;
	dlm->vsp1 = vsp1;
	spin_lock_init(&dlm->lock);
	INIT_LIST_HEAD(&dlm->free);
	INIT_LIST_HEAD(&dlm->gc_fragments);
	INIT_WORK(&dlm->gc_work, vsp1_dlm_garbage_collect);
	for (i = 0; i < prealloc; ++i) {
		struct vsp1_dl_list *dl;
		dl = vsp1_dl_list_alloc(dlm);
		if (!dl)
			return NULL;

		list_add_tail(&dl->list, &dlm->free);
	}

	return dlm;
}
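
/*
 * Illustrative sketch, not part of the driver: creating and destroying a
 * display list manager for WPF instance 0 with two preallocated display
 * lists. The dlm structure itself is devm-managed, but vsp1_dlm_destroy()
 * must still run to stop the garbage collector and free the DMA memory.
 */
static int __maybe_unused vsp1_dlm_usage_sketch(struct vsp1_device *vsp1)
{
	struct vsp1_dl_manager *dlm;

	dlm = vsp1_dlm_create(vsp1, 0, 2);
	if (!dlm)
		return -ENOMEM;

	/* ... queue display lists through vsp1_dl_list_get()/commit() ... */

	vsp1_dlm_destroy(dlm);
	return 0;
}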

void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl, *next;

	if (!dlm)
		return;

	cancel_work_sync(&dlm->gc_work);

	list_for_each_entry_safe(dl, next, &dlm->free, list) {
		list_del(&dl->list);
		vsp1_dl_list_free(dl);
	}

	vsp1_dlm_fragments_free(dlm);
}