/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Authors:
 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */

#ifndef _DRM_MM_H_
#define _DRM_MM_H_

/*
 * Generic range manager structs
 */
#include <linux/bug.h>
#include <linux/rbtree.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>
#endif

#ifdef CONFIG_DRM_DEBUG_MM
#define DRM_MM_BUG_ON(expr) BUG_ON(expr)
#else
#define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

enum drm_mm_search_flags {
	DRM_MM_SEARCH_DEFAULT =		0,
	DRM_MM_SEARCH_BEST =		1 << 0,
	DRM_MM_SEARCH_BELOW =		1 << 1,
};

enum drm_mm_allocator_flags {
	DRM_MM_CREATE_DEFAULT =		0,
	DRM_MM_CREATE_TOP =		1 << 0,
};

#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP
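
/*
 * These helpers pair a search flag with its matching allocator flag so
 * that both can be passed as a single token. A hedged usage sketch (mm,
 * node and size are driver-provided):
 *
 *	drm_mm_insert_node_generic(mm, node, size, 0, 0, DRM_MM_TOPDOWN);
 */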

struct drm_mm_node {
	struct list_head node_list;
	struct list_head hole_stack;
	struct rb_node rb;
	unsigned hole_follows : 1;
	unsigned allocated : 1;
	bool scanned_block : 1;
	unsigned long color;
	u64 start;
	u64 size;
	u64 __subtree_last;
	struct drm_mm *mm;
#ifdef CONFIG_DRM_DEBUG_MM
	depot_stack_handle_t stack;
#endif
};

struct drm_mm {
	/* List of all memory nodes that immediately precede a free hole. */
	struct list_head hole_stack;
	/* head_node.node_list is the list of all memory nodes, ordered
	 * according to the (increasing) start address of the memory node. */
	struct drm_mm_node head_node;
	/* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
	struct rb_root interval_tree;

	void (*color_adjust)(const struct drm_mm_node *node,
			     unsigned long color,
			     u64 *start, u64 *end);

	unsigned long scan_active;
};
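
/*
 * The color_adjust hook lets a driver shrink the hole considered for an
 * allocation based on the colors of the neighbouring nodes. A minimal
 * sketch (my_color_adjust and the 4096-byte guard are hypothetical):
 *
 *	static void my_color_adjust(const struct drm_mm_node *node,
 *				    unsigned long color,
 *				    u64 *start, u64 *end)
 *	{
 *		if (drm_mm_node_allocated(node) && node->color != color)
 *			*start += 4096;
 *	}
 *
 *	mm.color_adjust = my_color_adjust;
 */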

struct drm_mm_scan {
	struct drm_mm *mm;

	u64 size;
	u64 alignment;
	u64 remainder_mask;

	u64 range_start;
	u64 range_end;

	u64 hit_start;
	u64 hit_end;

	unsigned long color;
	unsigned int flags;
};

/**
 * drm_mm_node_allocated - checks whether a node is allocated
 * @node: drm_mm_node to check
 *
 * Drivers are required to clear a node prior to using it with the
 * drm_mm range manager.
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @node is allocated.
 */
static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
{
	return node->allocated;
}
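
/*
 * For instance, a driver embedding a node in its own object would zero it
 * on allocation and use this helper on teardown (my_obj is a hypothetical
 * driver structure):
 *
 *	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	...
 *	if (drm_mm_node_allocated(&obj->node))
 *		drm_mm_remove_node(&obj->node);
 */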

/**
 * drm_mm_initialized - checks whether an allocator is initialized
 * @mm: drm_mm to check
 *
 * Drivers should clear the struct drm_mm prior to initialisation if they
 * want to use this function.
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @mm is initialized.
 */
static inline bool drm_mm_initialized(const struct drm_mm *mm)
{
	return mm->hole_stack.next;
}

/**
 * drm_mm_hole_follows - checks whether a hole follows this node
 * @node: drm_mm_node to check
 *
 * Holes are embedded into the drm_mm using the tail of a drm_mm_node.
 * If you wish to know whether a hole follows this particular node,
 * query this function.
 *
 * Returns:
 * True if a hole follows the @node.
 */
static inline bool drm_mm_hole_follows(const struct drm_mm_node *node)
{
	return node->hole_follows;
}

static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

/**
 * drm_mm_hole_node_start - computes the start of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at drm_mm_hole_follows().
 *
 * Returns:
 * Start of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node));
	return __drm_mm_hole_node_start(hole_node);
}

static inline u64 __drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
{
	return list_next_entry(hole_node, node_list)->start;
}

/**
 * drm_mm_hole_node_end - computes the end of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at drm_mm_hole_follows().
 *
 * Returns:
 * End of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
{
	return __drm_mm_hole_node_end(hole_node);
}

/**
 * drm_mm_nodes - list of nodes under the drm_mm range manager
 * @mm: the struct drm_mm range manager
 *
 * As the drm_mm range manager hides its node_list deep within its
 * structure, extracting it looks painful and repetitive. This is
 * not expected to be used outside of the drm_mm_for_each_node()
 * macros and similar internal functions.
 *
 * Returns:
 * The node list, may be empty.
 */
#define drm_mm_nodes(mm) (&(mm)->head_node.node_list)

/**
 * drm_mm_for_each_node - iterator to walk over all allocated nodes
 * @entry: drm_mm_node structure to assign to in each iteration step
 * @mm: drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each, so it is not safe against removal of elements.
 */
#define drm_mm_for_each_node(entry, mm) \
	list_for_each_entry(entry, drm_mm_nodes(mm), node_list)

/**
 * drm_mm_for_each_node_safe - iterator to walk over all allocated nodes
 * @entry: drm_mm_node structure to assign to in each iteration step
 * @next: drm_mm_node structure to store the next step
 * @mm: drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each_safe, so it is safe against removal of elements.
 */
#define drm_mm_for_each_node_safe(entry, next, mm) \
	list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list)
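
/*
 * A typical use is draining the allocator before takedown (a sketch;
 * locking and driver bookkeeping elided):
 *
 *	struct drm_mm_node *node, *next;
 *
 *	drm_mm_for_each_node_safe(node, next, mm)
 *		drm_mm_remove_node(node);
 *	drm_mm_takedown(mm);
 */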

#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
	for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
	     &entry->hole_stack != &(mm)->hole_stack ? \
	     hole_start = drm_mm_hole_node_start(entry), \
	     hole_end = drm_mm_hole_node_end(entry), \
	     1 : 0; \
	     entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))

/**
 * drm_mm_for_each_hole - iterator to walk over all holes
 * @entry: drm_mm_node used internally to track progress
 * @mm: drm_mm allocator to walk
 * @hole_start: ulong variable to assign the hole start to on each iteration
 * @hole_end: ulong variable to assign the hole end to on each iteration
 *
 * This iterator walks over all holes in the range allocator. It is implemented
 * with list_for_each, so it is not safe against removal of elements. @entry is used
 * internally and will not reflect a real drm_mm_node for the very first hole.
 * Hence users of this iterator may not access it.
 *
 * Implementation Note:
 * We need to inline list_for_each_entry in order to be able to set hole_start
 * and hole_end on each iteration while keeping the macro sane.
 *
 * The __drm_mm_for_each_hole version is similar, but with added support for
 * going backwards.
 */
#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
	__drm_mm_for_each_hole(entry, mm, hole_start, hole_end, 0)
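
/*
 * Example of a driver-specific debug dumper built on this iterator
 * (a sketch; note that the hole offsets are u64):
 *
 *	struct drm_mm_node *entry;
 *	u64 hole_start, hole_end;
 *
 *	drm_mm_for_each_hole(entry, mm, hole_start, hole_end)
 *		pr_info("hole [%llx, %llx)\n", hole_start, hole_end);
 */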

/*
 * Basic range manager support (drm_mm.c)
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
					struct drm_mm_node *node,
					u64 size,
					u64 alignment,
					unsigned long color,
					u64 start,
					u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags);

/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocate node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @flags: flags to fine-tune the allocation
 *
 * This is a simplified version of drm_mm_insert_node_in_range_generic() with
 * @color set to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
					      struct drm_mm_node *node,
					      u64 size,
					      u64 alignment,
					      u64 start,
					      u64 end,
					      enum drm_mm_search_flags flags)
{
	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
						   0, start, end, flags,
						   DRM_MM_CREATE_DEFAULT);
}
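
/*
 * For example, confining an allocation to the first 256 MiB of the managed
 * range might look like this (a sketch; the size and bound are illustrative
 * and @node must be zeroed):
 *
 *	int ret;
 *
 *	ret = drm_mm_insert_node_in_range(mm, node, size, 0,
 *					  0, 256 << 20,
 *					  DRM_MM_SEARCH_DEFAULT);
 *
 * A return of -ENOSPC means no hole inside [0, 256M) can fit the request.
 */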

/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocate node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int
drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			   u64 size, u64 alignment,
			   unsigned long color,
			   enum drm_mm_search_flags sflags,
			   enum drm_mm_allocator_flags aflags)
{
	return drm_mm_insert_node_in_range_generic(mm, node,
						   size, alignment, 0,
						   0, U64_MAX,
						   sflags, aflags);
}

/**
 * drm_mm_insert_node - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocate node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @flags: flags to fine-tune the allocation
 *
 * This is a simplified version of drm_mm_insert_node_generic() with @color set
 * to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node(struct drm_mm *mm,
				     struct drm_mm_node *node,
				     u64 size,
				     u64 alignment,
				     enum drm_mm_search_flags flags)
{
	return drm_mm_insert_node_generic(mm, node,
					  size, alignment, 0,
					  flags, DRM_MM_CREATE_DEFAULT);
}
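
/*
 * Basic usage sketch, assuming @node is embedded in a zeroed driver
 * object; on success node->start holds the assigned offset:
 *
 *	int ret;
 *
 *	ret = drm_mm_insert_node(mm, node, size, 0, DRM_MM_SEARCH_DEFAULT);
 *	if (ret)
 *		return ret;
 */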

void drm_mm_remove_node(struct drm_mm_node *node);
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size);
void drm_mm_takedown(struct drm_mm *mm);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
static inline bool drm_mm_clean(const struct drm_mm *mm)
{
	return list_empty(drm_mm_nodes(mm));
}
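
/*
 * drm_mm_takedown() must only be called on a clean allocator, so a
 * defensive shutdown can be sketched as:
 *
 *	if (WARN_ON(!drm_mm_clean(mm)))
 *		return;
 *	drm_mm_takedown(mm);
 */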

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last);

/**
 * drm_mm_for_each_node_in_range - iterator to walk over a range of
 * allocated nodes
 * @node__: drm_mm_node structure to assign to in each iteration step
 * @mm__: drm_mm allocator to walk
 * @start__: starting offset, the first node will overlap this
 * @end__: ending offset, the last node will start before this (but may overlap)
 *
 * This iterator walks over all nodes in the range allocator that lie
 * between @start and @end. It is implemented similarly to list_for_each(),
 * but using the internal interval tree to accelerate the search for the
 * starting node, and so not safe against removal of elements. It assumes
 * that @end is within (or is the upper limit of) the drm_mm allocator.
 */
#define drm_mm_for_each_node_in_range(node__, mm__, start__, end__)	\
	for (node__ = __drm_mm_interval_first((mm__), (start__), (end__)-1); \
	     node__ && node__->start < (end__);				\
	     node__ = list_next_entry(node__, node_list))
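
/*
 * Example: dumping all nodes overlapping a window (a sketch; start and
 * end are arbitrary u64 offsets within the allocator):
 *
 *	struct drm_mm_node *node;
 *
 *	drm_mm_for_each_node_in_range(node, mm, start, end)
 *		pr_info("node [%llx + %llx]\n", node->start, node->size);
 */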

void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size, u64 alignment, unsigned long color,
				 u64 start, u64 end,
				 unsigned int flags);

/**
 * drm_mm_scan_init - initialize lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @flags: flags to specify how the allocation will be performed afterwards
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
static inline void drm_mm_scan_init(struct drm_mm_scan *scan,
				    struct drm_mm *mm,
				    u64 size,
				    u64 alignment,
				    unsigned long color,
				    unsigned int flags)
{
	drm_mm_scan_init_with_range(scan, mm,
				    size, alignment, color,
				    0, U64_MAX,
				    flags);
}
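
/*
 * A condensed eviction-loop sketch modelled on typical driver code (obj,
 * next, lru, evict_link and evict() are hypothetical; locking elided).
 * Scanned blocks must be removed in reverse order of addition, which
 * walking a list_add()-built eviction list provides:
 *
 *	struct drm_mm_scan scan;
 *	LIST_HEAD(evict_list);
 *
 *	drm_mm_scan_init(&scan, mm, size, 0, 0, 0);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->evict_link, &evict_list);
 *		if (drm_mm_scan_add_block(&scan, &obj->node))
 *			break;
 *	}
 *	list_for_each_entry_safe(obj, next, &evict_list, evict_link)
 *		if (drm_mm_scan_remove_block(&scan, &obj->node))
 *			evict(obj);
 */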

bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node);
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node);
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan);

void drm_mm_debug_table(const struct drm_mm *mm, const char *prefix);
#ifdef CONFIG_DEBUG_FS
int drm_mm_dump_table(struct seq_file *m, const struct drm_mm *mm);
#endif

#endif