/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if heavy fragmentation is expected.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
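
/*
 * A minimal usage sketch (illustrative only: the variable names and
 * sizes below are made up and error handling is omitted):
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node *node;
 *
 *	drm_mm_init(&mm, 0, 1024 * 1024);
 *	node = drm_mm_search_free(&mm, 4096, 0, 0);
 *	if (node != NULL)
 *		node = drm_mm_get_block(node, 4096, 0);
 *
 *	drm_mm_put_block(node);
 *	drm_mm_takedown(&mm);
 */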

#include "drmP.h"
45
#include "drm_mm.h"
46 47
#include <linux/slab.h>

48 49
#define MM_UNUSED_TARGET 4

/* Return the size of the free block at the tail of the managed range, or 0. */
unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return 0;

	return entry->size;
}

/* Shrink the managed range by taking @size bytes off its free tail block. */
int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return -ENOMEM;

	if (entry->size <= size)
		return -ENOMEM;

	entry->size -= size;
	return 0;
}

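/*
 * Allocate a node, falling back to the pool of pre-allocated nodes
 * (stocked by drm_mm_pre_get()) when kmalloc fails, as it may in
 * atomic context.
 */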
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kmalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kmalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, fl_entry);
			list_del(&child->fl_entry);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

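/*
 * Stock the pool of spare nodes up to MM_UNUSED_TARGET.  Intended to be
 * called from a sleepable context before allocation paths that cannot
 * sleep, e.g. before using drm_mm_get_block_atomic().
 */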
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->fl_entry, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);

static int drm_mm_create_tail_node(struct drm_mm *mm,
				   unsigned long start,
				   unsigned long size, int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(mm, atomic);
	if (unlikely(child == NULL))
		return -ENOMEM;

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->ml_entry, &mm->ml_entry);
	list_add_tail(&child->fl_entry, &mm->fl_entry);

	return 0;
}

/* Grow the managed range by @size bytes, extending or creating the tail block. */
int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free) {
		return drm_mm_create_tail_node(mm, entry->start + entry->size,
					       size, atomic);
	}
	entry->size += size;
	return 0;
}

/*
 * Split off the first @size bytes of @parent into a new node.  The new
 * node is returned marked busy (free == 0) and off the free stack;
 * @parent shrinks and keeps its place on the free stack.
 */
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
						 unsigned long size,
						 int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(parent->mm, atomic);
	if (unlikely(child == NULL))
		return NULL;

	INIT_LIST_HEAD(&child->fl_entry);

	child->free = 0;
	child->size = size;
	child->start = parent->start;
	child->mm = parent->mm;

	list_add_tail(&child->ml_entry, &parent->ml_entry);

	parent->size -= size;
	parent->start += size;
	return child;
}

struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *node,
				     unsigned long size, unsigned alignment)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;

	if (alignment)
		tmp = node->start % alignment;

	if (tmp) {
		align_splitoff =
		    drm_mm_split_at_start(node, alignment - tmp, 0);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->fl_entry);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, 0);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block);

struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
					    unsigned long size,
					    unsigned alignment)
{
	struct drm_mm_node *align_splitoff = NULL;
	struct drm_mm_node *child;
	unsigned tmp = 0;

	if (alignment)
		tmp = parent->start % alignment;

	if (tmp) {
		align_splitoff =
		    drm_mm_split_at_start(parent, alignment - tmp, 1);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (parent->size == size) {
		list_del_init(&parent->fl_entry);
		parent->free = 0;
		child = parent;
	} else {
		child = drm_mm_split_at_start(parent, size, 1);
	}

	/* No early return above: the alignment split-off must be handed
	 * back even when the block fits exactly, or its space is leaked. */
	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return child;
}
EXPORT_SYMBOL(drm_mm_get_block_atomic);
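
/*
 * A usage sketch for the atomic path (illustrative; "lock" stands in
 * for a hypothetical driver spinlock): stock the spare-node pool while
 * sleeping is still allowed, then allocate under the lock:
 *
 *	drm_mm_pre_get(&mm);
 *	spin_lock(&lock);
 *	node = drm_mm_search_free(&mm, size, align, 0);
 *	if (node != NULL)
 *		node = drm_mm_get_block_atomic(node, size, align);
 *	spin_unlock(&lock);
 */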

/*
 * Put a block. Merge with the previous and/or next block if they are free.
 * Otherwise add to the free stack.
 */
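/*
 * For example (illustrative): with a map [A free][B busy][C free],
 * putting B lets A absorb both B and C; the nodes for B and C are
 * recycled into unused_nodes up to MM_UNUSED_TARGET, or kfree()d.
 */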

void drm_mm_put_block(struct drm_mm_node *cur)
{
	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->ml_entry;
	struct list_head *root_head = &mm->ml_entry;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;
	int merged = 0;

	if (cur_head->prev != root_head) {
		prev_node =
		    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	if (cur_head->next != root_head) {
		next_node =
		    list_entry(cur_head->next, struct drm_mm_node, ml_entry);
		if (next_node->free) {
			if (merged) {
				prev_node->size += next_node->size;
				list_del(&next_node->ml_entry);
				list_del(&next_node->fl_entry);
				if (mm->num_unused < MM_UNUSED_TARGET) {
					list_add(&next_node->fl_entry,
						 &mm->unused_nodes);
					++mm->num_unused;
				} else
					kfree(next_node);
			} else {
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		cur->free = 1;
		list_add(&cur->fl_entry, &mm->fl_entry);
	} else {
		list_del(&cur->ml_entry);
		if (mm->num_unused < MM_UNUSED_TARGET) {
			list_add(&cur->fl_entry, &mm->unused_nodes);
			++mm->num_unused;
		} else
			kfree(cur);
	}
}
EXPORT_SYMBOL(drm_mm_put_block);

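/*
 * Illustrative behaviour (not exhaustive): with free holes of 8K, 4K
 * and 16K on the stack and a 4K request, best_match == 0 returns the
 * first fitting hole (8K), while best_match != 0 scans the whole stack
 * and returns the tightest fit (4K).
 */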
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct list_head *list;
	const struct list_head *free_stack = &mm->fl_entry;
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each(list, free_stack) {
		entry = list_entry(list, struct drm_mm_node, fl_entry);
		wasted = 0;

		if (entry->size < size)
			continue;

		if (alignment) {
			unsigned tmp = entry->start % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted) {
			if (!best_match)
				return entry;
			/* Compare the candidate's size, not the requested
			 * size, so the smallest fitting hole wins. */
			if (entry->size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);

/* Nonzero when only the initial free node remains, i.e. nothing is allocated. */
int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->ml_entry;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->ml_entry);
	INIT_LIST_HEAD(&mm->fl_entry);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	spin_lock_init(&mm->unused_lock);

	return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);

void drm_mm_takedown(struct drm_mm *mm)
{
	struct list_head *bnode = mm->fl_entry.next;
	struct drm_mm_node *entry;
	struct drm_mm_node *next;

	entry = list_entry(bnode, struct drm_mm_node, fl_entry);

	if (entry->ml_entry.next != &mm->ml_entry ||
	    entry->fl_entry.next != &mm->fl_entry) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	list_del(&entry->fl_entry);
	list_del(&entry->ml_entry);
	kfree(entry);

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
		list_del(&entry->fl_entry);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);