Commit 88be9a0a authored by Matthew Auld

drm/i915/ttm: add ttm_buddy_man

Add back our standalone i915_buddy allocator and integrate it into a
ttm_resource_manager. This will plug into our ttm backend for managing
device local-memory in the next couple of patches.

v2(Thomas):
    - Return -ENOSPC from the buddy; ttm expects this in order to
      trigger eviction
    - Drop the unnecessary inline
    - bo->page_alignment is in page units
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Acked-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210616152501.394518-1-matthew.auld@intel.com
Parent c865204e
drivers/gpu/drm/i915/Makefile
@@ -162,6 +162,7 @@ gem-y += \
i915-y += \
$(gem-y) \
i915_active.o \
i915_buddy.o \
i915_cmd_parser.o \
i915_gem_evict.o \
i915_gem_gtt.o \
@@ -171,6 +172,7 @@ i915-y += \
i915_request.o \
i915_scheduler.o \
i915_trace_points.o \
i915_ttm_buddy_manager.o \
i915_vma.o \
intel_wopcm.o
drivers/gpu/drm/i915/i915_buddy.c (new file)
// SPDX-License-Identifier: MIT
/*
* Copyright © 2021 Intel Corporation
*/
#include <linux/kmemleak.h>
#include "i915_buddy.h"
#include "i915_gem.h"
#include "i915_utils.h"
static struct i915_buddy_block *i915_block_alloc(struct i915_buddy_mm *mm,
struct i915_buddy_block *parent,
unsigned int order,
u64 offset)
{
struct i915_buddy_block *block;
GEM_BUG_ON(order > I915_BUDDY_MAX_ORDER);
block = kmem_cache_zalloc(mm->slab_blocks, GFP_KERNEL);
if (!block)
return NULL;
block->header = offset;
block->header |= order;
block->parent = parent;
GEM_BUG_ON(block->header & I915_BUDDY_HEADER_UNUSED);
return block;
}
static void i915_block_free(struct i915_buddy_mm *mm,
struct i915_buddy_block *block)
{
kmem_cache_free(mm->slab_blocks, block);
}
static void mark_allocated(struct i915_buddy_block *block)
{
block->header &= ~I915_BUDDY_HEADER_STATE;
block->header |= I915_BUDDY_ALLOCATED;
list_del(&block->link);
}
static void mark_free(struct i915_buddy_mm *mm,
struct i915_buddy_block *block)
{
block->header &= ~I915_BUDDY_HEADER_STATE;
block->header |= I915_BUDDY_FREE;
list_add(&block->link,
&mm->free_list[i915_buddy_block_order(block)]);
}
static void mark_split(struct i915_buddy_block *block)
{
block->header &= ~I915_BUDDY_HEADER_STATE;
block->header |= I915_BUDDY_SPLIT;
list_del(&block->link);
}
int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size)
{
unsigned int i;
u64 offset;
if (size < chunk_size)
return -EINVAL;
if (chunk_size < PAGE_SIZE)
return -EINVAL;
if (!is_power_of_2(chunk_size))
return -EINVAL;
size = round_down(size, chunk_size);
mm->size = size;
mm->chunk_size = chunk_size;
mm->max_order = ilog2(size) - ilog2(chunk_size);
GEM_BUG_ON(mm->max_order > I915_BUDDY_MAX_ORDER);
mm->slab_blocks = KMEM_CACHE(i915_buddy_block, SLAB_HWCACHE_ALIGN);
if (!mm->slab_blocks)
return -ENOMEM;
mm->free_list = kmalloc_array(mm->max_order + 1,
sizeof(struct list_head),
GFP_KERNEL);
if (!mm->free_list)
goto out_destroy_slab;
for (i = 0; i <= mm->max_order; ++i)
INIT_LIST_HEAD(&mm->free_list[i]);
mm->n_roots = hweight64(size);
mm->roots = kmalloc_array(mm->n_roots,
sizeof(struct i915_buddy_block *),
GFP_KERNEL);
if (!mm->roots)
goto out_free_list;
offset = 0;
i = 0;
/*
* Split into power-of-two blocks, in case we are given a size that is
* not itself a power-of-two.
*/
do {
struct i915_buddy_block *root;
unsigned int order;
u64 root_size;
root_size = rounddown_pow_of_two(size);
order = ilog2(root_size) - ilog2(chunk_size);
root = i915_block_alloc(mm, NULL, order, offset);
if (!root)
goto out_free_roots;
mark_free(mm, root);
GEM_BUG_ON(i > mm->max_order);
GEM_BUG_ON(i915_buddy_block_size(mm, root) < chunk_size);
mm->roots[i] = root;
offset += root_size;
size -= root_size;
i++;
} while (size);
return 0;
out_free_roots:
while (i--)
i915_block_free(mm, mm->roots[i]);
kfree(mm->roots);
out_free_list:
kfree(mm->free_list);
out_destroy_slab:
kmem_cache_destroy(mm->slab_blocks);
return -ENOMEM;
}
void i915_buddy_fini(struct i915_buddy_mm *mm)
{
int i;
for (i = 0; i < mm->n_roots; ++i) {
GEM_WARN_ON(!i915_buddy_block_is_free(mm->roots[i]));
i915_block_free(mm, mm->roots[i]);
}
kfree(mm->roots);
kfree(mm->free_list);
kmem_cache_destroy(mm->slab_blocks);
}
static int split_block(struct i915_buddy_mm *mm,
struct i915_buddy_block *block)
{
unsigned int block_order = i915_buddy_block_order(block) - 1;
u64 offset = i915_buddy_block_offset(block);
GEM_BUG_ON(!i915_buddy_block_is_free(block));
GEM_BUG_ON(!i915_buddy_block_order(block));
block->left = i915_block_alloc(mm, block, block_order, offset);
if (!block->left)
return -ENOMEM;
block->right = i915_block_alloc(mm, block, block_order,
offset + (mm->chunk_size << block_order));
if (!block->right) {
i915_block_free(mm, block->left);
return -ENOMEM;
}
mark_free(mm, block->left);
mark_free(mm, block->right);
mark_split(block);
return 0;
}
static struct i915_buddy_block *
get_buddy(struct i915_buddy_block *block)
{
struct i915_buddy_block *parent;
parent = block->parent;
if (!parent)
return NULL;
if (parent->left == block)
return parent->right;
return parent->left;
}
static void __i915_buddy_free(struct i915_buddy_mm *mm,
struct i915_buddy_block *block)
{
struct i915_buddy_block *parent;
while ((parent = block->parent)) {
struct i915_buddy_block *buddy;
buddy = get_buddy(block);
if (!i915_buddy_block_is_free(buddy))
break;
list_del(&buddy->link);
i915_block_free(mm, block);
i915_block_free(mm, buddy);
block = parent;
}
mark_free(mm, block);
}
void i915_buddy_free(struct i915_buddy_mm *mm,
struct i915_buddy_block *block)
{
GEM_BUG_ON(!i915_buddy_block_is_allocated(block));
__i915_buddy_free(mm, block);
}
void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects)
{
struct i915_buddy_block *block, *on;
list_for_each_entry_safe(block, on, objects, link) {
i915_buddy_free(mm, block);
cond_resched();
}
INIT_LIST_HEAD(objects);
}
/*
* Allocate power-of-two block. The order value here translates to:
*
* 0 = 2^0 * mm->chunk_size
* 1 = 2^1 * mm->chunk_size
* 2 = 2^2 * mm->chunk_size
* ...
*/
struct i915_buddy_block *
i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order)
{
struct i915_buddy_block *block = NULL;
unsigned int i;
int err;
for (i = order; i <= mm->max_order; ++i) {
block = list_first_entry_or_null(&mm->free_list[i],
struct i915_buddy_block,
link);
if (block)
break;
}
if (!block)
return ERR_PTR(-ENOSPC);
GEM_BUG_ON(!i915_buddy_block_is_free(block));
while (i != order) {
err = split_block(mm, block);
if (unlikely(err))
goto out_free;
/* Go low */
block = block->left;
i--;
}
mark_allocated(block);
kmemleak_update_trace(block);
return block;
out_free:
if (i != order)
__i915_buddy_free(mm, block);
return ERR_PTR(err);
}
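For illustration only (hypothetical helper, not part of this patch): a caller wanting `size` bytes as a single power-of-two block would derive the order from the chunk size, and provide its own locking as the header below recommends. A minimal sketch, assuming size >= mm->chunk_size:

	/* Hypothetical sketch: order is relative to mm->chunk_size. */
	static struct i915_buddy_block *
	example_alloc_size(struct i915_buddy_mm *mm, struct mutex *lock, u64 size)
	{
		struct i915_buddy_block *block;
		unsigned int order;

		order = ilog2(roundup_pow_of_two(size)) - ilog2(mm->chunk_size);

		mutex_lock(lock); /* locking is left to the caller */
		block = i915_buddy_alloc(mm, order);
		mutex_unlock(lock);

		return block; /* ERR_PTR(-ENOSPC) if nothing free at >= order */
	}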
static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
{
return s1 <= e2 && e1 >= s2;
}
static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
{
return s1 <= s2 && e1 >= e2;
}
/*
* Allocate range. Note that it's safe to chain together multiple alloc_ranges
* with the same blocks list.
*
* Intended for pre-allocating portions of the address space, for example to
* reserve a block for the initial framebuffer or similar, hence the expectation
* here is that i915_buddy_alloc() is still the main vehicle for
* allocations, so if that's not the case then the drm_mm range allocator is
* probably a much better fit, and so you should probably go use that instead.
*/
int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
struct list_head *blocks,
u64 start, u64 size)
{
struct i915_buddy_block *block;
struct i915_buddy_block *buddy;
LIST_HEAD(allocated);
LIST_HEAD(dfs);
u64 end;
int err;
int i;
if (size < mm->chunk_size)
return -EINVAL;
if (!IS_ALIGNED(size | start, mm->chunk_size))
return -EINVAL;
if (range_overflows(start, size, mm->size))
return -EINVAL;
for (i = 0; i < mm->n_roots; ++i)
list_add_tail(&mm->roots[i]->tmp_link, &dfs);
end = start + size - 1;
do {
u64 block_start;
u64 block_end;
block = list_first_entry_or_null(&dfs,
struct i915_buddy_block,
tmp_link);
if (!block)
break;
list_del(&block->tmp_link);
block_start = i915_buddy_block_offset(block);
block_end = block_start + i915_buddy_block_size(mm, block) - 1;
if (!overlaps(start, end, block_start, block_end))
continue;
if (i915_buddy_block_is_allocated(block)) {
err = -ENOSPC;
goto err_free;
}
if (contains(start, end, block_start, block_end)) {
if (!i915_buddy_block_is_free(block)) {
err = -ENOSPC;
goto err_free;
}
mark_allocated(block);
list_add_tail(&block->link, &allocated);
continue;
}
if (!i915_buddy_block_is_split(block)) {
err = split_block(mm, block);
if (unlikely(err))
goto err_undo;
}
list_add(&block->right->tmp_link, &dfs);
list_add(&block->left->tmp_link, &dfs);
} while (1);
list_splice_tail(&allocated, blocks);
return 0;
err_undo:
/*
* We really don't want to leave around a bunch of split blocks, since
* bigger is better, so make sure we merge everything back before we
* free the allocated blocks.
*/
buddy = get_buddy(block);
if (buddy &&
(i915_buddy_block_is_free(block) &&
i915_buddy_block_is_free(buddy)))
__i915_buddy_free(mm, block);
err_free:
i915_buddy_free_list(mm, &allocated);
return err;
}
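As a hedged example of the intended use (hypothetical helper, not in the patch; SZ_* constants are from <linux/sizes.h>, and 4K pages are assumed so the chunk size is >= PAGE_SIZE): initialise an mm and pre-allocate an unusable head of the address space before i915_buddy_alloc() takes over:

	/* Hypothetical sketch: manage 4G in 4K chunks, reserving the first 1M. */
	static int example_init_and_reserve(struct i915_buddy_mm *mm,
					    struct list_head *reserved)
	{
		int err;

		err = i915_buddy_init(mm, SZ_4G, SZ_4K);
		if (err)
			return err;

		err = i915_buddy_alloc_range(mm, reserved, 0, SZ_1M);
		if (err)
			i915_buddy_fini(mm);

		return err; /* later, free @reserved with i915_buddy_free_list() */
	}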
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_buddy.c"
#endif
drivers/gpu/drm/i915/i915_buddy.h (new file)
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2021 Intel Corporation
*/
#ifndef __I915_BUDDY_H__
#define __I915_BUDDY_H__
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/slab.h>
struct i915_buddy_block {
#define I915_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
#define I915_BUDDY_HEADER_STATE GENMASK_ULL(11, 10)
#define I915_BUDDY_ALLOCATED (1 << 10)
#define I915_BUDDY_FREE (2 << 10)
#define I915_BUDDY_SPLIT (3 << 10)
/* Free to be used, if needed in the future */
#define I915_BUDDY_HEADER_UNUSED GENMASK_ULL(9, 6)
#define I915_BUDDY_HEADER_ORDER GENMASK_ULL(5, 0)
u64 header;
struct i915_buddy_block *left;
struct i915_buddy_block *right;
struct i915_buddy_block *parent;
void *private; /* owned by creator */
/*
* While the block is allocated by the user through i915_buddy_alloc*,
* the user has ownership of the link, for example to maintain within
* a list, if so desired. As soon as the block is freed with
* i915_buddy_free* ownership is given back to the mm.
*/
struct list_head link;
struct list_head tmp_link;
};
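To make the bitfield packing concrete (editorial example, not from the patch): in an mm with a 4K chunk_size, a free 1M block at offset 16M has order 8 (4K << 8 == 1M), so its header packs as:

	header = 0x1000000		/* offset = 16M, bits 63:12 */
	       | I915_BUDDY_FREE	/* state, bits 11:10 */
	       | 8;			/* order, bits 5:0 */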
/* Order-zero must be at least PAGE_SIZE */
#define I915_BUDDY_MAX_ORDER (63 - PAGE_SHIFT)
/*
* Binary Buddy System.
*
* Locking should be handled by the user, a simple mutex around
* i915_buddy_alloc* and i915_buddy_free* should suffice.
*/
struct i915_buddy_mm {
struct kmem_cache *slab_blocks;
/* Maintain a free list for each order. */
struct list_head *free_list;
/*
* Maintain explicit binary tree(s) to track the allocation of the
* address space. This gives us a simple way of finding a buddy block
* and performing the potentially recursive merge step when freeing a
block. Nodes are either allocated or free; free nodes also sit on the
free list for their order.
*/
struct i915_buddy_block **roots;
/*
* Anything from here is public, and remains static for the lifetime of
* the mm. Everything above is considered do-not-touch.
*/
unsigned int n_roots;
unsigned int max_order;
/* Must be at least PAGE_SIZE */
u64 chunk_size;
u64 size;
};
static inline u64
i915_buddy_block_offset(struct i915_buddy_block *block)
{
return block->header & I915_BUDDY_HEADER_OFFSET;
}
static inline unsigned int
i915_buddy_block_order(struct i915_buddy_block *block)
{
return block->header & I915_BUDDY_HEADER_ORDER;
}
static inline unsigned int
i915_buddy_block_state(struct i915_buddy_block *block)
{
return block->header & I915_BUDDY_HEADER_STATE;
}
static inline bool
i915_buddy_block_is_allocated(struct i915_buddy_block *block)
{
return i915_buddy_block_state(block) == I915_BUDDY_ALLOCATED;
}
static inline bool
i915_buddy_block_is_free(struct i915_buddy_block *block)
{
return i915_buddy_block_state(block) == I915_BUDDY_FREE;
}
static inline bool
i915_buddy_block_is_split(struct i915_buddy_block *block)
{
return i915_buddy_block_state(block) == I915_BUDDY_SPLIT;
}
static inline u64
i915_buddy_block_size(struct i915_buddy_mm *mm,
struct i915_buddy_block *block)
{
return mm->chunk_size << i915_buddy_block_order(block);
}
int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size);
void i915_buddy_fini(struct i915_buddy_mm *mm);
struct i915_buddy_block *
i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order);
int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
struct list_head *blocks,
u64 start, u64 size);
void i915_buddy_free(struct i915_buddy_mm *mm, struct i915_buddy_block *block);
void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects);
#endif
drivers/gpu/drm/i915/i915_ttm_buddy_manager.c (new file)
// SPDX-License-Identifier: MIT
/*
* Copyright © 2021 Intel Corporation
*/
#include <linux/slab.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include "i915_ttm_buddy_manager.h"
#include "i915_buddy.h"
#include "i915_gem.h"
struct i915_ttm_buddy_manager {
struct ttm_resource_manager manager;
struct i915_buddy_mm mm;
struct list_head reserved;
struct mutex lock;
};
static struct i915_ttm_buddy_manager *
to_buddy_manager(struct ttm_resource_manager *man)
{
return container_of(man, struct i915_ttm_buddy_manager, manager);
}
static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource **res)
{
struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
struct i915_ttm_buddy_resource *bman_res;
struct i915_buddy_mm *mm = &bman->mm;
unsigned long n_pages;
unsigned int min_order;
u64 min_page_size;
u64 size;
int err;
GEM_BUG_ON(place->fpfn || place->lpfn);
bman_res = kzalloc(sizeof(*bman_res), GFP_KERNEL);
if (!bman_res)
return -ENOMEM;
ttm_resource_init(bo, place, &bman_res->base);
INIT_LIST_HEAD(&bman_res->blocks);
bman_res->mm = mm;
GEM_BUG_ON(!bman_res->base.num_pages);
size = bman_res->base.num_pages << PAGE_SHIFT;
min_page_size = bo->page_alignment << PAGE_SHIFT;
GEM_BUG_ON(min_page_size < mm->chunk_size);
min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);
if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
size = roundup_pow_of_two(size);
min_order = ilog2(size) - ilog2(mm->chunk_size);
}
if (size > mm->size) {
err = -E2BIG;
goto err_free_res;
}
n_pages = size >> ilog2(mm->chunk_size);
do {
struct i915_buddy_block *block;
unsigned int order;
order = fls(n_pages) - 1;
GEM_BUG_ON(order > mm->max_order);
GEM_BUG_ON(order < min_order);
do {
mutex_lock(&bman->lock);
block = i915_buddy_alloc(mm, order);
mutex_unlock(&bman->lock);
if (!IS_ERR(block))
break;
if (order-- == min_order) {
err = -ENOSPC;
goto err_free_blocks;
}
} while (1);
n_pages -= BIT(order);
list_add_tail(&block->link, &bman_res->blocks);
if (!n_pages)
break;
} while (1);
*res = &bman_res->base;
return 0;
err_free_blocks:
mutex_lock(&bman->lock);
i915_buddy_free_list(mm, &bman_res->blocks);
mutex_unlock(&bman->lock);
err_free_res:
kfree(bman_res);
return err;
}
static void i915_ttm_buddy_man_free(struct ttm_resource_manager *man,
struct ttm_resource *res)
{
struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
mutex_lock(&bman->lock);
i915_buddy_free_list(&bman->mm, &bman_res->blocks);
mutex_unlock(&bman->lock);
kfree(bman_res);
}
static const struct ttm_resource_manager_func i915_ttm_buddy_manager_func = {
.alloc = i915_ttm_buddy_man_alloc,
.free = i915_ttm_buddy_man_free,
};
/**
* i915_ttm_buddy_man_init - Setup buddy allocator based ttm manager
* @bdev: The ttm device
* @type: Memory type we want to manage
* @use_tt: Set use_tt for the manager
* @size: The size in bytes to manage
* @chunk_size: The minimum page size in bytes for our allocations i.e
* order-zero
*
* Note that the starting address is assumed to be zero here, since this
* simplifies keeping the property that allocated blocks have natural
* power-of-two alignment. So long as the real starting address is some large
* power-of-two, or naturally starts from zero, this should be fine. Also,
* the &i915_ttm_buddy_man_reserve interface can be used to preserve alignment
* if, say, there is some unusable range at the start of the region. We can
* revisit this in the future and make the interface accept an actual starting
* offset and let it take care of the rest.
*
* Note that if the @size is not aligned to the @chunk_size then we perform the
* required rounding to get the usable size. The final size in pages can be
* taken from &ttm_resource_manager.size.
*
* Return: 0 on success, negative error code on failure.
*/
int i915_ttm_buddy_man_init(struct ttm_device *bdev,
unsigned int type, bool use_tt,
u64 size, u64 chunk_size)
{
struct ttm_resource_manager *man;
struct i915_ttm_buddy_manager *bman;
int err;
bman = kzalloc(sizeof(*bman), GFP_KERNEL);
if (!bman)
return -ENOMEM;
err = i915_buddy_init(&bman->mm, size, chunk_size);
if (err)
goto err_free_bman;
mutex_init(&bman->lock);
INIT_LIST_HEAD(&bman->reserved);
man = &bman->manager;
man->use_tt = use_tt;
man->func = &i915_ttm_buddy_manager_func;
ttm_resource_manager_init(man, bman->mm.size >> PAGE_SHIFT);
ttm_resource_manager_set_used(man, true);
ttm_set_driver_manager(bdev, type, man);
return 0;
err_free_bman:
kfree(bman);
return err;
}
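For illustration (hypothetical function and values; TTM_PL_VRAM is just one plausible memory type): a driver would typically register the manager for its local-memory region during device init, along these lines:

	/* Hypothetical sketch: 4G of device-local memory, 64K minimum page. */
	static int example_setup_lmem(struct ttm_device *bdev)
	{
		return i915_ttm_buddy_man_init(bdev, TTM_PL_VRAM,
					       false /* use_tt */,
					       SZ_4G, SZ_64K);
	}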
/**
* i915_ttm_buddy_man_fini - Destroy the buddy allocator ttm manager
* @bdev: The ttm device
* @type: Memory type we want to manage
*
* Note that if we reserved anything with &i915_ttm_buddy_man_reserve, this will
* also be freed for us here.
*
* Return: 0 on success, negative error code on failure.
*/
int i915_ttm_buddy_man_fini(struct ttm_device *bdev, unsigned int type)
{
struct ttm_resource_manager *man = ttm_manager_type(bdev, type);
struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
struct i915_buddy_mm *mm = &bman->mm;
int ret;
ttm_resource_manager_set_used(man, false);
ret = ttm_resource_manager_evict_all(bdev, man);
if (ret)
return ret;
ttm_set_driver_manager(bdev, type, NULL);
mutex_lock(&bman->lock);
i915_buddy_free_list(mm, &bman->reserved);
i915_buddy_fini(mm);
mutex_unlock(&bman->lock);
ttm_resource_manager_cleanup(man);
kfree(bman);
return 0;
}
/**
* i915_ttm_buddy_man_reserve - Reserve address range
* @man: The buddy allocator ttm manager
* @start: The offset in bytes, counted from the region start (assumed to be zero)
* @size: The size in bytes
*
* Note that the starting address for the region is always assumed to be zero.
*
* Return: 0 on success, negative error code on failure.
*/
int i915_ttm_buddy_man_reserve(struct ttm_resource_manager *man,
u64 start, u64 size)
{
struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
struct i915_buddy_mm *mm = &bman->mm;
int ret;
mutex_lock(&bman->lock);
ret = i915_buddy_alloc_range(mm, &bman->reserved, start, size);
mutex_unlock(&bman->lock);
return ret;
}
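A hedged usage sketch (made-up values, not from the patch): carving out, say, a firmware-initialised framebuffer at the start of the region before normal BO allocations begin:

	/* Hypothetical: the reservation is freed by i915_ttm_buddy_man_fini(). */
	static int example_reserve_stolen(struct ttm_resource_manager *man)
	{
		return i915_ttm_buddy_man_reserve(man, 0, SZ_8M);
	}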
drivers/gpu/drm/i915/i915_ttm_buddy_manager.h (new file)
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2021 Intel Corporation
*/
#ifndef __I915_TTM_BUDDY_MANAGER_H__
#define __I915_TTM_BUDDY_MANAGER_H__
#include <linux/list.h>
#include <linux/types.h>
#include <drm/ttm/ttm_resource.h>
struct ttm_device;
struct ttm_resource_manager;
struct i915_buddy_mm;
/**
* struct i915_ttm_buddy_resource
*
* @base: struct ttm_resource base class we extend
* @blocks: the list of struct i915_buddy_block for this resource/allocation
* @mm: the struct i915_buddy_mm for this resource
*
* Extends the struct ttm_resource to manage an address space allocation with
* one or more struct i915_buddy_block.
*/
struct i915_ttm_buddy_resource {
struct ttm_resource base;
struct list_head blocks;
struct i915_buddy_mm *mm;
};
/**
* to_ttm_buddy_resource
*
* @res: the resource to upcast
*
* Upcast the struct ttm_resource object into a struct i915_ttm_buddy_resource.
*/
static inline struct i915_ttm_buddy_resource *
to_ttm_buddy_resource(struct ttm_resource *res)
{
return container_of(res, struct i915_ttm_buddy_resource, base);
}
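For illustration (hypothetical helper; assumes i915_buddy.h is also included for the block helpers): a backend can walk the blocks backing a resource, for example to sum the bytes it spans:

	/* Hypothetical sketch: total size, in bytes, backing a ttm_resource. */
	static inline u64 example_res_bytes(struct ttm_resource *res)
	{
		struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
		struct i915_buddy_block *block;
		u64 total = 0;

		list_for_each_entry(block, &bman_res->blocks, link)
			total += i915_buddy_block_size(bman_res->mm, block);

		return total;
	}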
int i915_ttm_buddy_man_init(struct ttm_device *bdev,
unsigned int type, bool use_tt,
u64 size, u64 chunk_size);
int i915_ttm_buddy_man_fini(struct ttm_device *bdev,
unsigned int type);
int i915_ttm_buddy_man_reserve(struct ttm_resource_manager *man,
u64 start, u64 size);
#endif
drivers/gpu/drm/i915/selftests/i915_buddy.c (new file)
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/
#include <linux/prime_numbers.h>
#include "../i915_selftest.h"
#include "i915_random.h"
static void __igt_dump_block(struct i915_buddy_mm *mm,
struct i915_buddy_block *block,
bool buddy)
{
pr_err("block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%s buddy=%s\n",
block->header,
i915_buddy_block_state(block),
i915_buddy_block_order(block),
i915_buddy_block_offset(block),
i915_buddy_block_size(mm, block),
yesno(!block->parent),
yesno(buddy));
}
static void igt_dump_block(struct i915_buddy_mm *mm,
struct i915_buddy_block *block)
{
struct i915_buddy_block *buddy;
__igt_dump_block(mm, block, false);
buddy = get_buddy(block);
if (buddy)
__igt_dump_block(mm, buddy, true);
}
static int igt_check_block(struct i915_buddy_mm *mm,
struct i915_buddy_block *block)
{
struct i915_buddy_block *buddy;
unsigned int block_state;
u64 block_size;
u64 offset;
int err = 0;
block_state = i915_buddy_block_state(block);
if (block_state != I915_BUDDY_ALLOCATED &&
block_state != I915_BUDDY_FREE &&
block_state != I915_BUDDY_SPLIT) {
pr_err("block state mismatch\n");
err = -EINVAL;
}
block_size = i915_buddy_block_size(mm, block);
offset = i915_buddy_block_offset(block);
if (block_size < mm->chunk_size) {
pr_err("block size smaller than min size\n");
err = -EINVAL;
}
if (!is_power_of_2(block_size)) {
pr_err("block size not power of two\n");
err = -EINVAL;
}
if (!IS_ALIGNED(block_size, mm->chunk_size)) {
pr_err("block size not aligned to min size\n");
err = -EINVAL;
}
if (!IS_ALIGNED(offset, mm->chunk_size)) {
pr_err("block offset not aligned to min size\n");
err = -EINVAL;
}
if (!IS_ALIGNED(offset, block_size)) {
pr_err("block offset not aligned to block size\n");
err = -EINVAL;
}
buddy = get_buddy(block);
if (!buddy && block->parent) {
pr_err("buddy has gone fishing\n");
err = -EINVAL;
}
if (buddy) {
if (i915_buddy_block_offset(buddy) != (offset ^ block_size)) {
pr_err("buddy has wrong offset\n");
err = -EINVAL;
}
if (i915_buddy_block_size(mm, buddy) != block_size) {
pr_err("buddy size mismatch\n");
err = -EINVAL;
}
if (i915_buddy_block_state(buddy) == block_state &&
block_state == I915_BUDDY_FREE) {
pr_err("block and its buddy are free\n");
err = -EINVAL;
}
}
return err;
}
static int igt_check_blocks(struct i915_buddy_mm *mm,
struct list_head *blocks,
u64 expected_size,
bool is_contiguous)
{
struct i915_buddy_block *block;
struct i915_buddy_block *prev;
u64 total;
int err = 0;
block = NULL;
prev = NULL;
total = 0;
list_for_each_entry(block, blocks, link) {
err = igt_check_block(mm, block);
if (!i915_buddy_block_is_allocated(block)) {
pr_err("block not allocated\n"),
err = -EINVAL;
}
if (is_contiguous && prev) {
u64 prev_block_size;
u64 prev_offset;
u64 offset;
prev_offset = i915_buddy_block_offset(prev);
prev_block_size = i915_buddy_block_size(mm, prev);
offset = i915_buddy_block_offset(block);
if (offset != (prev_offset + prev_block_size)) {
pr_err("block offset mismatch\n");
err = -EINVAL;
}
}
if (err)
break;
total += i915_buddy_block_size(mm, block);
prev = block;
}
if (!err) {
if (total != expected_size) {
pr_err("size mismatch, expected=%llx, found=%llx\n",
expected_size, total);
err = -EINVAL;
}
return err;
}
if (prev) {
pr_err("prev block, dump:\n");
igt_dump_block(mm, prev);
}
if (block) {
pr_err("bad block, dump:\n");
igt_dump_block(mm, block);
}
return err;
}
static int igt_check_mm(struct i915_buddy_mm *mm)
{
struct i915_buddy_block *root;
struct i915_buddy_block *prev;
unsigned int i;
u64 total;
int err = 0;
if (!mm->n_roots) {
pr_err("n_roots is zero\n");
return -EINVAL;
}
if (mm->n_roots != hweight64(mm->size)) {
pr_err("n_roots mismatch, n_roots=%u, expected=%lu\n",
mm->n_roots, hweight64(mm->size));
return -EINVAL;
}
root = NULL;
prev = NULL;
total = 0;
for (i = 0; i < mm->n_roots; ++i) {
struct i915_buddy_block *block;
unsigned int order;
root = mm->roots[i];
if (!root) {
pr_err("root(%u) is NULL\n", i);
err = -EINVAL;
break;
}
err = igt_check_block(mm, root);
if (!i915_buddy_block_is_free(root)) {
pr_err("root not free\n");
err = -EINVAL;
}
order = i915_buddy_block_order(root);
if (!i) {
if (order != mm->max_order) {
pr_err("max order root missing\n");
err = -EINVAL;
}
}
if (prev) {
u64 prev_block_size;
u64 prev_offset;
u64 offset;
prev_offset = i915_buddy_block_offset(prev);
prev_block_size = i915_buddy_block_size(mm, prev);
offset = i915_buddy_block_offset(root);
if (offset != (prev_offset + prev_block_size)) {
pr_err("root offset mismatch\n");
err = -EINVAL;
}
}
block = list_first_entry_or_null(&mm->free_list[order],
struct i915_buddy_block,
link);
if (block != root) {
pr_err("root mismatch at order=%u\n", order);
err = -EINVAL;
}
if (err)
break;
prev = root;
total += i915_buddy_block_size(mm, root);
}
if (!err) {
if (total != mm->size) {
pr_err("expected mm size=%llx, found=%llx\n", mm->size,
total);
err = -EINVAL;
}
return err;
}
if (prev) {
pr_err("prev root(%u), dump:\n", i - 1);
igt_dump_block(mm, prev);
}
if (root) {
pr_err("bad root(%u), dump:\n", i);
igt_dump_block(mm, root);
}
return err;
}
static void igt_mm_config(u64 *size, u64 *chunk_size)
{
I915_RND_STATE(prng);
u32 s, ms;
/* Nothing fancy, just try to get an interesting bit pattern */
prandom_seed_state(&prng, i915_selftest.random_seed);
/* Let size be a random number of pages up to 8 GB (2M pages) */
s = 1 + i915_prandom_u32_max_state((BIT(33 - 12)) - 1, &prng);
/* Let the chunk size be a random power of 2 less than size */
ms = BIT(i915_prandom_u32_max_state(ilog2(s), &prng));
/* Round size down to the chunk size */
s &= -ms;
/* Convert from pages to bytes */
*chunk_size = (u64)ms << 12;
*size = (u64)s << 12;
}
static int igt_buddy_alloc_smoke(void *arg)
{
struct i915_buddy_mm mm;
IGT_TIMEOUT(end_time);
I915_RND_STATE(prng);
u64 chunk_size;
u64 mm_size;
int *order;
int err, i;
igt_mm_config(&mm_size, &chunk_size);
pr_info("buddy_init with size=%llx, chunk_size=%llx\n", mm_size, chunk_size);
err = i915_buddy_init(&mm, mm_size, chunk_size);
if (err) {
pr_err("buddy_init failed(%d)\n", err);
return err;
}
order = i915_random_order(mm.max_order + 1, &prng);
if (!order) {
	err = -ENOMEM;
	goto out_fini;
}
for (i = 0; i <= mm.max_order; ++i) {
struct i915_buddy_block *block;
int max_order = order[i];
bool timeout = false;
LIST_HEAD(blocks);
int order;
u64 total;
err = igt_check_mm(&mm);
if (err) {
pr_err("pre-mm check failed, abort\n");
break;
}
pr_info("filling from max_order=%u\n", max_order);
order = max_order;
total = 0;
do {
retry:
block = i915_buddy_alloc(&mm, order);
if (IS_ERR(block)) {
err = PTR_ERR(block);
if (err == -ENOMEM) {
pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
order);
} else {
if (order--) {
err = 0;
goto retry;
}
pr_err("buddy_alloc with order=%d failed(%d)\n",
order, err);
}
break;
}
list_add_tail(&block->link, &blocks);
if (i915_buddy_block_order(block) != order) {
pr_err("buddy_alloc order mismatch\n");
err = -EINVAL;
break;
}
total += i915_buddy_block_size(&mm, block);
if (__igt_timeout(end_time, NULL)) {
timeout = true;
break;
}
} while (total < mm.size);
if (!err)
err = igt_check_blocks(&mm, &blocks, total, false);
i915_buddy_free_list(&mm, &blocks);
if (!err) {
err = igt_check_mm(&mm);
if (err)
pr_err("post-mm check failed\n");
}
if (err || timeout)
break;
cond_resched();
}
if (err == -ENOMEM)
err = 0;
kfree(order);
out_fini:
i915_buddy_fini(&mm);
return err;
}
static int igt_buddy_alloc_pessimistic(void *arg)
{
const unsigned int max_order = 16;
struct i915_buddy_block *block, *bn;
struct i915_buddy_mm mm;
unsigned int order;
LIST_HEAD(blocks);
int err;
/*
* Create a pot-sized mm, then allocate one of each possible
* order within. This should leave the mm with exactly one
* page left.
*/
err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
if (err) {
pr_err("buddy_init failed(%d)\n", err);
return err;
}
GEM_BUG_ON(mm.max_order != max_order);
for (order = 0; order < max_order; order++) {
block = i915_buddy_alloc(&mm, order);
if (IS_ERR(block)) {
pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
order);
err = PTR_ERR(block);
goto err;
}
list_add_tail(&block->link, &blocks);
}
/* And now the last remaining block available */
block = i915_buddy_alloc(&mm, 0);
if (IS_ERR(block)) {
pr_info("buddy_alloc hit -ENOMEM on final alloc\n");
err = PTR_ERR(block);
goto err;
}
list_add_tail(&block->link, &blocks);
/* Should be completely full! */
for (order = max_order; order--; ) {
block = i915_buddy_alloc(&mm, order);
if (!IS_ERR(block)) {
pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
order);
list_add_tail(&block->link, &blocks);
err = -EINVAL;
goto err;
}
}
block = list_last_entry(&blocks, typeof(*block), link);
list_del(&block->link);
i915_buddy_free(&mm, block);
/* As we free in increasing size, we make available larger blocks */
order = 1;
list_for_each_entry_safe(block, bn, &blocks, link) {
list_del(&block->link);
i915_buddy_free(&mm, block);
block = i915_buddy_alloc(&mm, order);
if (IS_ERR(block)) {
pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
order);
err = PTR_ERR(block);
goto err;
}
i915_buddy_free(&mm, block);
order++;
}
/* To confirm, now the whole mm should be available */
block = i915_buddy_alloc(&mm, max_order);
if (IS_ERR(block)) {
pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
max_order);
err = PTR_ERR(block);
goto err;
}
i915_buddy_free(&mm, block);
err:
i915_buddy_free_list(&mm, &blocks);
i915_buddy_fini(&mm);
return err;
}
static int igt_buddy_alloc_optimistic(void *arg)
{
const int max_order = 16;
struct i915_buddy_block *block;
struct i915_buddy_mm mm;
LIST_HEAD(blocks);
int order;
int err;
/*
* Create a mm with one block of each order available, and
* try to allocate them all.
*/
err = i915_buddy_init(&mm,
PAGE_SIZE * ((1 << (max_order + 1)) - 1),
PAGE_SIZE);
if (err) {
pr_err("buddy_init failed(%d)\n", err);
return err;
}
GEM_BUG_ON(mm.max_order != max_order);
for (order = 0; order <= max_order; order++) {
block = i915_buddy_alloc(&mm, order);
if (IS_ERR(block)) {
pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
order);
err = PTR_ERR(block);
goto err;
}
list_add_tail(&block->link, &blocks);
}
/* Should be completely full! */
block = i915_buddy_alloc(&mm, 0);
if (!IS_ERR(block)) {
pr_info("buddy_alloc unexpectedly succeeded, it should be full!");
list_add_tail(&block->link, &blocks);
err = -EINVAL;
goto err;
}
err:
i915_buddy_free_list(&mm, &blocks);
i915_buddy_fini(&mm);
return err;
}
static int igt_buddy_alloc_pathological(void *arg)
{
const int max_order = 16;
struct i915_buddy_block *block;
struct i915_buddy_mm mm;
LIST_HEAD(blocks);
LIST_HEAD(holes);
int order, top;
int err;
/*
* Create a pot-sized mm, then allocate one of each possible
* order within. This should leave the mm with exactly one
* page left. Free the largest block, then whittle down again.
* Eventually we will have a fully 50% fragmented mm.
*/
err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
if (err) {
pr_err("buddy_init failed(%d)\n", err);
return err;
}
GEM_BUG_ON(mm.max_order != max_order);
for (top = max_order; top; top--) {
/* Make room by freeing the largest allocated block */
block = list_first_entry_or_null(&blocks, typeof(*block), link);
if (block) {
list_del(&block->link);
i915_buddy_free(&mm, block);
}
for (order = top; order--; ) {
block = i915_buddy_alloc(&mm, order);
if (IS_ERR(block)) {
pr_info("buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
order, top);
err = PTR_ERR(block);
goto err;
}
list_add_tail(&block->link, &blocks);
}
/* There should be one final page for this sub-allocation */
block = i915_buddy_alloc(&mm, 0);
if (IS_ERR(block)) {
pr_info("buddy_alloc hit -ENOMEM for hole\n");
err = PTR_ERR(block);
goto err;
}
list_add_tail(&block->link, &holes);
block = i915_buddy_alloc(&mm, top);
if (!IS_ERR(block)) {
pr_info("buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
top, max_order);
list_add_tail(&block->link, &blocks);
err = -EINVAL;
goto err;
}
}
i915_buddy_free_list(&mm, &holes);
/* Nothing larger than blocks of chunk_size now available */
for (order = 1; order <= max_order; order++) {
block = i915_buddy_alloc(&mm, order);
if (!IS_ERR(block)) {
pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
order);
list_add_tail(&block->link, &blocks);
err = -EINVAL;
goto err;
}
}
err:
list_splice_tail(&holes, &blocks);
i915_buddy_free_list(&mm, &blocks);
i915_buddy_fini(&mm);
return err;
}
static int igt_buddy_alloc_range(void *arg)
{
struct i915_buddy_mm mm;
unsigned long page_num;
LIST_HEAD(blocks);
u64 chunk_size;
u64 offset;
u64 size;
u64 rem;
int err;
igt_mm_config(&size, &chunk_size);
pr_info("buddy_init with size=%llx, chunk_size=%llx\n", size, chunk_size);
err = i915_buddy_init(&mm, size, chunk_size);
if (err) {
pr_err("buddy_init failed(%d)\n", err);
return err;
}
err = igt_check_mm(&mm);
if (err) {
pr_err("pre-mm check failed, abort, abort, abort!\n");
goto err_fini;
}
rem = mm.size;
offset = 0;
for_each_prime_number_from(page_num, 1, ULONG_MAX - 1) {
struct i915_buddy_block *block;
LIST_HEAD(tmp);
size = min(page_num * mm.chunk_size, rem);
err = i915_buddy_alloc_range(&mm, &tmp, offset, size);
if (err) {
if (err == -ENOMEM) {
pr_info("alloc_range hit -ENOMEM with size=%llx\n",
size);
} else {
pr_err("alloc_range with offset=%llx, size=%llx failed(%d)\n",
offset, size, err);
}
break;
}
block = list_first_entry_or_null(&tmp,
struct i915_buddy_block,
link);
if (!block) {
pr_err("alloc_range has no blocks\n");
err = -EINVAL;
break;
}
if (i915_buddy_block_offset(block) != offset) {
pr_err("alloc_range start offset mismatch, found=%llx, expected=%llx\n",
i915_buddy_block_offset(block), offset);
err = -EINVAL;
}
if (!err)
err = igt_check_blocks(&mm, &tmp, size, true);
list_splice_tail(&tmp, &blocks);
if (err)
break;
offset += size;
rem -= size;
if (!rem)
break;
cond_resched();
}
if (err == -ENOMEM)
err = 0;
i915_buddy_free_list(&mm, &blocks);
if (!err) {
err = igt_check_mm(&mm);
if (err)
pr_err("post-mm check failed\n");
}
err_fini:
i915_buddy_fini(&mm);
return err;
}
static int igt_buddy_alloc_limit(void *arg)
{
struct i915_buddy_block *block;
struct i915_buddy_mm mm;
const u64 size = U64_MAX;
int err;
err = i915_buddy_init(&mm, size, PAGE_SIZE);
if (err)
return err;
if (mm.max_order != I915_BUDDY_MAX_ORDER) {
pr_err("mm.max_order(%d) != %d\n",
mm.max_order, I915_BUDDY_MAX_ORDER);
err = -EINVAL;
goto out_fini;
}
block = i915_buddy_alloc(&mm, mm.max_order);
if (IS_ERR(block)) {
err = PTR_ERR(block);
goto out_fini;
}
if (i915_buddy_block_order(block) != mm.max_order) {
pr_err("block order(%d) != %d\n",
i915_buddy_block_order(block), mm.max_order);
err = -EINVAL;
goto out_free;
}
if (i915_buddy_block_size(&mm, block) !=
BIT_ULL(mm.max_order) * PAGE_SIZE) {
pr_err("block size(%llu) != %llu\n",
i915_buddy_block_size(&mm, block),
BIT_ULL(mm.max_order) * PAGE_SIZE);
err = -EINVAL;
goto out_free;
}
out_free:
i915_buddy_free(&mm, block);
out_fini:
i915_buddy_fini(&mm);
return err;
}
int i915_buddy_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_buddy_alloc_pessimistic),
SUBTEST(igt_buddy_alloc_optimistic),
SUBTEST(igt_buddy_alloc_pathological),
SUBTEST(igt_buddy_alloc_smoke),
SUBTEST(igt_buddy_alloc_range),
SUBTEST(igt_buddy_alloc_limit),
};
return i915_subtests(tests, NULL);
}