Commit b3a070cc authored by Ben Widawsky, committed by Daniel Vetter

drm: pre allocate node for create_block

For an upcoming patch where we introduce the i915 VMA, it's ideal to
have the drm_mm_node as part of the VMA struct (i.e. pre-allocated).
Part of the conversion to VMAs is to kill off obj->gtt_space. Doing this
will break a bunch of code, among it the two callers of
drm_mm_create_block(), both related to stolen memory.

It also allows us to embed the drm_mm_node into the object for now,
which provides a nice transition over to the new code (a short sketch of
the new calling convention follows the tags below).

v2: Reordered to do before ripping out obj->gtt_offset.
Some minor cleanups made available because of reordering.

v3: s/continue/break on failed stolen node allocation (David)
Set obj->gtt_space on failed node allocation (David)
Only unref stolen (fix double free) on failed create_stolen (David)
Free node, and NULL it in failed create_stolen (David)
Add back accidentally removed newline (David)

CC: <dri-devel@lists.freedesktop.org>
Reviewed-by: David Herrmann <dh.herrmann@gmail.com>
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Acked-by: David Airlie <airlied@linux.ie>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Parent b79480ba
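For context, the effect of the interface change on callers can be sketched as follows. This is illustrative only and not part of the patch; struct example_vma and example_reserve() are made-up names. The caller now owns the drm_mm_node, typically embedded in its own structure, and drm_mm_create_block() fills it in and returns 0 or a negative errno instead of returning a freshly allocated node.

#include <drm/drm_mm.h>

/* Hypothetical container for the pre-allocated node; the real i915 VMA
 * that will eventually embed a drm_mm_node arrives in a later patch. */
struct example_vma {
	struct drm_mm_node node;	/* no drm_mm_kmalloc() inside drm_mm any more */
};

/* Reserve [start, start + size) for a node the caller already owns. */
static int example_reserve(struct drm_mm *mm, struct example_vma *vma,
			   unsigned long start, unsigned long size)
{
	int ret;

	/* New signature: the node is passed in, the result is 0 or an errno. */
	ret = drm_mm_create_block(mm, &vma->node, start, size);
	if (ret)
		return ret;	/* -ENOSPC: no hole covers the requested range */

	return 0;
}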
@@ -147,12 +147,10 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 	}
 }
 
-struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
-					unsigned long start,
-					unsigned long size,
-					bool atomic)
+int drm_mm_create_block(struct drm_mm *mm, struct drm_mm_node *node,
+			unsigned long start, unsigned long size)
 {
-	struct drm_mm_node *hole, *node;
+	struct drm_mm_node *hole;
 	unsigned long end = start + size;
 	unsigned long hole_start;
 	unsigned long hole_end;
@@ -161,10 +159,6 @@ struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
 		if (hole_start > start || hole_end < end)
 			continue;
 
-		node = drm_mm_kmalloc(mm, atomic);
-		if (unlikely(node == NULL))
-			return NULL;
-
 		node->start = start;
 		node->size = size;
 		node->mm = mm;
@@ -184,11 +178,11 @@ struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
 			node->hole_follows = 1;
 		}
 
-		return node;
+		return 0;
 	}
 
 	WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
-	return NULL;
+	return -ENOSPC;
 }
 EXPORT_SYMBOL(drm_mm_create_block);
@@ -629,14 +629,26 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
 	/* Mark any preallocated objects as occupied */
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+		int ret;
 		DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
 			      obj->gtt_offset, obj->base.size);
 
 		BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
-		obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
-						     obj->gtt_offset,
-						     obj->base.size,
-						     false);
+		obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL);
+		if (!obj->gtt_space) {
+			DRM_ERROR("Failed to preserve object at offset %x\n",
+				  obj->gtt_offset);
+			continue;
+		}
+		ret = drm_mm_create_block(&dev_priv->mm.gtt_space,
+					  obj->gtt_space,
+					  obj->gtt_offset,
+					  obj->base.size);
+		if (ret) {
+			DRM_DEBUG_KMS("Reservation failed\n");
+			kfree(obj->gtt_space);
+			obj->gtt_space = NULL;
+		}
 		obj->has_global_gtt_mapping = 1;
 	}
@@ -330,6 +330,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 	struct drm_mm_node *stolen;
+	int ret;
 
 	if (dev_priv->mm.stolen_base == 0)
 		return NULL;
@@ -344,11 +345,15 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	if (WARN_ON(size == 0))
 		return NULL;
 
-	stolen = drm_mm_create_block(&dev_priv->mm.stolen,
-				     stolen_offset, size,
-				     false);
-	if (stolen == NULL) {
+	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
+	if (!stolen)
+		return NULL;
+
+	ret = drm_mm_create_block(&dev_priv->mm.stolen, stolen, stolen_offset,
+				  size);
+	if (ret) {
 		DRM_DEBUG_KMS("failed to allocate stolen space\n");
+		kfree(stolen);
 		return NULL;
 	}
@@ -369,13 +374,18 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	 * later.
 	 */
 	if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
-		obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
-						     gtt_offset, size,
-						     false);
-		if (obj->gtt_space == NULL) {
+		obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL);
+		if (!obj->gtt_space) {
+			DRM_DEBUG_KMS("-ENOMEM stolen GTT space\n");
+			goto unref_out;
+		}
+
+		ret = drm_mm_create_block(&dev_priv->mm.gtt_space,
+					  obj->gtt_space,
+					  gtt_offset, size);
+		if (ret) {
 			DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
-			drm_gem_object_unreference(&obj->base);
-			return NULL;
+			goto free_out;
 		}
 	} else
 		obj->gtt_space = I915_GTT_RESERVED;
@@ -387,6 +397,13 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
 	return obj;
+
+free_out:
+	kfree(obj->gtt_space);
+	obj->gtt_space = NULL;
+unref_out:
+	drm_gem_object_unreference(&obj->base);
+	return NULL;
 }
 
 void
@@ -138,10 +138,10 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
 /*
  * Basic range manager support (drm_mm.c)
  */
-extern struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
-					       unsigned long start,
-					       unsigned long size,
-					       bool atomic);
+extern int drm_mm_create_block(struct drm_mm *mm,
+			       struct drm_mm_node *node,
+			       unsigned long start,
+			       unsigned long size);
 extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
 						    unsigned long size,
 						    unsigned alignment,
@@ -155,6 +155,7 @@ extern struct drm_mm_node *drm_mm_get_block_range_generic(
 						unsigned long start,
 						unsigned long end,
 						int atomic);
+
 static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
 						   unsigned long size,
 						   unsigned alignment)