Commit 89579f77 authored by Thomas Hellstrom, committed by Dave Airlie

drm: Apply "Memory fragmentation from lost alignment blocks"

also for the atomic path by using a common code-path.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>

Parent: 78ecf091
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -187,9 +187,10 @@ static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
 }
 
-struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *node,
-                                     unsigned long size, unsigned alignment)
+struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
+                                             unsigned long size,
+                                             unsigned alignment,
+                                             int atomic)
 {
         struct drm_mm_node *align_splitoff = NULL;
@@ -200,7 +201,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *node,
         if (tmp) {
                 align_splitoff =
-                    drm_mm_split_at_start(node, alignment - tmp, 0);
+                    drm_mm_split_at_start(node, alignment - tmp, atomic);
                 if (unlikely(align_splitoff == NULL))
                         return NULL;
         }
@@ -209,7 +210,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *node,
                 list_del_init(&node->fl_entry);
                 node->free = 0;
         } else {
-                node = drm_mm_split_at_start(node, size, 0);
+                node = drm_mm_split_at_start(node, size, atomic);
         }
 
         if (align_splitoff)
@@ -217,42 +218,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *node,
         return node;
 }
 
-EXPORT_SYMBOL(drm_mm_get_block);
-
-struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
-                                            unsigned long size,
-                                            unsigned alignment)
-{
-        struct drm_mm_node *align_splitoff = NULL;
-        struct drm_mm_node *child;
-        unsigned tmp = 0;
-
-        if (alignment)
-                tmp = parent->start % alignment;
-
-        if (tmp) {
-                align_splitoff =
-                    drm_mm_split_at_start(parent, alignment - tmp, 1);
-                if (unlikely(align_splitoff == NULL))
-                        return NULL;
-        }
-
-        if (parent->size == size) {
-                list_del_init(&parent->fl_entry);
-                parent->free = 0;
-                return parent;
-        } else {
-                child = drm_mm_split_at_start(parent, size, 1);
-        }
-
-        if (align_splitoff)
-                drm_mm_put_block(align_splitoff);
-
-        return child;
-}
-EXPORT_SYMBOL(drm_mm_get_block_atomic);
+EXPORT_SYMBOL(drm_mm_get_block_generic);
 
 /*
  * Put a block. Merge with the previous and / or next block if they are free.
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -59,13 +59,22 @@ struct drm_mm {
 /*
  * Basic range manager support (drm_mm.c)
  */
-extern struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
-                                            unsigned long size,
-                                            unsigned alignment);
-extern struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
-                                                    unsigned long size,
-                                                    unsigned alignment);
+extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
+                                                     unsigned long size,
+                                                     unsigned alignment,
+                                                     int atomic);
+static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
+                                                   unsigned long size,
+                                                   unsigned alignment)
+{
+        return drm_mm_get_block_generic(parent, size, alignment, 0);
+}
+static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
+                                                           unsigned long size,
+                                                           unsigned alignment)
+{
+        return drm_mm_get_block_generic(parent, size, alignment, 1);
+}
+
 extern void drm_mm_put_block(struct drm_mm_node *cur);
 extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
                                               unsigned long size,
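For context, here is a minimal caller-side sketch, not part of this commit, of how the two inline wrappers above might be used. It assumes drm_mm_search_free() takes (mm, size, alignment, best_match), which is not fully visible in the truncated hunk above, and the helper name alloc_aligned_node() is purely illustrative:

/*
 * Hypothetical helper (illustration only): find free space in a drm_mm
 * range manager and carve out an aligned block, using the atomic wrapper
 * when the caller cannot sleep.  Both wrappers now funnel into
 * drm_mm_get_block_generic(), so the alignment split-off block is handled
 * the same way on either path.
 */
static struct drm_mm_node *alloc_aligned_node(struct drm_mm *mm,
                                              unsigned long size,
                                              unsigned alignment,
                                              int in_atomic)
{
        struct drm_mm_node *free_space;

        free_space = drm_mm_search_free(mm, size, alignment, 0);
        if (!free_space)
                return NULL;

        /* Either call may still return NULL if splitting the node fails. */
        return in_atomic ?
                drm_mm_get_block_atomic(free_space, size, alignment) :
                drm_mm_get_block(free_space, size, alignment);
}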