提交 fd76bab2 编写于 作者: P Pekka Enberg 提交者: Linus Torvalds

slab: introduce krealloc

This introduces krealloc(), which reallocates memory while keeping the contents
unchanged.  The allocator avoids reallocation if the new size fits the
currently used cache.  I also added a simple non-optimized version to
mm/slob.c for compatibility.

[akpm@linux-foundation.org: fix warnings]
Acked-by: Josef Sipek <jsipek@fsl.cs.sunysb.edu>
Acked-by: Matt Mackall <mpm@selenic.com>
Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
上级 e3ebadd9
......@@ -72,8 +72,9 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
*/
void *__kmalloc(size_t, gfp_t);
void *__kzalloc(size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
unsigned int ksize(const void *);
size_t ksize(const void *);
/**
* kcalloc - allocate memory for an array. The memory is set to zero.
......
......@@ -3739,6 +3739,53 @@ void *__kmalloc(size_t size, gfp_t flags)
EXPORT_SYMBOL(__kmalloc);
#endif
/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 *
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 *
 * Returns a pointer to the (possibly moved) object, or %NULL if the
 * new allocation failed; on failure @p is left untouched.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
struct kmem_cache *cache, *new_cache;
void *ret;
/* realloc(NULL, n) degenerates to a plain allocation. */
if (unlikely(!p))
return kmalloc_track_caller(new_size, flags);
/* realloc(p, 0) degenerates to a free. */
if (unlikely(!new_size)) {
kfree(p);
return NULL;
}
cache = virt_to_cache(p);
new_cache = __find_general_cachep(new_size, flags);
/*
 * If new size fits in the current cache, bail out.
 * (Same general cache means the object's existing slot already
 * holds @new_size bytes, so the pointer can be returned as-is.
 * NOTE(review): objects from a dedicated kmem_cache never match a
 * general cache here and always take the copy path — TODO confirm.)
 */
if (likely(cache == new_cache))
return (void *)p;
/*
 * We are on the slow-path here so do not use __cache_alloc
 * because it bloats kernel text.
 */
ret = kmalloc_track_caller(new_size, flags);
if (ret) {
/* Copy no more than either object can hold, then drop the old one. */
memcpy(ret, p, min(new_size, ksize(p)));
kfree(p);
}
return ret;
}
EXPORT_SYMBOL(krealloc);
/**
* kmem_cache_free - Deallocate an object
* @cachep: The cache the allocation was from.
......@@ -4481,7 +4528,7 @@ const struct seq_operations slabstats_op = {
* allocated with either kmalloc() or kmem_cache_alloc(). The object
* must not be freed during the duration of the call.
*/
unsigned int ksize(const void *objp)
size_t ksize(const void *objp)
{
if (unlikely(objp == NULL))
return 0;
......
......@@ -190,6 +190,39 @@ void *__kmalloc(size_t size, gfp_t gfp)
}
EXPORT_SYMBOL(__kmalloc);
/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 *
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *new_obj;

	/* realloc(NULL, n) degenerates to a plain allocation. */
	if (unlikely(!p))
		return kmalloc_track_caller(new_size, flags);

	/* realloc(p, 0) degenerates to a free. */
	if (unlikely(!new_size)) {
		kfree(p);
		return NULL;
	}

	/* SLOB keeps it simple: always allocate, copy, free. */
	new_obj = kmalloc_track_caller(new_size, flags);
	if (!new_obj)
		return NULL;

	memcpy(new_obj, p, min(new_size, ksize(p)));
	kfree(p);
	return new_obj;
}
EXPORT_SYMBOL(krealloc);
void kfree(const void *block)
{
bigblock_t *bb, **last = &bigblocks;
......@@ -219,7 +252,7 @@ void kfree(const void *block)
EXPORT_SYMBOL(kfree);
unsigned int ksize(const void *block)
size_t ksize(const void *block)
{
bigblock_t *bb;
unsigned long flags;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册