Commit bdfb0430 authored by Christoph Hellwig, committed by Alex Elder

xfs: replace KM_LARGE with explicit vmalloc use

We use the KM_LARGE flag to make kmem_alloc and friends use vmalloc
if necessary.  As we only need this for a few boot/mount-time
allocations, just switch to explicit vmalloc calls there.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Alex Elder <aelder@sgi.com>

Parent a14a348b
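At a glance, the change swaps a flag-driven allocation for explicit helpers. A minimal before/after sketch of the calling convention (buf and nbytes are illustrative names, not taken from the diff):

	/* Before: kmem_zalloc() fell back to __vmalloc() when KM_LARGE was set. */
	buf = kmem_zalloc(nbytes, KM_SLEEP | KM_LARGE);
	/* ... use buf ... */
	kmem_free(buf);

	/* After: the large allocation is explicit, and the free must match,
	 * since vmalloc memory cannot be handed to kfree(). */
	buf = kmem_zalloc_large(nbytes);
	/* ... use buf ... */
	kmem_free_large(buf);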
fs/xfs/linux-2.6/kmem.c

@@ -16,7 +16,6 @@
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include <linux/mm.h>
-#include <linux/vmalloc.h>
 #include <linux/highmem.h>
 #include <linux/swap.h>
 #include <linux/blkdev.h>
@@ -24,8 +23,25 @@
 #include "time.h"
 #include "kmem.h"
 
-#define MAX_VMALLOCS	6
-#define MAX_SLAB_SIZE	0x20000
+/*
+ * Greedy allocation.  May fail and may return vmalloced memory.
+ *
+ * Must be freed using kmem_free_large.
+ */
+void *
+kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize)
+{
+	void		*ptr;
+	size_t		kmsize = maxsize;
+
+	while (!(ptr = kmem_zalloc_large(kmsize))) {
+		if ((kmsize >>= 1) <= minsize)
+			kmsize = minsize;
+	}
+	if (ptr)
+		*size = kmsize;
+	return ptr;
+}
 
 void *
 kmem_alloc(size_t size, unsigned int __nocast flags)
@@ -34,19 +50,8 @@ kmem_alloc(size_t size, unsigned int __nocast flags)
 	gfp_t	lflags = kmem_flags_convert(flags);
 	void	*ptr;
 
-#ifdef DEBUG
-	if (unlikely(!(flags & KM_LARGE) && (size > PAGE_SIZE))) {
-		printk(KERN_WARNING "Large %s attempt, size=%ld\n",
-			__func__, (long)size);
-		dump_stack();
-	}
-#endif
-
 	do {
-		if (size < MAX_SLAB_SIZE || retries > MAX_VMALLOCS)
-			ptr = kmalloc(size, lflags);
-		else
-			ptr = __vmalloc(size, lflags, PAGE_KERNEL);
+		ptr = kmalloc(size, lflags);
 		if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
 			return ptr;
 		if (!(++retries % 100))
@@ -68,27 +73,6 @@ kmem_zalloc(size_t size, unsigned int __nocast flags)
 	return ptr;
 }
 
-void *
-kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize,
-		   unsigned int __nocast flags)
-{
-	void		*ptr;
-	size_t		kmsize = maxsize;
-	unsigned int	kmflags = (flags & ~KM_SLEEP) | KM_NOSLEEP;
-
-	while (!(ptr = kmem_zalloc(kmsize, kmflags))) {
-		if ((kmsize <= minsize) && (flags & KM_NOSLEEP))
-			break;
-		if ((kmsize >>= 1) <= minsize) {
-			kmsize = minsize;
-			kmflags = flags;
-		}
-	}
-	if (ptr)
-		*size = kmsize;
-	return ptr;
-}
-
 void
 kmem_free(const void *ptr)
 {
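The replacement kmem_zalloc_greedy() above is simpler than the one it removes: it starts at maxsize, halves the request on each failure, and once it reaches minsize keeps retrying at that floor, reporting the size actually obtained through *size. As committed, the loop only exits when an allocation succeeds, so the trailing NULL check is defensive rather than reachable. A hedged usage sketch mirroring the xfs_bulkstat() call further below (variable names are illustrative):

	size_t	got;
	void	*buf;

	/* Ask for up to four pages, settle for as little as one. */
	buf = kmem_zalloc_greedy(&got, PAGE_SIZE, PAGE_SIZE * 4);
	if (!buf)
		return ENOMEM;		/* defensive check, per the callers */
	/* ... use buf[0 .. got) ... */
	kmem_free_large(buf);		/* greedy memory is vmalloc-backed */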
fs/xfs/linux-2.6/kmem.h

@@ -21,6 +21,7 @@
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/vmalloc.h>
 
 /*
  * General memory allocation interfaces
@@ -30,7 +31,6 @@
 #define KM_NOSLEEP	0x0002u
 #define KM_NOFS		0x0004u
 #define KM_MAYFAIL	0x0008u
-#define KM_LARGE	0x0010u
 
 /*
  * We use a special process flag to avoid recursive callbacks into
@@ -42,7 +42,7 @@ kmem_flags_convert(unsigned int __nocast flags)
 {
 	gfp_t	lflags;
 
-	BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_LARGE));
+	BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL));
 
 	if (flags & KM_NOSLEEP) {
 		lflags = GFP_ATOMIC | __GFP_NOWARN;
@@ -56,10 +56,25 @@ kmem_flags_convert(unsigned int __nocast flags)
 extern void *kmem_alloc(size_t, unsigned int __nocast);
 extern void *kmem_zalloc(size_t, unsigned int __nocast);
-extern void *kmem_zalloc_greedy(size_t *, size_t, size_t, unsigned int __nocast);
 extern void *kmem_realloc(const void *, size_t, size_t, unsigned int __nocast);
 extern void  kmem_free(const void *);
 
+static inline void *kmem_zalloc_large(size_t size)
+{
+	void *ptr;
+
+	ptr = vmalloc(size);
+	if (ptr)
+		memset(ptr, 0, size);
+	return ptr;
+}
+
+static inline void kmem_free_large(void *ptr)
+{
+	vfree(ptr);
+}
+
+extern void *kmem_zalloc_greedy(size_t *, size_t, size_t);
+
 /*
  * Zone interfaces
  */
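The two inline helpers above make the slab/vmalloc split explicit at each call site, and they pin down the pairing rule: vmalloc-backed memory must be released with vfree(), never with the kfree()-based kmem_free(). A minimal sketch of the rule (tbl and nbytes are illustrative names):

	/* vmalloc()-backed and zeroed; may sleep, so boot/mount paths only. */
	tbl = kmem_zalloc_large(nbytes);
	if (!tbl)
		return ENOMEM;		/* XFS returns positive errnos here */
	/* ... use tbl ... */
	kmem_free_large(tbl);		/* vfree(); plain kmem_free() would be a bug */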
fs/xfs/linux-2.6/xfs_buf.c

@@ -1525,8 +1525,8 @@ xfs_alloc_bufhash(
 	btp->bt_hashshift = external ? 3 : 8;	/* 8 or 256 buckets */
 	btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
-	btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) *
-				   sizeof(xfs_bufhash_t), KM_SLEEP | KM_LARGE);
+	btp->bt_hash = kmem_zalloc_large((1 << btp->bt_hashshift) *
+					 sizeof(xfs_bufhash_t));
 	for (i = 0; i < (1 << btp->bt_hashshift); i++) {
 		spin_lock_init(&btp->bt_hash[i].bh_lock);
 		INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
@@ -1537,7 +1537,7 @@ STATIC void
 xfs_free_bufhash(
 	xfs_buftarg_t	*btp)
 {
-	kmem_free(btp->bt_hash);
+	kmem_free_large(btp->bt_hash);
 	btp->bt_hash = NULL;
 }
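The buffer-target hash table is one of the mount-time allocations the commit message mentions: its size is fixed when the buftarg is set up and, at 256 buckets, can exceed a page, which is why it previously carried KM_LARGE. Illustrative arithmetic (the per-bucket size is an assumption for the example, not from the diff):

	nbuckets = 1 << btp->bt_hashshift;	/* external ? 8 : 256 */
	/* e.g. 256 buckets * ~32 bytes per xfs_bufhash_t (spinlock + list
	 * head) is ~8 KiB, comfortably past PAGE_SIZE on common configs. */
	nbytes = nbuckets * sizeof(xfs_bufhash_t);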
fs/xfs/quota/xfs_qm.c

@@ -118,9 +118,14 @@ xfs_Gqm_init(void)
 	 */
 	udqhash = kmem_zalloc_greedy(&hsize,
 				     XFS_QM_HASHSIZE_LOW * sizeof(xfs_dqhash_t),
-				     XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t),
-				     KM_SLEEP | KM_MAYFAIL | KM_LARGE);
-	gdqhash = kmem_zalloc(hsize, KM_SLEEP | KM_LARGE);
+				     XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t));
+	if (!udqhash)
+		goto out;
+
+	gdqhash = kmem_zalloc_large(hsize);
+	if (!gdqhash)
+		goto out_free_udqhash;
+
 	hsize /= sizeof(xfs_dqhash_t);
 	ndquot = hsize << 8;
@@ -170,6 +175,11 @@ xfs_Gqm_init(void)
 	mutex_init(&qcheck_lock);
 #endif
 	return xqm;
+
+ out_free_udqhash:
+	kmem_free_large(udqhash);
+ out:
+	return NULL;
 }
 
 /*
@@ -189,8 +199,8 @@ xfs_qm_destroy(
 		xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
 		xfs_qm_list_destroy(&(xqm->qm_grp_dqhtable[i]));
 	}
-	kmem_free(xqm->qm_usr_dqhtable);
-	kmem_free(xqm->qm_grp_dqhtable);
+	kmem_free_large(xqm->qm_usr_dqhtable);
+	kmem_free_large(xqm->qm_grp_dqhtable);
 	xqm->qm_usr_dqhtable = NULL;
 	xqm->qm_grp_dqhtable = NULL;
 	xqm->qm_dqhashmask = 0;
@@ -219,8 +229,12 @@ xfs_qm_hold_quotafs_ref(
 	 */
 	mutex_lock(&xfs_Gqm_lock);
 
-	if (xfs_Gqm == NULL)
+	if (!xfs_Gqm) {
 		xfs_Gqm = xfs_Gqm_init();
+		if (!xfs_Gqm)
+			return ENOMEM;
+	}
 
 	/*
 	 * We can keep a list of all filesystems with quotas mounted for
 	 * debugging and statistical purposes, but ...
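The error path added to xfs_Gqm_init() is the standard kernel goto-unwind shape: each allocation gets a label that frees everything allocated before it, so a failure midway releases resources in reverse order. A generic sketch of the pattern (identifiers are placeholders, not from the diff):

	a = kmem_zalloc_greedy(&asize, minsz, maxsz);
	if (!a)
		goto out;
	b = kmem_zalloc_large(bsize);
	if (!b)
		goto out_free_a;
	/* ... more setup, then the success return ... */
	return obj;

 out_free_a:
	kmem_free_large(a);
 out:
	return NULL;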
fs/xfs/xfs_itable.c

@@ -408,8 +408,10 @@ xfs_bulkstat(
 		(XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog);
 	nimask = ~(nicluster - 1);
 	nbcluster = nicluster >> mp->m_sb.sb_inopblog;
-	irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4,
-				   KM_SLEEP | KM_MAYFAIL | KM_LARGE);
+	irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
+	if (!irbuf)
+		return ENOMEM;
+
 	nirbuf = irbsize / sizeof(*irbuf);
 
 	/*
@@ -727,7 +729,7 @@ xfs_bulkstat(
 	/*
 	 * Done, we're either out of filesystem or space to put the data.
 	 */
-	kmem_free(irbuf);
+	kmem_free_large(irbuf);
 	*ubcountp = ubelem;
 
 	/*
 	 * Found some inodes, return them now and return the error next time.