Commit 8758280f authored by Nathan Scott

[XFS] Cleanup the use of zones/slabs, more consistent and allows flags to be passed.

SGI-PV: 949073
SGI-Modid: xfs-linux-melb:xfs-kern:25122a
Signed-off-by: Nathan Scott <nathans@sgi.com>
Parent 8d280b98
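The net effect of the patch is easiest to see at a call site: zone setup that previously had to call kmem_cache_create() directly whenever it wanted slab flags now goes through a single kmem_zone_init_flags() helper. Below is a minimal sketch of the resulting pattern, based on the interfaces added in this diff; my_item_t, my_zone, and my_zone_setup() are hypothetical names for illustration, not part of the patch.

/* Hypothetical user of the zone interfaces introduced below. */
typedef struct my_item {
	int	payload;
} my_item_t;

static kmem_zone_t *my_zone;

static int my_zone_setup(void)
{
	/* request cacheline alignment and reclaim accounting via KM_ZONE_* */
	my_zone = kmem_zone_init_flags(sizeof(my_item_t), "my_zone",
					KM_ZONE_HWALIGN | KM_ZONE_RECLAIM,
					NULL);	/* no constructor */
	if (!my_zone)
		return -ENOMEM;
	return 0;
}

static void my_zone_teardown(void)
{
	kmem_zone_destroy(my_zone);	/* BUGs if objects are still live */
}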
@@ -23,17 +23,8 @@
 #include <linux/mm.h>
 
 /*
- * memory management routines
+ * Process flags handling
  */
-#define KM_SLEEP	0x0001u
-#define KM_NOSLEEP	0x0002u
-#define KM_NOFS		0x0004u
-#define KM_MAYFAIL	0x0008u
-
-#define	kmem_zone	kmem_cache
-#define kmem_zone_t	struct kmem_cache
-
 typedef unsigned long xfs_pflags_t;
-
 #define PFLAGS_TEST_NOIO()		(current->flags & PF_NOIO)
 #define PFLAGS_TEST_FSTRANS()		(current->flags & PF_FSTRANS)
@@ -67,74 +58,102 @@ typedef unsigned long xfs_pflags_t;
 		*(NSTATEP) = *(OSTATEP);	\
 } while (0)
 
-static __inline gfp_t kmem_flags_convert(unsigned int __nocast flags)
+/*
+ * General memory allocation interfaces
+ */
+
+#define KM_SLEEP	0x0001u
+#define KM_NOSLEEP	0x0002u
+#define KM_NOFS		0x0004u
+#define KM_MAYFAIL	0x0008u
+
+/*
+ * We use a special process flag to avoid recursive callbacks into
+ * the filesystem during transactions.  We will also issue our own
+ * warnings, so we explicitly skip any generic ones (silly of us).
+ */
+static inline gfp_t
+kmem_flags_convert(unsigned int __nocast flags)
 {
-	gfp_t lflags = __GFP_NOWARN;	/* we'll report problems, if need be */
-
-#ifdef DEBUG
-	if (unlikely(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL))) {
-		printk(KERN_WARNING
-		    "XFS: memory allocation with wrong flags (%x)\n", flags);
-		BUG();
-	}
-#endif
+	gfp_t	lflags;
+
+	BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL));
 
 	if (flags & KM_NOSLEEP) {
-		lflags |= GFP_ATOMIC;
+		lflags = GFP_ATOMIC | __GFP_NOWARN;
 	} else {
-		lflags |= GFP_KERNEL;
-
-		/* avoid recusive callbacks to filesystem during transactions */
+		lflags = GFP_KERNEL | __GFP_NOWARN;
 		if (PFLAGS_TEST_FSTRANS() || (flags & KM_NOFS))
 			lflags &= ~__GFP_FS;
 	}
-
-	return lflags;
+	return lflags;
 }
 
-static __inline kmem_zone_t *
+extern void *kmem_alloc(size_t, unsigned int __nocast);
+extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast);
+extern void *kmem_zalloc(size_t, unsigned int __nocast);
+extern void  kmem_free(void *, size_t);
+
+/*
+ * Zone interfaces
+ */
+
+#define KM_ZONE_HWALIGN	SLAB_HWCACHE_ALIGN
+#define KM_ZONE_RECLAIM	SLAB_RECLAIM_ACCOUNT
+#define KM_ZONE_SPREAD	0
+
+#define kmem_zone	kmem_cache
+#define kmem_zone_t	struct kmem_cache
+
+static inline kmem_zone_t *
 kmem_zone_init(int size, char *zone_name)
 {
 	return kmem_cache_create(zone_name, size, 0, 0, NULL, NULL);
 }
 
-static __inline void
+static inline kmem_zone_t *
+kmem_zone_init_flags(int size, char *zone_name, unsigned long flags,
+		     void (*construct)(void *, kmem_zone_t *, unsigned long))
+{
+	return kmem_cache_create(zone_name, size, 0, flags, construct, NULL);
+}
+
+static inline void
 kmem_zone_free(kmem_zone_t *zone, void *ptr)
 {
 	kmem_cache_free(zone, ptr);
 }
 
-static __inline void
+static inline void
 kmem_zone_destroy(kmem_zone_t *zone)
 {
 	if (zone && kmem_cache_destroy(zone))
 		BUG();
 }
 
-extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
 extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast);
+extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
 
-extern void *kmem_alloc(size_t, unsigned int __nocast);
-extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast);
-extern void *kmem_zalloc(size_t, unsigned int __nocast);
-extern void  kmem_free(void *, size_t);
+/*
+ * Low memory cache shrinkers
+ */
 
 typedef struct shrinker *kmem_shaker_t;
 typedef int (*kmem_shake_func_t)(int, gfp_t);
 
-static __inline kmem_shaker_t
+static inline kmem_shaker_t
 kmem_shake_register(kmem_shake_func_t sfunc)
 {
 	return set_shrinker(DEFAULT_SEEKS, sfunc);
}
 
-static __inline void
+static inline void
 kmem_shake_deregister(kmem_shaker_t shrinker)
 {
 	remove_shrinker(shrinker);
 }
 
-static __inline int
+static inline int
 kmem_shake_allow(gfp_t gfp_mask)
 {
 	return (gfp_mask & __GFP_WAIT);
...
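The core of the header change above is kmem_flags_convert(), which maps the XFS-private KM_* flags onto the kernel's gfp_t bits: KM_NOSLEEP becomes an atomic allocation, everything else is GFP_KERNEL, and __GFP_FS is stripped when the caller asked for KM_NOFS or the task is inside a transaction (PF_FSTRANS). The stand-alone model below illustrates just that mapping; it is ordinary user-space C, the GFP_* values are stand-in bits rather than the kernel's real definitions, and in_fs_transaction stands in for the PF_FSTRANS test.

#include <assert.h>
#include <stdio.h>

#define KM_SLEEP	0x0001u
#define KM_NOSLEEP	0x0002u
#define KM_NOFS		0x0004u
#define KM_MAYFAIL	0x0008u

#define __GFP_FS	0x0080u			/* stand-in bit */
#define __GFP_NOWARN	0x0200u			/* stand-in bit */
#define GFP_ATOMIC	0x0020u			/* stand-in value */
#define GFP_KERNEL	(0x0010u | __GFP_FS)	/* stand-in: includes __GFP_FS */

static unsigned int flags_convert(unsigned int flags, int in_fs_transaction)
{
	unsigned int lflags;

	/* models the BUG_ON() that rejects unknown KM_* bits */
	assert(!(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL)));

	if (flags & KM_NOSLEEP) {
		lflags = GFP_ATOMIC | __GFP_NOWARN;
	} else {
		lflags = GFP_KERNEL | __GFP_NOWARN;
		if (in_fs_transaction || (flags & KM_NOFS))
			lflags &= ~__GFP_FS;	/* no filesystem recursion */
	}
	return lflags;
}

int main(void)
{
	/* KM_NOFS strips the filesystem-reentry bit from a sleeping allocation */
	assert(!(flags_convert(KM_NOFS, 0) & __GFP_FS));
	/* a plain KM_SLEEP allocation keeps it */
	assert(flags_convert(KM_SLEEP, 0) & __GFP_FS);
	/* inside a transaction, even KM_SLEEP loses it */
	assert(!(flags_convert(KM_SLEEP, 1) & __GFP_FS));
	puts("flag mapping behaves as described above");
	return 0;
}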
@@ -1805,13 +1805,12 @@ xfs_flush_buftarg(
 int __init
 xfs_buf_init(void)
 {
-	int		error = -ENOMEM;
-
 #ifdef XFS_BUF_TRACE
 	xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP);
 #endif
 
-	xfs_buf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf");
+	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
+						KM_ZONE_HWALIGN, NULL);
 	if (!xfs_buf_zone)
 		goto out_free_trace_buf;
 
@@ -1839,7 +1838,7 @@ xfs_buf_init(void)
 #ifdef XFS_BUF_TRACE
 	ktrace_free(xfs_buf_trace_buf);
 #endif
-	return error;
+	return -ENOMEM;
 }
 
 void
...
@@ -337,8 +337,8 @@ linvfs_alloc_inode(
 {
 	vnode_t			*vp;
 
-	vp = kmem_cache_alloc(xfs_vnode_zone, kmem_flags_convert(KM_SLEEP));
-	if (!vp)
+	vp = kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP);
+	if (unlikely(!vp))
 		return NULL;
 	return LINVFS_GET_IP(vp);
 }
@@ -352,23 +352,21 @@ linvfs_destroy_inode(
 
 STATIC void
 linvfs_inode_init_once(
-	void			*data,
-	kmem_cache_t		*cachep,
+	void			*vnode,
+	kmem_zone_t		*zonep,
 	unsigned long		flags)
 {
-	vnode_t			*vp = (vnode_t *)data;
-
 	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
 	    SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(LINVFS_GET_IP(vp));
+		inode_init_once(LINVFS_GET_IP((vnode_t *)vnode));
 }
 
 STATIC int
-linvfs_init_zones(void)
+xfs_init_zones(void)
 {
-	xfs_vnode_zone = kmem_cache_create("xfs_vnode",
-			sizeof(vnode_t), 0, SLAB_RECLAIM_ACCOUNT,
-			linvfs_inode_init_once, NULL);
+	xfs_vnode_zone = kmem_zone_init_flags(sizeof(vnode_t), "xfs_vnode_t",
+					KM_ZONE_HWALIGN | KM_ZONE_RECLAIM,
+					linvfs_inode_init_once);
 	if (!xfs_vnode_zone)
 		goto out;
 
@@ -377,14 +375,12 @@ linvfs_init_zones(void)
 		goto out_destroy_vnode_zone;
 
 	xfs_ioend_pool = mempool_create(4 * MAX_BUF_PER_PAGE,
 					mempool_alloc_slab, mempool_free_slab,
 					xfs_ioend_zone);
 	if (!xfs_ioend_pool)
 		goto out_free_ioend_zone;
-
 	return 0;
 
-
  out_free_ioend_zone:
 	kmem_zone_destroy(xfs_ioend_zone);
  out_destroy_vnode_zone:
@@ -394,7 +390,7 @@ linvfs_init_zones(void)
 }
 
 STATIC void
-linvfs_destroy_zones(void)
+xfs_destroy_zones(void)
 {
 	mempool_destroy(xfs_ioend_pool);
 	kmem_zone_destroy(xfs_vnode_zone);
@@ -405,7 +401,7 @@ linvfs_destroy_zones(void)
  * Attempt to flush the inode, this will actually fail
  * if the inode is pinned, but we dirty the inode again
- * at the point when it is unpinned after a log write, 
+ * at the point when it is unpinned after a log write,
  * since this is when the inode itself becomes flushable.
  */
 STATIC int
 linvfs_write_inode(
@@ -963,7 +959,7 @@ init_xfs_fs( void )
 
 	ktrace_init(64);
 
-	error = linvfs_init_zones();
+	error = xfs_init_zones();
 	if (error < 0)
 		goto undo_zones;
@@ -986,7 +982,7 @@ init_xfs_fs( void )
 	xfs_buf_terminate();
 
  undo_buffers:
-	linvfs_destroy_zones();
+	xfs_destroy_zones();
 
  undo_zones:
 	return error;
@@ -1000,7 +996,7 @@ exit_xfs_fs( void )
 
 	unregister_filesystem(&xfs_fs_type);
 	xfs_cleanup();
 	xfs_buf_terminate();
-	linvfs_destroy_zones();
+	xfs_destroy_zones();
 	ktrace_uninit();
 }
...
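One detail worth noting in the inode-zone hunks above: kmem_zone_init_flags() forwards its construct argument unchanged to kmem_cache_create(), so zone constructors keep the usual slab signature and the SLAB_CTOR_* guard, exactly as linvfs_inode_init_once() does. A hypothetical constructor for the my_item_t sketch shown earlier might look like this, assuming the same kernel era as the patch:

static void
my_item_init_once(
	void		*obj,
	kmem_zone_t	*zonep,
	unsigned long	flags)
{
	/* initialise only on first construction, not on slab-debug verify */
	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR)
		((my_item_t *)obj)->payload = 0;
}

It would be hooked up by passing my_item_init_once as the last argument of kmem_zone_init_flags() in the earlier sketch.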
@@ -380,7 +380,7 @@ typedef struct xfs_trans {
 	xfs_trans_header_t	t_header;	/* header for in-log trans */
 	unsigned int		t_busy_free;	/* busy descs free */
 	xfs_log_busy_chunk_t	t_busy;		/* busy/async free blocks */
-	xfs_pflags_t		t_pflags;	/* saved pflags state */
+	unsigned long		t_pflags;	/* saved process flags state */
 } xfs_trans_t;
 
 #endif	/* __KERNEL__ */
...
@@ -77,11 +77,12 @@ xfs_init(void)
 						 "xfs_bmap_free_item");
 	xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
 					     "xfs_btree_cur");
-	xfs_inode_zone = kmem_zone_init(sizeof(xfs_inode_t), "xfs_inode");
 	xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
 	xfs_da_state_zone =
 		kmem_zone_init(sizeof(xfs_da_state_t), "xfs_da_state");
 	xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf");
+	xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
+	xfs_acl_zone_init(xfs_acl_zone, "xfs_acl");
 
 	/*
 	 * The size of the zone allocated buf log item is the maximum
@@ -93,17 +94,30 @@ xfs_init(void)
 			(((XFS_MAX_BLOCKSIZE / XFS_BLI_CHUNK) /
 			  NBWORD) * sizeof(int))),
 		       "xfs_buf_item");
-	xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
-				      ((XFS_EFD_MAX_FAST_EXTENTS - 1) * sizeof(xfs_extent_t))),
+	xfs_efd_zone =
+		kmem_zone_init((sizeof(xfs_efd_log_item_t) +
+				((XFS_EFD_MAX_FAST_EXTENTS - 1) *
+				 sizeof(xfs_extent_t))),
 			       "xfs_efd_item");
-	xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
-				      ((XFS_EFI_MAX_FAST_EXTENTS - 1) * sizeof(xfs_extent_t))),
+	xfs_efi_zone =
+		kmem_zone_init((sizeof(xfs_efi_log_item_t) +
+				((XFS_EFI_MAX_FAST_EXTENTS - 1) *
+				 sizeof(xfs_extent_t))),
 			       "xfs_efi_item");
-	xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
-	xfs_ili_zone = kmem_zone_init(sizeof(xfs_inode_log_item_t), "xfs_ili");
-	xfs_chashlist_zone = kmem_zone_init(sizeof(xfs_chashlist_t),
-					    "xfs_chashlist");
-	xfs_acl_zone_init(xfs_acl_zone, "xfs_acl");
+
+	/*
+	 * These zones warrant special memory allocator hints
+	 */
+	xfs_inode_zone =
+		kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
+					KM_ZONE_HWALIGN | KM_ZONE_RECLAIM |
+					KM_ZONE_SPREAD, NULL);
+	xfs_ili_zone =
+		kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
+					KM_ZONE_SPREAD, NULL);
+	xfs_chashlist_zone =
+		kmem_zone_init_flags(sizeof(xfs_chashlist_t), "xfs_chashlist",
+					KM_ZONE_SPREAD, NULL);
 
 	/*
 	 * Allocate global trace buffers.
...