提交 fa3536cc 编写于 作者: E Eric Dumazet 提交者: Linus Torvalds

[PATCH] Use __read_mostly on some hot fs variables

I discovered on oprofile hunting on a SMP platform that dentry lookups were
slowed down because d_hash_mask, d_hash_shift and dentry_hashtable were in
a cache line that contained inodes_stat.  So each time inodes_stat is
changed by a cpu, other cpus have to refill their cache line.

This patch moves some variables to the __read_mostly section, in order to
avoid false sharing.  RCU dentry lookups can go full speed.
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
上级 878a9f30
...@@ -30,7 +30,7 @@ ...@@ -30,7 +30,7 @@
#define BIO_POOL_SIZE 256 #define BIO_POOL_SIZE 256
static kmem_cache_t *bio_slab; static kmem_cache_t *bio_slab __read_mostly;
#define BIOVEC_NR_POOLS 6 #define BIOVEC_NR_POOLS 6
...@@ -39,7 +39,7 @@ static kmem_cache_t *bio_slab; ...@@ -39,7 +39,7 @@ static kmem_cache_t *bio_slab;
* basically we just need to survive * basically we just need to survive
*/ */
#define BIO_SPLIT_ENTRIES 8 #define BIO_SPLIT_ENTRIES 8
mempool_t *bio_split_pool; mempool_t *bio_split_pool __read_mostly;
struct biovec_slab { struct biovec_slab {
int nr_vecs; int nr_vecs;
......
...@@ -234,7 +234,7 @@ static int block_fsync(struct file *filp, struct dentry *dentry, int datasync) ...@@ -234,7 +234,7 @@ static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
*/ */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock); static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
static kmem_cache_t * bdev_cachep; static kmem_cache_t * bdev_cachep __read_mostly;
static struct inode *bdev_alloc_inode(struct super_block *sb) static struct inode *bdev_alloc_inode(struct super_block *sb)
{ {
...@@ -308,7 +308,7 @@ static struct file_system_type bd_type = { ...@@ -308,7 +308,7 @@ static struct file_system_type bd_type = {
.kill_sb = kill_anon_super, .kill_sb = kill_anon_super,
}; };
static struct vfsmount *bd_mnt; static struct vfsmount *bd_mnt __read_mostly;
struct super_block *blockdev_superblock; struct super_block *blockdev_superblock;
void __init bdev_cache_init(void) void __init bdev_cache_init(void)
......
...@@ -36,7 +36,7 @@ ...@@ -36,7 +36,7 @@
/* #define DCACHE_DEBUG 1 */ /* #define DCACHE_DEBUG 1 */
int sysctl_vfs_cache_pressure = 100; int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure); EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock); __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
...@@ -44,7 +44,7 @@ static seqlock_t rename_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED; ...@@ -44,7 +44,7 @@ static seqlock_t rename_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;
EXPORT_SYMBOL(dcache_lock); EXPORT_SYMBOL(dcache_lock);
static kmem_cache_t *dentry_cache; static kmem_cache_t *dentry_cache __read_mostly;
#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname)) #define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
...@@ -59,9 +59,9 @@ static kmem_cache_t *dentry_cache; ...@@ -59,9 +59,9 @@ static kmem_cache_t *dentry_cache;
#define D_HASHBITS d_hash_shift #define D_HASHBITS d_hash_shift
#define D_HASHMASK d_hash_mask #define D_HASHMASK d_hash_mask
static unsigned int d_hash_mask; static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift; static unsigned int d_hash_shift __read_mostly;
static struct hlist_head *dentry_hashtable; static struct hlist_head *dentry_hashtable __read_mostly;
static LIST_HEAD(dentry_unused); static LIST_HEAD(dentry_unused);
/* Statistics gathering. */ /* Statistics gathering. */
...@@ -1719,10 +1719,10 @@ static void __init dcache_init(unsigned long mempages) ...@@ -1719,10 +1719,10 @@ static void __init dcache_init(unsigned long mempages)
} }
/* SLAB cache for __getname() consumers */ /* SLAB cache for __getname() consumers */
kmem_cache_t *names_cachep; kmem_cache_t *names_cachep __read_mostly;
/* SLAB cache for file structures */ /* SLAB cache for file structures */
kmem_cache_t *filp_cachep; kmem_cache_t *filp_cachep __read_mostly;
EXPORT_SYMBOL(d_genocide); EXPORT_SYMBOL(d_genocide);
......
...@@ -38,9 +38,9 @@ struct dcookie_struct { ...@@ -38,9 +38,9 @@ struct dcookie_struct {
static LIST_HEAD(dcookie_users); static LIST_HEAD(dcookie_users);
static DEFINE_MUTEX(dcookie_mutex); static DEFINE_MUTEX(dcookie_mutex);
static kmem_cache_t * dcookie_cache; static kmem_cache_t *dcookie_cache __read_mostly;
static struct list_head * dcookie_hashtable; static struct list_head *dcookie_hashtable __read_mostly;
static size_t hash_size; static size_t hash_size __read_mostly;
static inline int is_live(void) static inline int is_live(void)
{ {
......
...@@ -21,9 +21,9 @@ ...@@ -21,9 +21,9 @@
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/slab.h> #include <linux/slab.h>
int dir_notify_enable = 1; int dir_notify_enable __read_mostly = 1;
static kmem_cache_t *dn_cache; static kmem_cache_t *dn_cache __read_mostly;
static void redo_inode_mask(struct inode *inode) static void redo_inode_mask(struct inode *inode)
{ {
......
...@@ -281,13 +281,13 @@ static struct mutex epmutex; ...@@ -281,13 +281,13 @@ static struct mutex epmutex;
static struct poll_safewake psw; static struct poll_safewake psw;
/* Slab cache used to allocate "struct epitem" */ /* Slab cache used to allocate "struct epitem" */
static kmem_cache_t *epi_cache; static kmem_cache_t *epi_cache __read_mostly;
/* Slab cache used to allocate "struct eppoll_entry" */ /* Slab cache used to allocate "struct eppoll_entry" */
static kmem_cache_t *pwq_cache; static kmem_cache_t *pwq_cache __read_mostly;
/* Virtual fs used to allocate inodes for eventpoll files */ /* Virtual fs used to allocate inodes for eventpoll files */
static struct vfsmount *eventpoll_mnt; static struct vfsmount *eventpoll_mnt __read_mostly;
/* File callbacks that implement the eventpoll file behaviour */ /* File callbacks that implement the eventpoll file behaviour */
static struct file_operations eventpoll_fops = { static struct file_operations eventpoll_fops = {
......
...@@ -412,7 +412,7 @@ asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg ...@@ -412,7 +412,7 @@ asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg
/* Table to convert sigio signal codes into poll band bitmaps */ /* Table to convert sigio signal codes into poll band bitmaps */
static long band_table[NSIGPOLL] = { static const long band_table[NSIGPOLL] = {
POLLIN | POLLRDNORM, /* POLL_IN */ POLLIN | POLLRDNORM, /* POLL_IN */
POLLOUT | POLLWRNORM | POLLWRBAND, /* POLL_OUT */ POLLOUT | POLLWRNORM | POLLWRBAND, /* POLL_OUT */
POLLIN | POLLRDNORM | POLLMSG, /* POLL_MSG */ POLLIN | POLLRDNORM | POLLMSG, /* POLL_MSG */
...@@ -531,7 +531,7 @@ int send_sigurg(struct fown_struct *fown) ...@@ -531,7 +531,7 @@ int send_sigurg(struct fown_struct *fown)
} }
static DEFINE_RWLOCK(fasync_lock); static DEFINE_RWLOCK(fasync_lock);
static kmem_cache_t *fasync_cache; static kmem_cache_t *fasync_cache __read_mostly;
/* /*
* fasync_helper() is used by some character device drivers (mainly mice) * fasync_helper() is used by some character device drivers (mainly mice)
......
...@@ -56,8 +56,8 @@ ...@@ -56,8 +56,8 @@
#define I_HASHBITS i_hash_shift #define I_HASHBITS i_hash_shift
#define I_HASHMASK i_hash_mask #define I_HASHMASK i_hash_mask
static unsigned int i_hash_mask; static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift; static unsigned int i_hash_shift __read_mostly;
/* /*
* Each inode can be on two separate lists. One is * Each inode can be on two separate lists. One is
...@@ -73,7 +73,7 @@ static unsigned int i_hash_shift; ...@@ -73,7 +73,7 @@ static unsigned int i_hash_shift;
LIST_HEAD(inode_in_use); LIST_HEAD(inode_in_use);
LIST_HEAD(inode_unused); LIST_HEAD(inode_unused);
static struct hlist_head *inode_hashtable; static struct hlist_head *inode_hashtable __read_mostly;
/* /*
* A simple spinlock to protect the list manipulations. * A simple spinlock to protect the list manipulations.
...@@ -98,7 +98,7 @@ static DEFINE_MUTEX(iprune_mutex); ...@@ -98,7 +98,7 @@ static DEFINE_MUTEX(iprune_mutex);
*/ */
struct inodes_stat_t inodes_stat; struct inodes_stat_t inodes_stat;
static kmem_cache_t * inode_cachep; static kmem_cache_t * inode_cachep __read_mostly;
static struct inode *alloc_inode(struct super_block *sb) static struct inode *alloc_inode(struct super_block *sb)
{ {
......
...@@ -39,15 +39,15 @@ ...@@ -39,15 +39,15 @@
static atomic_t inotify_cookie; static atomic_t inotify_cookie;
static kmem_cache_t *watch_cachep; static kmem_cache_t *watch_cachep __read_mostly;
static kmem_cache_t *event_cachep; static kmem_cache_t *event_cachep __read_mostly;
static struct vfsmount *inotify_mnt; static struct vfsmount *inotify_mnt __read_mostly;
/* these are configurable via /proc/sys/fs/inotify/ */ /* these are configurable via /proc/sys/fs/inotify/ */
int inotify_max_user_instances; int inotify_max_user_instances __read_mostly;
int inotify_max_user_watches; int inotify_max_user_watches __read_mostly;
int inotify_max_queued_events; int inotify_max_queued_events __read_mostly;
/* /*
* Lock ordering: * Lock ordering:
......
...@@ -142,7 +142,7 @@ int lease_break_time = 45; ...@@ -142,7 +142,7 @@ int lease_break_time = 45;
static LIST_HEAD(file_lock_list); static LIST_HEAD(file_lock_list);
static LIST_HEAD(blocked_list); static LIST_HEAD(blocked_list);
static kmem_cache_t *filelock_cache; static kmem_cache_t *filelock_cache __read_mostly;
/* Allocate an empty lock structure. */ /* Allocate an empty lock structure. */
static struct file_lock *locks_alloc_lock(void) static struct file_lock *locks_alloc_lock(void)
......
...@@ -43,9 +43,9 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock); ...@@ -43,9 +43,9 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
static int event; static int event;
static struct list_head *mount_hashtable; static struct list_head *mount_hashtable __read_mostly;
static int hash_mask __read_mostly, hash_bits __read_mostly; static int hash_mask __read_mostly, hash_bits __read_mostly;
static kmem_cache_t *mnt_cache; static kmem_cache_t *mnt_cache __read_mostly;
static struct rw_semaphore namespace_sem; static struct rw_semaphore namespace_sem;
/* /sys/fs */ /* /sys/fs */
......
...@@ -675,7 +675,7 @@ struct inode* pipe_new(struct inode* inode) ...@@ -675,7 +675,7 @@ struct inode* pipe_new(struct inode* inode)
return NULL; return NULL;
} }
static struct vfsmount *pipe_mnt; static struct vfsmount *pipe_mnt __read_mostly;
static int pipefs_delete_dentry(struct dentry *dentry) static int pipefs_delete_dentry(struct dentry *dentry)
{ {
return 1; return 1;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册