Commit 03d864c0 authored by Daniel Phillips, committed by Mark Fasheh

ocfs2: allocate lockres hash pages in an array

This allows us to have a hash table larger than a single page, which greatly
improves dlm performance on some tests.
Signed-off-by: Daniel Phillips <phillips@google.com>
Signed-off-by: Mark Fasheh <mark.fasheh@oracle.com>
Parent 95c4f581
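The change splits the lockres hash across an array of independently allocated pages instead of a single page. Below is a minimal, stand-alone user-space sketch of that layout (not part of the patch; PAGE_SZ, struct bucket, alloc_hash and hash_bucket are made-up names): a vector of page-sized blocks, with a bucket index split into a page index and a slot within that page, mirroring what dlm_alloc_pagevec() and dlm_lockres_hash() do in the diff.

/* Stand-alone sketch of a hash table backed by a vector of pages.
 * Illustrative only: names and constants are not the kernel's. */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ          4096                      /* stand-in for PAGE_SIZE */
#define HASH_SIZE        (1 << 14)                 /* total table size in bytes */
#define HASH_PAGES       (HASH_SIZE / PAGE_SZ)     /* pages backing the table */
#define BUCKETS_PER_PAGE (PAGE_SZ / sizeof(struct bucket))
#define HASH_BUCKETS     (HASH_PAGES * BUCKETS_PER_PAGE)

struct bucket { void *first; };                    /* stand-in for struct hlist_head */

/* Allocate the table as an array of separate page-sized blocks, so no
 * contiguous multi-page allocation is needed; calloc() zeroes each block,
 * which plays the role of INIT_HLIST_HEAD for every bucket. */
static struct bucket **alloc_hash(void)
{
	struct bucket **vec = malloc(HASH_PAGES * sizeof(*vec));
	int i;

	if (!vec)
		return NULL;
	for (i = 0; i < HASH_PAGES; i++) {
		vec[i] = calloc(1, PAGE_SZ);
		if (!vec[i]) {
			while (i--)
				free(vec[i]);
			free(vec);
			return NULL;
		}
	}
	return vec;
}

/* Map a (possibly unreduced) hash value to its bucket: pick the page,
 * then the slot within that page. */
static struct bucket *hash_bucket(struct bucket **vec, unsigned int h)
{
	return vec[(h / BUCKETS_PER_PAGE) % HASH_PAGES] + (h % BUCKETS_PER_PAGE);
}

int main(void)
{
	struct bucket **vec = alloc_hash();
	unsigned int h = 123456789u;

	if (!vec)
		return 1;
	printf("%zu buckets across %d pages; hash %u -> slot %ld of its page\n",
	       (size_t)HASH_BUCKETS, HASH_PAGES, h,
	       (long)(hash_bucket(vec, h) - vec[(h / BUCKETS_PER_PAGE) % HASH_PAGES]));
	return 0;
}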
@@ -37,7 +37,10 @@
 #define DLM_THREAD_SHUFFLE_INTERVAL   5     // flush everything every 5 passes
 #define DLM_THREAD_MS                 200   // flush at least every 200 ms
-#define DLM_HASH_BUCKETS     (PAGE_SIZE / sizeof(struct hlist_head))
+#define DLM_HASH_SIZE          (1 << 14)
+#define DLM_HASH_PAGES         (DLM_HASH_SIZE / PAGE_SIZE)
+#define DLM_BUCKETS_PER_PAGE   (PAGE_SIZE / sizeof(struct hlist_head))
+#define DLM_HASH_BUCKETS       (DLM_HASH_PAGES * DLM_BUCKETS_PER_PAGE)
 
 /* Intended to make it easier for us to switch out hash functions */
 #define dlm_lockid_hash(_n, _l) full_name_hash(_n, _l)
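For a sense of scale (assuming a 4 KiB PAGE_SIZE and an 8-byte struct hlist_head, as on typical 64-bit configurations): DLM_HASH_SIZE = 1 << 14 = 16 KiB, so DLM_HASH_PAGES = 16384 / 4096 = 4, DLM_BUCKETS_PER_PAGE = 4096 / 8 = 512, and DLM_HASH_BUCKETS = 4 * 512 = 2048 -- four times the 512 buckets the old single-page table provided.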
@@ -88,7 +91,7 @@ enum dlm_ctxt_state {
 struct dlm_ctxt
 {
 	struct list_head list;
-	struct hlist_head *lockres_hash;
+	struct hlist_head **lockres_hash;
 	struct list_head dirty_list;
 	struct list_head purge_list;
 	struct list_head pending_asts;
@@ -135,6 +138,11 @@ struct dlm_ctxt
 	struct list_head dlm_eviction_callbacks;
 };
 
+static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned i)
+{
+	return dlm->lockres_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] + (i % DLM_BUCKETS_PER_PAGE);
+}
+
 /* these keventd work queue items are for less-frequently
  * called functions that cannot be directly called from the
  * net message handlers for some reason, usually because
...
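Note that dlm_lockres_hash() accepts an unreduced hash value: selecting page (i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES and slot i % DLM_BUCKETS_PER_PAGE is arithmetically the same as indexing bucket i % DLM_HASH_BUCKETS of one flat table, so callers below can pass the raw hash (e.g. q->hash) directly. A tiny stand-alone check of that identity, with constants matching the 4 KiB / 64-bit case (illustrative only, not part of the patch):

#include <assert.h>

#define PAGES    4
#define PER_PAGE 512
#define BUCKETS  (PAGES * PER_PAGE)

int main(void)
{
	unsigned int i;

	/* page/slot decomposition picks the same bucket as flat indexing */
	for (i = 0; i < 1000000u; i++)
		assert(((i / PER_PAGE) % PAGES) * PER_PAGE + (i % PER_PAGE)
		       == i % BUCKETS);
	return 0;
}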
@@ -136,7 +136,7 @@ void dlm_dump_lock_resources(struct dlm_ctxt *dlm)
 	spin_lock(&dlm->spinlock);
 	for (i=0; i<DLM_HASH_BUCKETS; i++) {
-		bucket = &(dlm->lockres_hash[i]);
+		bucket = dlm_lockres_hash(dlm, i);
 		hlist_for_each_entry(res, iter, bucket, hash_node)
 			dlm_print_one_lock_resource(res);
 	}
...
@@ -49,6 +49,30 @@
 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_DOMAIN)
 #include "cluster/masklog.h"
 
+static void dlm_free_pagevec(void **vec, int pages)
+{
+	while (pages--)
+		free_page((unsigned long)vec[pages]);
+	kfree(vec);
+}
+
+static void **dlm_alloc_pagevec(int pages)
+{
+	void **vec = kmalloc(pages * sizeof(void *), GFP_KERNEL);
+	int i;
+
+	if (!vec)
+		return NULL;
+
+	for (i = 0; i < pages; i++)
+		if (!(vec[i] = (void *)__get_free_page(GFP_KERNEL)))
+			goto out_free;
+	return vec;
+
+out_free:
+	dlm_free_pagevec(vec, i);
+	return NULL;
+}
+
 /*
  *
  * spinlock lock ordering: if multiple locks are needed, obey this ordering:
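A note on the two helpers just added (visible in the hunk above): dlm_alloc_pagevec() obtains the backing pages one at a time, and if an allocation fails partway it reaches out_free with i equal to the number of pages already allocated, so dlm_free_pagevec(vec, i) releases exactly those pages before freeing the pointer array itself. The same dlm_free_pagevec() is reused with DLM_HASH_PAGES on the normal teardown path in dlm_free_ctxt_mem() further down.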
@@ -90,7 +114,7 @@ void __dlm_insert_lockres(struct dlm_ctxt *dlm,
 	assert_spin_locked(&dlm->spinlock);
 
 	q = &res->lockname;
-	bucket = &(dlm->lockres_hash[q->hash % DLM_HASH_BUCKETS]);
+	bucket = dlm_lockres_hash(dlm, q->hash);
 
 	/* get a reference for our hashtable */
 	dlm_lockres_get(res);
@@ -110,7 +134,8 @@ struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
 	assert_spin_locked(&dlm->spinlock);
 
-	bucket = dlm->lockres_hash + full_name_hash(name, len) % DLM_HASH_BUCKETS;
+	bucket = dlm_lockres_hash(dlm, hash);
 
 	hlist_for_each(list, bucket) {
 		struct dlm_lock_resource *res = hlist_entry(list,
 			struct dlm_lock_resource, hash_node);
@@ -191,7 +216,7 @@ static int dlm_wait_on_domain_helper(const char *domain)
 static void dlm_free_ctxt_mem(struct dlm_ctxt *dlm)
 {
 	if (dlm->lockres_hash)
-		free_page((unsigned long) dlm->lockres_hash);
+		dlm_free_pagevec((void **)dlm->lockres_hash, DLM_HASH_PAGES);
 
 	if (dlm->name)
 		kfree(dlm->name);
@@ -301,8 +326,8 @@ static void dlm_migrate_all_locks(struct dlm_ctxt *dlm)
 restart:
 	spin_lock(&dlm->spinlock);
 	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
-		while (!hlist_empty(&dlm->lockres_hash[i])) {
-			res = hlist_entry(dlm->lockres_hash[i].first,
+		while (!hlist_empty(dlm_lockres_hash(dlm, i))) {
+			res = hlist_entry(dlm_lockres_hash(dlm, i)->first,
 					  struct dlm_lock_resource, hash_node);
 			/* need reference when manually grabbing lockres */
 			dlm_lockres_get(res);
@@ -1188,7 +1213,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
 		goto leave;
 	}
 
-	dlm->lockres_hash = (struct hlist_head *) __get_free_page(GFP_KERNEL);
+	dlm->lockres_hash = (struct hlist_head **)dlm_alloc_pagevec(DLM_HASH_PAGES);
 	if (!dlm->lockres_hash) {
 		mlog_errno(-ENOMEM);
 		kfree(dlm->name);
@@ -1197,8 +1222,8 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
 		goto leave;
 	}
 
-	for (i=0; i<DLM_HASH_BUCKETS; i++)
-		INIT_HLIST_HEAD(&dlm->lockres_hash[i]);
+	for (i = 0; i < DLM_HASH_BUCKETS; i++)
+		INIT_HLIST_HEAD(dlm_lockres_hash(dlm, i));
 
 	strcpy(dlm->name, domain);
 	dlm->key = key;
...
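One likely reason for the pagevec indirection (an assumption on my part, not stated in the commit message): growing the table to DLM_HASH_SIZE this way never asks the allocator for physically contiguous multi-page (higher-order) memory; dlm_alloc_ctxt() obtains each page independently via __get_free_page(), at the cost of the extra dereference in dlm_lockres_hash().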
...@@ -1719,7 +1719,7 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm, ...@@ -1719,7 +1719,7 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
* the RECOVERING state and set the owner * the RECOVERING state and set the owner
* if necessary */ * if necessary */
for (i = 0; i < DLM_HASH_BUCKETS; i++) { for (i = 0; i < DLM_HASH_BUCKETS; i++) {
bucket = &(dlm->lockres_hash[i]); bucket = dlm_lockres_hash(dlm, i);
hlist_for_each_entry(res, hash_iter, bucket, hash_node) { hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
if (res->state & DLM_LOCK_RES_RECOVERING) { if (res->state & DLM_LOCK_RES_RECOVERING) {
if (res->owner == dead_node) { if (res->owner == dead_node) {
...@@ -1884,7 +1884,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) ...@@ -1884,7 +1884,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
* need to be fired as a result. * need to be fired as a result.
*/ */
for (i = 0; i < DLM_HASH_BUCKETS; i++) { for (i = 0; i < DLM_HASH_BUCKETS; i++) {
bucket = &(dlm->lockres_hash[i]); bucket = dlm_lockres_hash(dlm, i);
hlist_for_each_entry(res, iter, bucket, hash_node) { hlist_for_each_entry(res, iter, bucket, hash_node) {
/* always prune any $RECOVERY entries for dead nodes, /* always prune any $RECOVERY entries for dead nodes,
* otherwise hangs can occur during later recovery */ * otherwise hangs can occur during later recovery */
......