Commit 22a40fd9 authored by Linus Torvalds

Merge tag 'dlm-3.8' of git://git.kernel.org/pub/scm/linux/kernel/git/teigland/linux-dlm

Pull dlm updates from David Teigland:
 "This set fixes some conditions in which value blocks are invalidated,
  and includes two trivial cleanups."

* tag 'dlm-3.8' of git://git.kernel.org/pub/scm/linux/kernel/git/teigland/linux-dlm:
  dlm: fix lvb invalidation conditions
  fs/dlm: remove CONFIG_EXPERIMENTAL
  dlm: remove unused variable in *dlm_lowcomms_get_buffer()
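
For context (not part of the merge itself), the fix concerns lock value blocks (lvbs): a caller that passes DLM_LKF_VALBLK attaches a value block to its lock, and after recovery the completion may carry DLM_SBF_VALNOTVALID to say the contents can no longer be trusted. Below is a minimal in-kernel sketch of that usage, assuming a lockspace handle ls created elsewhere with a non-zero lvblen; the resource name, LVB_LEN and the callback are illustrative assumptions.

#include <linux/dlm.h>
#include <linux/completion.h>
#include <linux/printk.h>
#include <linux/string.h>

#define LVB_LEN 32                      /* illustrative; must equal the lockspace lvblen */

static struct dlm_lksb lksb;
static char lvb[LVB_LEN];
static DECLARE_COMPLETION(lock_done);

/* completion callback: sb_status and sb_flags are valid from here on */
static void lock_ast(void *astarg)
{
        if (lksb.sb_flags & DLM_SBF_VALNOTVALID)
                pr_info("lvb is stale, contents must be rebuilt\n");
        complete(&lock_done);
}

/* take a PW lock with a value block; a PW/EX holder dying is exactly the
   case the fix now reports as VALNOTVALID on surviving nodes */
static int take_pw_lock(dlm_lockspace_t *ls)
{
        int error;

        lksb.sb_lvbptr = lvb;
        error = dlm_lock(ls, DLM_LOCK_PW, &lksb, DLM_LKF_VALBLK,
                         "example-res", strlen("example-res"), 0,
                         lock_ast, NULL, NULL);
        if (error)
                return error;

        wait_for_completion(&lock_done);
        return lksb.sb_status;
}
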
fs/dlm/Kconfig

 menuconfig DLM
         tristate "Distributed Lock Manager (DLM)"
-        depends on EXPERIMENTAL && INET
+        depends on INET
         depends on SYSFS && CONFIGFS_FS && (IPV6 || IPV6=n)
         select IP_SCTP
         help
fs/dlm/dlm_internal.h

@@ -337,6 +337,7 @@ enum rsb_flags {
         RSB_NEW_MASTER2,
         RSB_RECOVER_CONVERT,
         RSB_RECOVER_GRANT,
+        RSB_RECOVER_LVB_INVAL,
 };
 
 static inline void rsb_set_flag(struct dlm_rsb *r, enum rsb_flags flag)
fs/dlm/lock.c

@@ -5393,6 +5393,13 @@ static void purge_dead_list(struct dlm_ls *ls, struct dlm_rsb *r,
                 if ((lkb->lkb_nodeid == nodeid_gone) ||
                     dlm_is_removed(ls, lkb->lkb_nodeid)) {
 
+                        /* tell recover_lvb to invalidate the lvb
+                           because a node holding EX/PW failed */
+                        if ((lkb->lkb_exflags & DLM_LKF_VALBLK) &&
+                            (lkb->lkb_grmode >= DLM_LOCK_PW)) {
+                                rsb_set_flag(r, RSB_RECOVER_LVB_INVAL);
+                        }
+
                         del_lkb(r, lkb);
 
                         /* this put should free the lkb */

@@ -6025,15 +6032,18 @@ static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
         return error;
 }
 
-/* The force flag allows the unlock to go ahead even if the lkb isn't granted.
-   Regardless of what rsb queue the lock is on, it's removed and freed. */
+/* The FORCEUNLOCK flag allows the unlock to go ahead even if the lkb isn't
+   granted.  Regardless of what rsb queue the lock is on, it's removed and
+   freed.  The IVVALBLK flag causes the lvb on the resource to be invalidated
+   if our lock is PW/EX (it's ignored if our granted mode is smaller.) */
 
 static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
 {
         struct dlm_args args;
         int error;
 
-        set_unlock_args(DLM_LKF_FORCEUNLOCK, lkb->lkb_ua, &args);
+        set_unlock_args(DLM_LKF_FORCEUNLOCK | DLM_LKF_IVVALBLK,
+                        lkb->lkb_ua, &args);
 
         error = unlock_lock(ls, lkb, &args);
         if (error == -DLM_EUNLOCK)
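
The new comment above unlock_proc_lock describes the caller-visible meaning of the two flags it now combines. As a hedged illustration (not code from this commit), a regular kernel DLM user can request the same behaviour through dlm_unlock(); the lockspace handle, lkid and lksb here are assumed to come from an earlier dlm_lock().

#include <linux/dlm.h>
#include <linux/types.h>

/* release a lock and, if it was granted in PW/EX, mark the resource lvb
   invalid.  Completion is asynchronous: the lock's ast fires with
   sb_status == -DLM_EUNLOCK once the unlock has been processed. */
static int drop_lock_and_invalidate(dlm_lockspace_t *ls, u32 lkid,
                                    struct dlm_lksb *lksb)
{
        return dlm_unlock(ls, lkid, DLM_LKF_FORCEUNLOCK | DLM_LKF_IVVALBLK,
                          lksb, NULL);
}
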
fs/dlm/lowcomms.c

@@ -1385,7 +1385,6 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
         struct connection *con;
         struct writequeue_entry *e;
         int offset = 0;
-        int users = 0;
 
         con = nodeid2con(nodeid, allocation);
         if (!con)

@@ -1399,7 +1398,7 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
         } else {
                 offset = e->end;
                 e->end += len;
-                users = e->users++;
+                e->users++;
         }
         spin_unlock(&con->writequeue_lock);
 

@@ -1414,7 +1413,7 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
         spin_lock(&con->writequeue_lock);
         offset = e->end;
         e->end += len;
-        users = e->users++;
+        e->users++;
         list_add_tail(&e->list, &con->writequeue);
         spin_unlock(&con->writequeue_lock);
         goto got_one;
fs/dlm/recover.c

@@ -717,8 +717,14 @@ void dlm_recovered_lock(struct dlm_rsb *r)
  * the VALNOTVALID flag if necessary, and determining the correct lvb contents
  * based on the lvb's of the locks held on the rsb.
  *
- * RSB_VALNOTVALID is set if there are only NL/CR locks on the rsb.  If it
- * was already set prior to recovery, it's not cleared, regardless of locks.
+ * RSB_VALNOTVALID is set in two cases:
+ *
+ * 1. we are master, but not new, and we purged an EX/PW lock held by a
+ * failed node (in dlm_recover_purge which set RSB_RECOVER_LVB_INVAL)
+ *
+ * 2. we are a new master, and there are only NL/CR locks left.
+ * (We could probably improve this by only invalidating in this way when
+ * the previous master left uncleanly.  VMS docs mention that.)
  *
  * The LVB contents are only considered for changing when this is a new master
  * of the rsb (NEW_MASTER2).  Then, the rsb's lvb is taken from any lkb with

@@ -734,6 +740,19 @@ static void recover_lvb(struct dlm_rsb *r)
         int big_lock_exists = 0;
         int lvblen = r->res_ls->ls_lvblen;
 
+        if (!rsb_flag(r, RSB_NEW_MASTER2) &&
+            rsb_flag(r, RSB_RECOVER_LVB_INVAL)) {
+                /* case 1 above */
+                rsb_set_flag(r, RSB_VALNOTVALID);
+                return;
+        }
+
+        if (!rsb_flag(r, RSB_NEW_MASTER2))
+                return;
+
+        /* we are the new master, so figure out if VALNOTVALID should
+           be set, and set the rsb lvb from the best lkb available. */
+
         list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
                 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
                         continue;

@@ -772,13 +791,10 @@ static void recover_lvb(struct dlm_rsb *r)
         if (!lock_lvb_exists)
                 goto out;
 
+        /* lvb is invalidated if only NL/CR locks remain */
         if (!big_lock_exists)
                 rsb_set_flag(r, RSB_VALNOTVALID);
 
-        /* don't mess with the lvb unless we're the new master */
-        if (!rsb_flag(r, RSB_NEW_MASTER2))
-                goto out;
-
         if (!r->res_lvbptr) {
                 r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
                 if (!r->res_lvbptr)

@@ -852,12 +868,19 @@ void dlm_recover_rsbs(struct dlm_ls *ls)
                 if (is_master(r)) {
                         if (rsb_flag(r, RSB_RECOVER_CONVERT))
                                 recover_conversion(r);
+
+                        /* recover lvb before granting locks so the updated
+                           lvb/VALNOTVALID is presented in the completion */
+                        recover_lvb(r);
+
                         if (rsb_flag(r, RSB_NEW_MASTER2))
                                 recover_grant(r);
-                        recover_lvb(r);
                         count++;
-                }
+                } else {
+                        rsb_clear_flag(r, RSB_VALNOTVALID);
+                }
                 rsb_clear_flag(r, RSB_RECOVER_CONVERT);
+                rsb_clear_flag(r, RSB_RECOVER_LVB_INVAL);
                 rsb_clear_flag(r, RSB_NEW_MASTER2);
                 unlock_rsb(r);
         }
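
Because recover_lvb() now runs before recover_grant(), a lock granted during recovery sees the updated lvb/VALNOTVALID state in its completion. The following is a hedged sketch of what an application is then expected to do; this is conventional DLM value-block usage, not code from this series, and rebuild_state_into() plus the buffer handling are illustrative assumptions.

#include <linux/dlm.h>
#include <linux/string.h>

/* called from the grant completion; lvb/lvblen belong to the caller and
   rebuild_state_into() is a hypothetical helper that regenerates the data
   the value block caches */
static void handle_granted(struct dlm_lksb *lksb, char *lvb, int lvblen,
                           void (*rebuild_state_into)(char *buf, int len))
{
        if (lksb->sb_status != 0)
                return;                         /* not granted */

        if (lksb->sb_flags & DLM_SBF_VALNOTVALID) {
                /* a PW/EX holder failed (case 1) or only NL/CR locks were
                   left on a new master (case 2): treat the lvb as stale */
                memset(lvb, 0, lvblen);
                rebuild_state_into(lvb, lvblen);
                /* the rebuilt lvb reaches the resource the next time this
                   lock is converted down or unlocked from PW/EX with
                   DLM_LKF_VALBLK set */
        }
}
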