Commit c51c8e7b authored by Hannes Reinecke, committed by Nicholas Bellinger

target: use 'se_dev_entry' when allocating UAs

We need to use 'se_dev_entry' as the argument when allocating
UAs, otherwise we'll never see any UAs for an implicit
ALUA state transition triggered from userspace.

(Add target_ua_allocate_lun() common caller - nab)
Signed-off-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Parent: 31605813
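
For orientation, the new calling convention introduced by this patch looks roughly as follows. This is a condensed sketch distilled from the diff below (the `if (deve)` form collapses the early-return in the actual patch), not verbatim kernel code:

/* Sketch only: core_scsi3_ua_allocate() now takes the se_dev_entry directly. */
int core_scsi3_ua_allocate(struct se_dev_entry *deve, u8 asc, u8 ascq);

/*
 * Wrapper keeping the old nacl + LUN interface for the PR and transport
 * callers: it resolves the se_dev_entry under RCU and delegates to
 * core_scsi3_ua_allocate().
 */
void target_ua_allocate_lun(struct se_node_acl *nacl,
                            u32 unpacked_lun, u8 asc, u8 ascq)
{
    struct se_dev_entry *deve;

    if (!nacl)
        return;

    rcu_read_lock();
    deve = target_nacl_find_deve(nacl, unpacked_lun);
    if (deve)
        core_scsi3_ua_allocate(deve, asc, ascq);
    rcu_read_unlock();
}

Callers that already iterate lun->lun_deve_list (the ALUA state-change path) pass the se_dev_entry directly, so UAs are queued even for entries without explicit Node+MappedLUN ACLs; PR and transport callers keep the nacl + LUN interface via the wrapper.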
@@ -972,23 +972,32 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
         list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) {
             lacl = rcu_dereference_check(se_deve->se_lun_acl,
                     lockdep_is_held(&lun->lun_deve_lock));
             /*
-             * se_deve->se_lun_acl pointer may be NULL for a
-             * entry created without explicit Node+MappedLUN ACLs
+             * spc4r37 p.242:
+             * After an explicit target port asymmetric access
+             * state change, a device server shall establish a
+             * unit attention condition with the additional sense
+             * code set to ASYMMETRIC ACCESS STATE CHANGED for
+             * the initiator port associated with every I_T nexus
+             * other than the I_T nexus on which the SET TARGET
+             * PORT GROUPS command was received.
              */
-            if (!lacl)
-                continue;
             if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
                  ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
-                (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
-                (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl) &&
                 (tg_pt_gp->tg_pt_gp_alua_lun != NULL) &&
                 (tg_pt_gp->tg_pt_gp_alua_lun == lun))
                 continue;
-            core_scsi3_ua_allocate(lacl->se_lun_nacl,
-                se_deve->mapped_lun, 0x2A,
+            /*
+             * se_deve->se_lun_acl pointer may be NULL for a
+             * entry created without explicit Node+MappedLUN ACLs
+             */
+            if (lacl && (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
+                (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl))
+                continue;
+            core_scsi3_ua_allocate(se_deve, 0x2A,
                 ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
         }
         spin_unlock_bh(&lun->lun_deve_lock);
@@ -2197,7 +2197,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
                     &pr_tmpl->registration_list,
                     pr_reg_list) {
-                core_scsi3_ua_allocate(
+                target_ua_allocate_lun(
                     pr_reg_p->pr_reg_nacl,
                     pr_reg_p->pr_res_mapped_lun,
                     0x2A,
@@ -2624,7 +2624,7 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope,
             if (pr_reg_p == pr_reg)
                 continue;
-            core_scsi3_ua_allocate(pr_reg_p->pr_reg_nacl,
+            target_ua_allocate_lun(pr_reg_p->pr_reg_nacl,
                     pr_reg_p->pr_res_mapped_lun,
                     0x2A, ASCQ_2AH_RESERVATIONS_RELEASED);
         }
@@ -2709,7 +2709,7 @@ core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key)
          * additional sense code set to RESERVATIONS PREEMPTED.
          */
         if (!calling_it_nexus)
-            core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun,
+            target_ua_allocate_lun(pr_reg_nacl, pr_res_mapped_lun,
                 0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED);
     }
     spin_unlock(&pr_tmpl->registration_lock);
@@ -2918,7 +2918,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
                         NULL, 0);
             }
             if (!calling_it_nexus)
-                core_scsi3_ua_allocate(pr_reg_nacl,
+                target_ua_allocate_lun(pr_reg_nacl,
                     pr_res_mapped_lun, 0x2A,
                     ASCQ_2AH_REGISTRATIONS_PREEMPTED);
         }
@@ -3024,7 +3024,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
          * persistent reservation and/or registration, with the
          * additional sense code set to REGISTRATIONS PREEMPTED;
          */
-        core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A,
+        target_ua_allocate_lun(pr_reg_nacl, pr_res_mapped_lun, 0x2A,
                 ASCQ_2AH_REGISTRATIONS_PREEMPTED);
     }
     spin_unlock(&pr_tmpl->registration_lock);
@@ -3057,7 +3057,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
         if (calling_it_nexus)
             continue;
-        core_scsi3_ua_allocate(pr_reg->pr_reg_nacl,
+        target_ua_allocate_lun(pr_reg->pr_reg_nacl,
                 pr_reg->pr_res_mapped_lun, 0x2A,
                 ASCQ_2AH_RESERVATIONS_RELEASED);
     }
@@ -1677,13 +1677,13 @@ void transport_generic_request_failure(struct se_cmd *cmd,
          * See spc4r17, section 7.4.6 Control Mode Page, Table 349
          */
         if (cmd->se_sess &&
-            cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2)
-            core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
+            cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) {
+            target_ua_allocate_lun(cmd->se_sess->se_node_acl,
                 cmd->orig_fe_lun, 0x2C,
                 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
+        }
         trace_target_cmd_complete(cmd);
         ret = cmd->se_tfo->queue_status(cmd);
         if (ret == -EAGAIN || ret == -ENOMEM)
             goto queue_full;
         goto check_stop;
@@ -87,18 +87,11 @@ target_scsi3_ua_check(struct se_cmd *cmd)
 }

 int core_scsi3_ua_allocate(
-    struct se_node_acl *nacl,
-    u64 unpacked_lun,
+    struct se_dev_entry *deve,
     u8 asc,
     u8 ascq)
 {
-    struct se_dev_entry *deve;
     struct se_ua *ua, *ua_p, *ua_tmp;
-    /*
-     * PASSTHROUGH OPS
-     */
-    if (!nacl)
-        return -EINVAL;

     ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
     if (!ua) {
@@ -110,12 +103,6 @@ int core_scsi3_ua_allocate(
     ua->ua_asc = asc;
     ua->ua_ascq = ascq;

-    rcu_read_lock();
-    deve = target_nacl_find_deve(nacl, unpacked_lun);
-    if (!deve) {
-        rcu_read_unlock();
-        return -EINVAL;
-    }
     spin_lock(&deve->ua_lock);
     list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) {
         /*
@@ -123,7 +110,6 @@ int core_scsi3_ua_allocate(
          */
         if ((ua_p->ua_asc == asc) && (ua_p->ua_ascq == ascq)) {
             spin_unlock(&deve->ua_lock);
-            rcu_read_unlock();
             kmem_cache_free(se_ua_cache, ua);
             return 0;
         }
@@ -170,22 +156,38 @@ int core_scsi3_ua_allocate(
             spin_unlock(&deve->ua_lock);
             atomic_inc_mb(&deve->ua_count);
-            rcu_read_unlock();
             return 0;
         }
     list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
     spin_unlock(&deve->ua_lock);

-    pr_debug("[%s]: Allocated UNIT ATTENTION, mapped LUN: %llu, ASC:"
-        " 0x%02x, ASCQ: 0x%02x\n",
-        nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
+    pr_debug("Allocated UNIT ATTENTION, mapped LUN: %llu, ASC:"
+        " 0x%02x, ASCQ: 0x%02x\n", deve->mapped_lun,
         asc, ascq);

     atomic_inc_mb(&deve->ua_count);
-    rcu_read_unlock();
     return 0;
 }

+void target_ua_allocate_lun(struct se_node_acl *nacl,
+                u32 unpacked_lun, u8 asc, u8 ascq)
+{
+    struct se_dev_entry *deve;
+
+    if (!nacl)
+        return;
+
+    rcu_read_lock();
+    deve = target_nacl_find_deve(nacl, unpacked_lun);
+    if (!deve) {
+        rcu_read_unlock();
+        return;
+    }
+
+    core_scsi3_ua_allocate(deve, asc, ascq);
+    rcu_read_unlock();
+}
+
 void core_scsi3_ua_release_all(
     struct se_dev_entry *deve)
 {
@@ -28,7 +28,8 @@
 extern struct kmem_cache *se_ua_cache;

 extern sense_reason_t target_scsi3_ua_check(struct se_cmd *);
-extern int core_scsi3_ua_allocate(struct se_node_acl *, u64, u8, u8);
+extern int core_scsi3_ua_allocate(struct se_dev_entry *, u8, u8);
+extern void target_ua_allocate_lun(struct se_node_acl *, u32, u8, u8);
 extern void core_scsi3_ua_release_all(struct se_dev_entry *);
 extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *);
 extern int core_scsi3_ua_clear_for_request_sense(struct se_cmd *,