Commit 659743b0 authored by Shlomo Pongratz, committed by James Bottomley

[SCSI] libiscsi: Reduce locking contention in fast path

Replace the session lock with two locks, a forward lock and
a backward lock, named frwd_lock and back_lock respectively.

The forward lock protects resources that change while sending a
request to the target, such as cmdsn, queued_cmdsn, and allocating
a task from the command pool with kfifo_out.

The backward lock protects resources that change while processing
a response or in the error path, such as cmdsn_exp, cmdsn_max, and
returning tasks to the command pool with kfifo_in.
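
As an illustration only, the following simplified C sketch (not the actual
libiscsi code; my_session, my_task, my_alloc_task and my_complete_task are
hypothetical names, and initialization is omitted) shows how the fast path
splits across the two locks: submissions touch only the forward lock,
completions only the backward lock.

    #include <linux/kfifo.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct my_task;                 /* hypothetical command descriptor */

    struct my_session {
            spinlock_t frwd_lock;   /* TX side: cmdsn, queued_cmdsn, cmdpool kfifo_out */
            spinlock_t back_lock;   /* RX side: cmdsn_exp, cmdsn_max, cmdpool kfifo_in */
            struct kfifo cmdpool;   /* fifo of free task pointers */
            u32 cmdsn, queued_cmdsn, exp_cmdsn, max_cmdsn;
    };

    /* TX fast path: submission contends only on frwd_lock. */
    static struct my_task *my_alloc_task(struct my_session *s)
    {
            struct my_task *task = NULL;

            spin_lock_bh(&s->frwd_lock);
            if (kfifo_out(&s->cmdpool, (void *)&task, sizeof(void *)))
                    s->queued_cmdsn++;      /* sequence numbers advance under frwd_lock */
            spin_unlock_bh(&s->frwd_lock);
            return task;
    }

    /* RX fast path: completion contends only on back_lock. */
    static void my_complete_task(struct my_session *s, struct my_task *task,
                                 u32 exp_cmdsn, u32 max_cmdsn)
    {
            spin_lock_bh(&s->back_lock);
            s->exp_cmdsn = exp_cmdsn;
            s->max_cmdsn = max_cmdsn;
            kfifo_in(&s->cmdpool, (void *)&task, sizeof(void *)); /* task back to pool */
            spin_unlock_bh(&s->back_lock);
    }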

In a steady-state fast-path situation, that is, when one or more
processes/threads submit I/O to an iscsi device and a single kernel
upcall (e.g. a softirq) handles response processing without errors,
this patch eliminates the contention between the
queuecommand()/request response/scsi_done() flows associated with
iscsi sessions.

Between the forward and the backward locks exists a strict locking
hierarchy. The mutual exclusion zone protected by the forward lock can
enclose the mutual exclusion zone protected by the backward lock but not
vice versa.

For example, in iscsi_conn_teardown, or in iscsi_xmit_data when there is
a failure and __iscsi_put_task is called, the backward lock is taken while
the forward lock is still held. On the other hand, if in the RX path a nop
is to be sent, for example in iscsi_handle_reject or __iscsi_complete_pdu,
then the backward lock is released and the forward lock is taken for the
duration of iscsi_send_nopout; afterwards the forward lock is released and
the backward lock is retaken.
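
Continuing the hypothetical my_session sketch above, the two cases might look
roughly as follows; my_fail_task and my_rx_send_nopout are invented names
standing in for the call chains mentioned in the previous paragraph.

    /* Error/teardown path (TX side): back_lock nests inside frwd_lock. */
    static void my_fail_task(struct my_session *s, struct my_task *task)
    {
            spin_lock_bh(&s->frwd_lock);
            /* ... undo TX bookkeeping for the failed task ... */
            spin_lock(&s->back_lock);
            kfifo_in(&s->cmdpool, (void *)&task, sizeof(void *)); /* __iscsi_put_task-style */
            spin_unlock(&s->back_lock);
            spin_unlock_bh(&s->frwd_lock);
    }

    /* RX path that needs to transmit (e.g. a nop-out): the reverse nesting is
     * not allowed, so back_lock is dropped, frwd_lock taken for the send, and
     * back_lock retaken. Caller holds back_lock on entry and on exit. */
    static void my_rx_send_nopout(struct my_session *s)
    {
            spin_unlock(&s->back_lock);
            spin_lock(&s->frwd_lock);
            /* ... allocate a mgmt task from cmdpool and queue the nop-out ... */
            spin_unlock(&s->frwd_lock);
            spin_lock(&s->back_lock);
    }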

libiscsi_tcp uses two kernel fifos: the r2t pool and the r2t queue.

Insertion into and removal from these fifos did not correspond to the
assumptions made by the new forward/backward session locking scheme.

That is, in iscsi_tcp_cleanup_task, which belongs to the RX (backward)
path, an r2t is taken out of the r2t queue and returned to the r2t pool.
In iscsi_tcp_get_curr_r2t, which belongs to the TX (forward) path, an r2t
is likewise returned to the r2t pool and another r2t is pulled from the
r2t queue.

Only in iscsi_tcp_r2t_rsp, which is called in the RX path but can requeue
to the TX path, is an r2t taken from the r2t pool and inserted into the
r2t queue.

To cope with this situation, two spin locks were added, pool2queue and
queue2pool. The former protects extracting from the r2t pool and
inserting into the r2t queue, and the latter protects extracting from
the r2t queue and inserting into the r2t pool.
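
A rough sketch of the intended split, assuming a simplified per-task
structure (my_tcp_task, my_r2t and the helpers are hypothetical; the real
driver keeps the r2t pool inside struct iscsi_pool):

    #include <linux/errno.h>
    #include <linux/kfifo.h>
    #include <linux/spinlock.h>

    struct my_r2t;                  /* hypothetical R2T descriptor */

    struct my_tcp_task {
            struct kfifo r2tpool;   /* free r2t descriptors */
            struct kfifo r2tqueue;  /* r2ts waiting to be transmitted */
            spinlock_t pool2queue;  /* RX side: pool -> queue (iscsi_tcp_r2t_rsp) */
            spinlock_t queue2pool;  /* TX/cleanup side: queue -> pool */
    };

    /* RX path (r2t response): take an r2t from the pool and queue it for TX. */
    static int my_queue_r2t(struct my_tcp_task *t, struct my_r2t **r2t)
    {
            int rc;

            spin_lock(&t->pool2queue);
            rc = kfifo_out(&t->r2tpool, (void *)r2t, sizeof(void *));
            if (rc)
                    kfifo_in(&t->r2tqueue, (void *)r2t, sizeof(void *));
            spin_unlock(&t->pool2queue);
            return rc ? 0 : -EBUSY; /* pool exhausted: target sent too many R2Ts */
    }

    /* TX/cleanup path: return a finished r2t to the pool and pull the next one. */
    static struct my_r2t *my_next_r2t(struct my_tcp_task *t, struct my_r2t *done)
    {
            struct my_r2t *r2t = NULL;

            spin_lock_bh(&t->queue2pool);
            if (done)
                    kfifo_in(&t->r2tpool, (void *)&done, sizeof(void *));
            if (!kfifo_out(&t->r2tqueue, (void *)&r2t, sizeof(void *)))
                    r2t = NULL;     /* queue empty */
            spin_unlock_bh(&t->queue2pool);
            return r2t;
    }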
Signed-off-by: Shlomo Pongratz <shlomop@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
[minor fix up to apply cleanly and compile fix]
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Parent 5d0fddd0
......@@ -233,20 +233,20 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
spin_lock_bh(&session->lock);
spin_lock_bh(&session->frwd_lock);
if (!aborted_task || !aborted_task->sc) {
/* we raced */
spin_unlock_bh(&session->lock);
spin_unlock_bh(&session->frwd_lock);
return SUCCESS;
}
aborted_io_task = aborted_task->dd_data;
if (!aborted_io_task->scsi_cmnd) {
/* raced or invalid command */
spin_unlock_bh(&session->lock);
spin_unlock_bh(&session->frwd_lock);
return SUCCESS;
}
spin_unlock_bh(&session->lock);
spin_unlock_bh(&session->frwd_lock);
/* Invalidate WRB Posted for this Task */
AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
aborted_io_task->pwrb_handle->pwrb,
......@@ -311,9 +311,9 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
/* invalidate iocbs */
cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
spin_lock_bh(&session->lock);
spin_lock_bh(&session->frwd_lock);
if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
spin_unlock_bh(&session->lock);
spin_unlock_bh(&session->frwd_lock);
return FAILED;
}
conn = session->leadconn;
......@@ -342,7 +342,7 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
num_invalidate++;
inv_tbl++;
}
spin_unlock_bh(&session->lock);
spin_unlock_bh(&session->frwd_lock);
inv_tbl = phba->inv_tbl;
nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
......@@ -1185,9 +1185,9 @@ beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
return 1;
}
spin_lock_bh(&session->lock);
spin_lock_bh(&session->back_lock);
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
spin_unlock_bh(&session->lock);
spin_unlock_bh(&session->back_lock);
return 0;
}
......@@ -1606,7 +1606,7 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
pwrb = pwrb_handle->pwrb;
type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type;
spin_lock_bh(&session->lock);
spin_lock_bh(&session->back_lock);
switch (type) {
case HWH_TYPE_IO:
case HWH_TYPE_IO_RD:
......@@ -1645,7 +1645,7 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
break;
}
spin_unlock_bh(&session->lock);
spin_unlock_bh(&session->back_lock);
}
static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
......@@ -4693,9 +4693,9 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
* login/startup related tasks.
*/
beiscsi_conn->login_in_progress = 0;
spin_lock_bh(&session->lock);
spin_lock_bh(&session->back_lock);
beiscsi_cleanup_task(task);
spin_unlock_bh(&session->lock);
spin_unlock_bh(&session->back_lock);
pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid);
......
......@@ -1361,7 +1361,7 @@ int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
u32 datalen = 0;
resp_cqe = (struct bnx2i_cmd_response *)cqe;
spin_lock_bh(&session->lock);
spin_lock_bh(&session->back_lock);
task = iscsi_itt_to_task(conn,
resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
if (!task)
......@@ -1432,7 +1432,7 @@ int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
conn->data, datalen);
fail:
spin_unlock_bh(&session->lock);
spin_unlock_bh(&session->back_lock);
return 0;
}
......@@ -1457,7 +1457,7 @@ static int bnx2i_process_login_resp(struct iscsi_session *session,
int pad_len;
login = (struct bnx2i_login_response *) cqe;
spin_lock(&session->lock);
spin_lock(&session->back_lock);
task = iscsi_itt_to_task(conn,
login->itt & ISCSI_LOGIN_RESPONSE_INDEX);
if (!task)
......@@ -1500,7 +1500,7 @@ static int bnx2i_process_login_resp(struct iscsi_session *session,
bnx2i_conn->gen_pdu.resp_buf,
bnx2i_conn->gen_pdu.resp_wr_ptr - bnx2i_conn->gen_pdu.resp_buf);
done:
spin_unlock(&session->lock);
spin_unlock(&session->back_lock);
return 0;
}
......@@ -1525,7 +1525,7 @@ static int bnx2i_process_text_resp(struct iscsi_session *session,
int pad_len;
text = (struct bnx2i_text_response *) cqe;
spin_lock(&session->lock);
spin_lock(&session->back_lock);
task = iscsi_itt_to_task(conn, text->itt & ISCSI_LOGIN_RESPONSE_INDEX);
if (!task)
goto done;
......@@ -1561,7 +1561,7 @@ static int bnx2i_process_text_resp(struct iscsi_session *session,
bnx2i_conn->gen_pdu.resp_wr_ptr -
bnx2i_conn->gen_pdu.resp_buf);
done:
spin_unlock(&session->lock);
spin_unlock(&session->back_lock);
return 0;
}
......@@ -1584,7 +1584,7 @@ static int bnx2i_process_tmf_resp(struct iscsi_session *session,
struct iscsi_tm_rsp *resp_hdr;
tmf_cqe = (struct bnx2i_tmf_response *)cqe;
spin_lock(&session->lock);
spin_lock(&session->back_lock);
task = iscsi_itt_to_task(conn,
tmf_cqe->itt & ISCSI_TMF_RESPONSE_INDEX);
if (!task)
......@@ -1600,7 +1600,7 @@ static int bnx2i_process_tmf_resp(struct iscsi_session *session,
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
done:
spin_unlock(&session->lock);
spin_unlock(&session->back_lock);
return 0;
}
......@@ -1623,7 +1623,7 @@ static int bnx2i_process_logout_resp(struct iscsi_session *session,
struct iscsi_logout_rsp *resp_hdr;
logout = (struct bnx2i_logout_response *) cqe;
spin_lock(&session->lock);
spin_lock(&session->back_lock);
task = iscsi_itt_to_task(conn,
logout->itt & ISCSI_LOGOUT_RESPONSE_INDEX);
if (!task)
......@@ -1647,7 +1647,7 @@ static int bnx2i_process_logout_resp(struct iscsi_session *session,
bnx2i_conn->ep->state = EP_STATE_LOGOUT_RESP_RCVD;
done:
spin_unlock(&session->lock);
spin_unlock(&session->back_lock);
return 0;
}
......@@ -1668,12 +1668,12 @@ static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session,
struct iscsi_task *task;
nop_in = (struct bnx2i_nop_in_msg *)cqe;
spin_lock(&session->lock);
spin_lock(&session->back_lock);
task = iscsi_itt_to_task(conn,
nop_in->itt & ISCSI_NOP_IN_MSG_INDEX);
if (task)
__iscsi_put_task(task);
spin_unlock(&session->lock);
spin_unlock(&session->back_lock);
}
/**
......@@ -1712,7 +1712,7 @@ static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
nop_in = (struct bnx2i_nop_in_msg *)cqe;
spin_lock(&session->lock);
spin_lock(&session->back_lock);
hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr;
memset(hdr, 0, sizeof(struct iscsi_hdr));
hdr->opcode = nop_in->op_code;
......@@ -1738,7 +1738,7 @@ static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
}
done:
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
spin_unlock(&session->lock);
spin_unlock(&session->back_lock);
return tgt_async_nop;
}
......@@ -1771,7 +1771,7 @@ static void bnx2i_process_async_mesg(struct iscsi_session *session,
return;
}
spin_lock(&session->lock);
spin_lock(&session->back_lock);
resp_hdr = (struct iscsi_async *) &bnx2i_conn->gen_pdu.resp_hdr;
memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
resp_hdr->opcode = async_cqe->op_code;
......@@ -1790,7 +1790,7 @@ static void bnx2i_process_async_mesg(struct iscsi_session *session,
__iscsi_complete_pdu(bnx2i_conn->cls_conn->dd_data,
(struct iscsi_hdr *)resp_hdr, NULL, 0);
spin_unlock(&session->lock);
spin_unlock(&session->back_lock);
}
......@@ -1817,7 +1817,7 @@ static void bnx2i_process_reject_mesg(struct iscsi_session *session,
} else
bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
spin_lock(&session->lock);
spin_lock(&session->back_lock);
hdr = (struct iscsi_reject *) &bnx2i_conn->gen_pdu.resp_hdr;
memset(hdr, 0, sizeof(struct iscsi_hdr));
hdr->opcode = reject->op_code;
......@@ -1828,7 +1828,7 @@ static void bnx2i_process_reject_mesg(struct iscsi_session *session,
hdr->ffffffff = cpu_to_be32(RESERVED_ITT);
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data,
reject->data_length);
spin_unlock(&session->lock);
spin_unlock(&session->back_lock);
}
/**
......@@ -1848,13 +1848,13 @@ static void bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session,
struct iscsi_task *task;
cmd_clean_rsp = (struct bnx2i_cleanup_response *)cqe;
spin_lock(&session->lock);
spin_lock(&session->back_lock);
task = iscsi_itt_to_task(conn,
cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
if (!task)
printk(KERN_ALERT "bnx2i: cmd clean ITT %x not active\n",
cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
spin_unlock(&session->lock);
spin_unlock(&session->back_lock);
complete(&bnx2i_conn->cmd_cleanup_cmpl);
}
......@@ -1921,11 +1921,11 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
int rc = 0;
int cpu;
spin_lock(&session->lock);
spin_lock(&session->back_lock);
task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data,
cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
if (!task || !task->sc) {
spin_unlock(&session->lock);
spin_unlock(&session->back_lock);
return -EINVAL;
}
sc = task->sc;
......@@ -1935,7 +1935,7 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
else
cpu = sc->request->cpu;
spin_unlock(&session->lock);
spin_unlock(&session->back_lock);
p = &per_cpu(bnx2i_percpu, cpu);
spin_lock(&p->p_work_lock);
......
......@@ -1169,10 +1169,10 @@ static void bnx2i_cleanup_task(struct iscsi_task *task)
if (task->state == ISCSI_TASK_ABRT_TMF) {
bnx2i_send_cmd_cleanup_req(hba, task->dd_data);
spin_unlock_bh(&conn->session->lock);
spin_unlock_bh(&conn->session->back_lock);
wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
spin_lock_bh(&conn->session->lock);
spin_lock_bh(&conn->session->back_lock);
}
bnx2i_iscsi_unmap_sg_list(task->dd_data);
}
......@@ -2059,7 +2059,7 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
goto out;
if (session) {
spin_lock_bh(&session->lock);
spin_lock_bh(&session->frwd_lock);
if (bnx2i_ep->state != EP_STATE_TCP_FIN_RCVD) {
if (session->state == ISCSI_STATE_LOGGING_OUT) {
if (bnx2i_ep->state == EP_STATE_LOGOUT_SENT) {
......@@ -2075,7 +2075,7 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
} else
close = 1;
spin_unlock_bh(&session->lock);
spin_unlock_bh(&session->frwd_lock);
}
bnx2i_ep->state = EP_STATE_DISCONN_START;
......
......@@ -593,9 +593,9 @@ static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
iscsi_sw_tcp_conn_restore_callbacks(conn);
sock_put(sock->sk);
spin_lock_bh(&session->lock);
spin_lock_bh(&session->frwd_lock);
tcp_sw_conn->sock = NULL;
spin_unlock_bh(&session->lock);
spin_unlock_bh(&session->frwd_lock);
sockfd_put(sock);
}
......@@ -663,10 +663,10 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
if (err)
goto free_socket;
spin_lock_bh(&session->lock);
spin_lock_bh(&session->frwd_lock);
/* bind iSCSI connection and socket */
tcp_sw_conn->sock = sock;
spin_unlock_bh(&session->lock);
spin_unlock_bh(&session->frwd_lock);
/* setup Socket parameters */
sk = sock->sk;
......@@ -726,14 +726,14 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
switch(param) {
case ISCSI_PARAM_CONN_PORT:
case ISCSI_PARAM_CONN_ADDRESS:
spin_lock_bh(&conn->session->lock);
spin_lock_bh(&conn->session->frwd_lock);
if (!tcp_sw_conn || !tcp_sw_conn->sock) {
spin_unlock_bh(&conn->session->lock);
spin_unlock_bh(&conn->session->frwd_lock);
return -ENOTCONN;
}
rc = kernel_getpeername(tcp_sw_conn->sock,
(struct sockaddr *)&addr, &len);
spin_unlock_bh(&conn->session->lock);
spin_unlock_bh(&conn->session->frwd_lock);
if (rc)
return rc;
......@@ -759,23 +759,23 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
switch (param) {
case ISCSI_HOST_PARAM_IPADDRESS:
spin_lock_bh(&session->lock);
spin_lock_bh(&session->frwd_lock);
conn = session->leadconn;
if (!conn) {
spin_unlock_bh(&session->lock);
spin_unlock_bh(&session->frwd_lock);
return -ENOTCONN;
}
tcp_conn = conn->dd_data;
tcp_sw_conn = tcp_conn->dd_data;
if (!tcp_sw_conn->sock) {
spin_unlock_bh(&session->lock);
spin_unlock_bh(&session->frwd_lock);
return -ENOTCONN;
}
rc = kernel_getsockname(tcp_sw_conn->sock,
(struct sockaddr *)&addr, &len);
spin_unlock_bh(&session->lock);
spin_unlock_bh(&session->frwd_lock);
if (rc)
return rc;
......
This diff is collapsed.
......@@ -446,7 +446,7 @@ iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
* iscsi_tcp_cleanup_task - free tcp_task resources
* @task: iscsi task
*
* must be called with session lock
* must be called with session back_lock
*/
void iscsi_tcp_cleanup_task(struct iscsi_task *task)
{
......@@ -457,6 +457,7 @@ void iscsi_tcp_cleanup_task(struct iscsi_task *task)
if (!task->sc)
return;
spin_lock_bh(&tcp_task->queue2pool);
/* flush task's r2t queues */
while (kfifo_out(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
......@@ -470,6 +471,7 @@ void iscsi_tcp_cleanup_task(struct iscsi_task *task)
sizeof(void*));
tcp_task->r2t = NULL;
}
spin_unlock_bh(&tcp_task->queue2pool);
}
EXPORT_SYMBOL_GPL(iscsi_tcp_cleanup_task);
......@@ -577,11 +579,13 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
return ISCSI_ERR_DATALEN;
}
spin_lock(&tcp_task->pool2queue);
rc = kfifo_out(&tcp_task->r2tpool.queue, (void *)&r2t, sizeof(void *));
if (!rc) {
iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. "
"Target has sent more R2Ts than it "
"negotiated for or driver has leaked.\n");
spin_unlock(&tcp_task->pool2queue);
return ISCSI_ERR_PROTO;
}
......@@ -596,6 +600,7 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
tcp_task->exp_datasn = r2tsn + 1;
kfifo_in(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
conn->r2t_pdus_cnt++;
spin_unlock(&tcp_task->pool2queue);
iscsi_requeue_task(task);
return 0;
......@@ -668,14 +673,14 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
switch(opcode) {
case ISCSI_OP_SCSI_DATA_IN:
spin_lock(&conn->session->lock);
spin_lock(&conn->session->back_lock);
task = iscsi_itt_to_ctask(conn, hdr->itt);
if (!task)
rc = ISCSI_ERR_BAD_ITT;
else
rc = iscsi_tcp_data_in(conn, task);
if (rc) {
spin_unlock(&conn->session->lock);
spin_unlock(&conn->session->back_lock);
break;
}
......@@ -708,11 +713,11 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
tcp_conn->in.datalen,
iscsi_tcp_process_data_in,
rx_hash);
spin_unlock(&conn->session->lock);
spin_unlock(&conn->session->back_lock);
return rc;
}
rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
spin_unlock(&conn->session->lock);
spin_unlock(&conn->session->back_lock);
break;
case ISCSI_OP_SCSI_CMD_RSP:
if (tcp_conn->in.datalen) {
......@@ -722,18 +727,20 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
break;
case ISCSI_OP_R2T:
spin_lock(&conn->session->lock);
spin_lock(&conn->session->back_lock);
task = iscsi_itt_to_ctask(conn, hdr->itt);
spin_unlock(&conn->session->back_lock);
if (!task)
rc = ISCSI_ERR_BAD_ITT;
else if (ahslen)
rc = ISCSI_ERR_AHSLEN;
else if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
task->last_xfer = jiffies;
spin_lock(&conn->session->frwd_lock);
rc = iscsi_tcp_r2t_rsp(conn, task);
spin_unlock(&conn->session->frwd_lock);
} else
rc = ISCSI_ERR_PROTO;
spin_unlock(&conn->session->lock);
break;
case ISCSI_OP_LOGIN_RSP:
case ISCSI_OP_TEXT_RSP:
......@@ -981,14 +988,13 @@ EXPORT_SYMBOL_GPL(iscsi_tcp_task_init);
static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
{
struct iscsi_session *session = task->conn->session;
struct iscsi_tcp_task *tcp_task = task->dd_data;
struct iscsi_r2t_info *r2t = NULL;
if (iscsi_task_has_unsol_data(task))
r2t = &task->unsol_r2t;
else {
spin_lock_bh(&session->lock);
spin_lock_bh(&tcp_task->queue2pool);
if (tcp_task->r2t) {
r2t = tcp_task->r2t;
/* Continue with this R2T? */
......@@ -1010,7 +1016,7 @@ static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
else
r2t = tcp_task->r2t;
}
spin_unlock_bh(&session->lock);
spin_unlock_bh(&tcp_task->queue2pool);
}
return r2t;
......@@ -1140,6 +1146,8 @@ int iscsi_tcp_r2tpool_alloc(struct iscsi_session *session)
iscsi_pool_free(&tcp_task->r2tpool);
goto r2t_alloc_fail;
}
spin_lock_init(&tcp_task->pool2queue);
spin_lock_init(&tcp_task->queue2pool);
}
return 0;
......
......@@ -385,9 +385,9 @@ static void qla4xxx_passthru_status_entry(struct scsi_qla_host *ha,
cls_conn = ddb_entry->conn;
conn = cls_conn->dd_data;
spin_lock(&conn->session->lock);
spin_lock(&conn->session->back_lock);
task = iscsi_itt_to_task(conn, itt);
spin_unlock(&conn->session->lock);
spin_unlock(&conn->session->back_lock);
if (task == NULL) {
ql4_printk(KERN_ERR, ha, "%s: Task is NULL\n", __func__);
......
......@@ -327,12 +327,19 @@ struct iscsi_session {
struct iscsi_transport *tt;
struct Scsi_Host *host;
struct iscsi_conn *leadconn; /* leading connection */
spinlock_t lock; /* protects session state, *
* sequence numbers, *
/* Between the forward and the backward locks exists a strict locking
* hierarchy. The mutual exclusion zone protected by the forward lock
* can enclose the mutual exclusion zone protected by the backward lock
* but not vice versa.
*/
spinlock_t frwd_lock; /* protects session state, *
* cmdsn, queued_cmdsn *
* session resources: *
* - cmdpool, *
* - mgmtpool, *
* - r2tpool */
* - cmdpool kfifo_out , *
* - mgmtpool, */
spinlock_t back_lock; /* protects cmdsn_exp *
* cmdsn_max, *
* cmdpool kfifo_in */
int state; /* session state */
int age; /* counts session re-opens */
......
......@@ -83,6 +83,8 @@ struct iscsi_tcp_task {
struct iscsi_pool r2tpool;
struct kfifo r2tqueue;
void *dd_data;
spinlock_t pool2queue;
spinlock_t queue2pool;
};
enum {
......