Commit e9684678 authored by Bart Van Assche, committed by David Dillow

IB/srp: stop sharing the host lock with SCSI

We don't need protection against the SCSI stack, so use our own lock to
allow parallel progress on separate CPUs.
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
[ broken out and small cleanups by David Dillow ]
Signed-off-by: David Dillow <dillowda@ornl.gov>
Parent 94a9174c
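The pattern this commit introduces, shown in isolation below: a driver-private spinlock protects the driver's own bookkeeping (request-limit counter, free lists) instead of the SCSI midlayer's shost->host_lock. This is a minimal, hypothetical sketch for illustration only; the names my_target, my_target_init and my_return_req are not part of the ib_srp driver.

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/list.h>

/* Hypothetical per-target state with its own lock, mirroring the new
 * srp_target_port::lock: shost->host_lock is no longer taken for
 * driver-private bookkeeping. */
struct my_target {
        spinlock_t              lock;      /* protects req_lim and free_reqs */
        s32                     req_lim;
        struct list_head        free_reqs;
};

static void my_target_init(struct my_target *t)
{
        spin_lock_init(&t->lock);          /* instead of reusing shost->host_lock */
        INIT_LIST_HEAD(&t->free_reqs);
        t->req_lim = 0;
}

static void my_return_req(struct my_target *t, struct list_head *req)
{
        unsigned long flags;

        /* Take the private lock; completions on different CPUs no longer
         * serialize against the SCSI core's host_lock. */
        spin_lock_irqsave(&t->lock, flags);
        t->req_lim++;
        list_add_tail(req, &t->free_reqs);
        spin_unlock_irqrestore(&t->lock, flags);
}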
@@ -447,12 +447,12 @@ static bool srp_change_state(struct srp_target_port *target,
 {
 	bool changed = false;
 
-	spin_lock_irq(target->scsi_host->host_lock);
+	spin_lock_irq(&target->lock);
 	if (target->state == old) {
 		target->state = new;
 		changed = true;
 	}
-	spin_unlock_irq(target->scsi_host->host_lock);
+	spin_unlock_irq(&target->lock);
 
 	return changed;
 }
@@ -555,11 +555,11 @@ static void srp_remove_req(struct srp_target_port *target,
 	unsigned long flags;
 
 	srp_unmap_data(req->scmnd, target, req);
-	spin_lock_irqsave(target->scsi_host->host_lock, flags);
+	spin_lock_irqsave(&target->lock, flags);
 	target->req_lim += req_lim_delta;
 	req->scmnd = NULL;
 	list_add_tail(&req->list, &target->free_reqs);
-	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+	spin_unlock_irqrestore(&target->lock, flags);
 }
 
 static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
@@ -634,13 +634,13 @@ static int srp_reconnect_target(struct srp_target_port *target)
 	 * Schedule our work inside the lock to avoid a race with
 	 * the flush_scheduled_work() in srp_remove_one().
 	 */
-	spin_lock_irq(target->scsi_host->host_lock);
+	spin_lock_irq(&target->lock);
 	if (target->state == SRP_TARGET_CONNECTING) {
 		target->state = SRP_TARGET_DEAD;
 		INIT_WORK(&target->work, srp_remove_work);
 		schedule_work(&target->work);
 	}
-	spin_unlock_irq(target->scsi_host->host_lock);
+	spin_unlock_irq(&target->lock);
 
 	return ret;
 }
@@ -829,17 +829,16 @@ static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(target->scsi_host->host_lock, flags);
+	spin_lock_irqsave(&target->lock, flags);
 	list_add(&iu->list, &target->free_tx);
 	if (iu_type != SRP_IU_RSP)
 		++target->req_lim;
-	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+	spin_unlock_irqrestore(&target->lock, flags);
 }
 
 /*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and free_tx.  If IU is not sent, it must be returned using
- * srp_put_tx_iu().
+ * Must be called with target->lock held to protect req_lim and free_tx.
+ * If IU is not sent, it must be returned using srp_put_tx_iu().
  *
  * Note:
  * An upper limit for the number of allocated information units for each
@@ -920,9 +919,9 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
 	unsigned long flags;
 
 	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
-		spin_lock_irqsave(target->scsi_host->host_lock, flags);
+		spin_lock_irqsave(&target->lock, flags);
 		target->req_lim += be32_to_cpu(rsp->req_lim_delta);
-		spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+		spin_unlock_irqrestore(&target->lock, flags);
 
 		target->tsk_mgmt_status = -1;
 		if (be32_to_cpu(rsp->resp_data_len) >= 4)
@@ -963,10 +962,10 @@ static int srp_response_common(struct srp_target_port *target, s32 req_delta,
 	struct srp_iu *iu;
 	int err;
 
-	spin_lock_irqsave(target->scsi_host->host_lock, flags);
+	spin_lock_irqsave(&target->lock, flags);
 	target->req_lim += req_delta;
 	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
-	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+	spin_unlock_irqrestore(&target->lock, flags);
 
 	if (!iu) {
 		shost_printk(KERN_ERR, target->scsi_host, PFX
@@ -1131,14 +1130,14 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 		return 0;
 	}
 
-	spin_lock_irqsave(shost->host_lock, flags);
+	spin_lock_irqsave(&target->lock, flags);
 	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
 	if (iu) {
 		req = list_first_entry(&target->free_reqs, struct srp_request,
 				       list);
 		list_del(&req->list);
 	}
-	spin_unlock_irqrestore(shost->host_lock, flags);
+	spin_unlock_irqrestore(&target->lock, flags);
 
 	if (!iu)
 		goto err;
@@ -1184,9 +1183,9 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 err_iu:
 	srp_put_tx_iu(target, iu, SRP_IU_CMD);
 
-	spin_lock_irqsave(shost->host_lock, flags);
+	spin_lock_irqsave(&target->lock, flags);
 	list_add(&req->list, &target->free_reqs);
-	spin_unlock_irqrestore(shost->host_lock, flags);
+	spin_unlock_irqrestore(&target->lock, flags);
 
 err:
 	return SCSI_MLQUEUE_HOST_BUSY;
@@ -1451,9 +1450,9 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
 	init_completion(&target->tsk_mgmt_done);
 
-	spin_lock_irq(target->scsi_host->host_lock);
+	spin_lock_irq(&target->lock);
 	iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
-	spin_unlock_irq(target->scsi_host->host_lock);
+	spin_unlock_irq(&target->lock);
 
 	if (!iu)
 		return -1;
@@ -1957,6 +1956,7 @@ static ssize_t srp_create_target(struct device *dev,
 	target->scsi_host  = target_host;
 	target->srp_host   = host;
 
+	spin_lock_init(&target->lock);
 	INIT_LIST_HEAD(&target->free_tx);
 	INIT_LIST_HEAD(&target->free_reqs);
 	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
@@ -2186,9 +2186,9 @@ static void srp_remove_one(struct ib_device *device)
 	 */
 	spin_lock(&host->target_lock);
 	list_for_each_entry(target, &host->target_list, list) {
-		spin_lock_irq(target->scsi_host->host_lock);
+		spin_lock_irq(&target->lock);
 		target->state = SRP_TARGET_REMOVED;
-		spin_unlock_irq(target->scsi_host->host_lock);
+		spin_unlock_irq(&target->lock);
 	}
 	spin_unlock(&host->target_lock);
@@ -144,6 +144,8 @@ struct srp_target_port {
 	struct srp_iu	       *rx_ring[SRP_RQ_SIZE];
 
+	spinlock_t		lock;
+
 	struct list_head	free_tx;
 	struct srp_iu	       *tx_ring[SRP_SQ_SIZE];