From 92f699cd4b50add131623e1b81b71665fd1a6acd Mon Sep 17 00:00:00 2001
From: chenxiang
Date: Sun, 13 Jan 2019 15:56:10 +0800
Subject: [PATCH] scsi: hisi_sas: optimise the performance of single disk

For auto-control irq affinity mode, choose the delivery queue (DQ) for
an IO according to the CPU of the thread delivering that IO. This
reduces the performance regression caused by fio and the CQ interrupts
being processed on different NUMA nodes. For user-control irq affinity
mode, keep the behaviour as before.

To realize this, the usage of the dq lock and the sas_dev lock also
needs to be separated.

Signed-off-by: Xiang Chen
Feature or Bugfix: Bugfix
Signed-off-by: chenxiang (M)
Reviewed-by: tanxiaofei
Signed-off-by: Yang Yingliang
---
 drivers/scsi/hisi_sas/hisi_sas.h       |  5 +++++
 drivers/scsi/hisi_sas/hisi_sas_main.c  | 25 ++++++++++++++++++-------
 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c |  1 +
 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 22 ++++++++++++++++++++++
 4 files changed, 46 insertions(+), 7 deletions(-)

diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 86cbc8c96c4d..e3b4d5a25c54 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -185,6 +185,7 @@ struct hisi_sas_device {
         enum sas_device_type dev_type;
         int device_id;
         int sata_idx;
+        spinlock_t lock;
 };
 
 struct hisi_sas_tmf_task {
@@ -207,6 +208,7 @@ struct hisi_sas_slot {
         int cmplt_queue_slot;
         int abort;
         int ready;
+        int device_id;
         void *cmd_hdr;
         dma_addr_t cmd_hdr_dma;
         struct timer_list internal_abort_timer;
@@ -365,6 +367,9 @@ struct hisi_hba {
 
         struct dentry *debugfs_dir;
         struct dentry *dump_dentry;
+
+        bool user_ctl_irq;
+        unsigned int reply_map[NR_CPUS];
 };
 
 /* Generic HW DMA host memory structures */
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index f048f9063a0d..5decd6d4b8ee 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -242,8 +242,9 @@ static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
 void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
                              struct hisi_sas_slot *slot)
 {
-        struct hisi_sas_dq *dq = &hisi_hba->dq[slot->dlvry_queue];
         unsigned long flags;
+        int device_id = slot->device_id;
+        struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id];
 
         if (task) {
                 struct device *dev = hisi_hba->dev;
@@ -270,9 +271,9 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
                 }
         }
 
-        spin_lock_irqsave(&dq->lock, flags);
+        spin_lock_irqsave(&sas_dev->lock, flags);
         list_del_init(&slot->entry);
-        spin_unlock_irqrestore(&dq->lock, flags);
+        spin_unlock_irqrestore(&sas_dev->lock, flags);
 
         memset(slot, 0, offsetof(struct hisi_sas_slot, buf));
 
@@ -462,6 +463,7 @@ static int hisi_sas_task_prep(struct sas_task *task,
         struct hisi_sas_dq *dq;
         unsigned long flags;
         int wr_q_index;
+        unsigned int dq_index = hisi_hba->reply_map[raw_smp_processor_id()];
 
         if (DEV_IS_GONE(sas_dev)) {
                 if (sas_dev)
@@ -474,7 +476,10 @@ static int hisi_sas_task_prep(struct sas_task *task,
                 return -ECOMM;
         }
 
-        *dq_pointer = dq = sas_dev->dq;
+        if (hisi_hba->user_ctl_irq)
+                *dq_pointer = dq = sas_dev->dq;
+        else
+                *dq_pointer = dq = &hisi_hba->dq[dq_index];
 
         port = to_hisi_sas_port(sas_port);
         if (port && !port->port_attached) {
@@ -486,7 +491,6 @@ static int hisi_sas_task_prep(struct sas_task *task,
                 return -ECOMM;
         }
 
-
         rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
                               &n_elem_req, &n_elem_resp);
         if (rc < 0)
@@ -528,12 +532,15 @@ static int hisi_sas_task_prep(struct sas_task *task,
         }
 
         list_add_tail(&slot->delivery,
                       &dq->list);
-        list_add_tail(&slot->entry, &sas_dev->list);
         spin_unlock_irqrestore(&dq->lock, flags);
+        spin_lock_irqsave(&sas_dev->lock, flags);
+        list_add_tail(&slot->entry, &sas_dev->list);
+        spin_unlock_irqrestore(&sas_dev->lock, flags);
 
         dlvry_queue = dq->id;
         dlvry_queue_slot = wr_q_index;
+        slot->device_id = sas_dev->device_id;
         slot->n_elem = n_elem;
         slot->n_elem_dif = n_elem_dif;
         slot->dlvry_queue = dlvry_queue;
@@ -702,6 +709,7 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
                         sas_dev->hisi_hba = hisi_hba;
                         sas_dev->sas_device = device;
                         sas_dev->dq = dq;
+                        spin_lock_init(&sas_dev->lock);
                         INIT_LIST_HEAD(&hisi_hba->devices[i].list);
                         break;
                 }
@@ -1916,10 +1924,14 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
         }
 
         list_add_tail(&slot->delivery, &dq->list);
         spin_unlock_irqrestore(&dq->lock, flags_dq);
+        spin_lock_irqsave(&sas_dev->lock, flags);
+        list_add_tail(&slot->entry, &sas_dev->list);
+        spin_unlock_irqrestore(&sas_dev->lock, flags);
 
         dlvry_queue = dq->id;
         dlvry_queue_slot = wr_q_index;
+        slot->device_id = sas_dev->device_id;
         slot->n_elem = n_elem;
         slot->dlvry_queue = dlvry_queue;
         slot->dlvry_queue_slot = dlvry_queue_slot;
@@ -1943,7 +1955,6 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
         WRITE_ONCE(slot->ready, 1);
         /* send abort command to the chip */
         spin_lock_irqsave(&dq->lock, flags);
-        list_add_tail(&slot->entry, &sas_dev->list);
         hisi_hba->hw->start_delivery(dq);
         spin_unlock_irqrestore(&dq->lock, flags);
 
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 09726691bd95..17125a869bad 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -871,6 +871,7 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device)
                         sas_dev->sas_device = device;
                         sas_dev->sata_idx = sata_idx;
                         sas_dev->dq = dq;
+                        spin_lock_init(&sas_dev->lock);
                         INIT_LIST_HEAD(&hisi_hba->devices[i].list);
                         break;
                 }
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 7c896ddd9c73..922ff1002c01 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -2138,6 +2138,26 @@ static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
         return IRQ_HANDLED;
 }
 
+static void setup_reply_map_v3_hw(struct hisi_hba *hisi_hba)
+{
+        const struct cpumask *mask;
+        int queue, cpu;
+
+        for (queue = 0; queue < hisi_hba->queue_count; queue++) {
+                mask = pci_irq_get_affinity(hisi_hba->pci_dev, queue + 16);
+                if (!mask)
+                        goto fallback;
+
+                for_each_cpu(cpu, mask)
+                        hisi_hba->reply_map[cpu] = queue;
+        }
+        return;
+
+fallback:
+        for_each_possible_cpu(cpu)
+                hisi_hba->reply_map[cpu] = cpu % hisi_hba->queue_count;
+}
+
 static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
 {
         struct device *dev = hisi_hba->dev;
@@ -2156,6 +2176,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
                 vectors = pci_alloc_irq_vectors_affinity(hisi_hba->pci_dev,
                                                          max_msi, max_msi,
                                                          PCI_IRQ_MSI | PCI_IRQ_AFFINITY, &desc);
+                setup_reply_map_v3_hw(hisi_hba);
         }
 
         if (vectors < max_msi) {
@@ -2774,6 +2795,7 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
         hisi_hba->shost = shost;
         SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
         hisi_hba->enable_dix_dif = enable_dix_dif;
+        hisi_hba->user_ctl_irq = user_ctl_irq;
 
         timer_setup(&hisi_hba->timer, NULL, 0);
 
--
GitLab
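
As an aside for reviewers, a minimal standalone sketch of the reply_map
scheme follows (illustration only, not part of the applied patch; the CPU
count, queue count and helper names are hypothetical). It mimics the
fallback branch of setup_reply_map_v3_hw() (cpu % queue_count) and the
lookup hisi_sas_task_prep() does via reply_map[raw_smp_processor_id()].

/* Standalone userspace sketch -- illustration only, not driver code. */
#include <stdio.h>

#define NR_CPUS     8   /* hypothetical CPU count            */
#define QUEUE_COUNT 4   /* hypothetical delivery/completion queues */

static unsigned int reply_map[NR_CPUS];

/* Mimics the fallback path of setup_reply_map_v3_hw():
 * spread the CPUs over the queues round-robin.
 */
static void setup_reply_map_fallback(void)
{
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                reply_map[cpu] = cpu % QUEUE_COUNT;
}

/* Conceptual equivalent of the task-prep lookup: pick the DQ that is
 * serviced by the CPU currently delivering the IO.
 */
static unsigned int pick_dq_index(int current_cpu)
{
        return reply_map[current_cpu];
}

int main(void)
{
        int cpu;

        setup_reply_map_fallback();
        for (cpu = 0; cpu < NR_CPUS; cpu++)
                printf("cpu %d -> dq %u\n", cpu, pick_dq_index(cpu));
        return 0;
}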