diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 75776b513cf00915f0324b31fd3598345dc145a1..c25c79b46a1816a792764c39664a812d072b53ac 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -281,6 +281,8 @@ struct hisi_sas_hw {
 				    int delay_ms, int timeout_ms);
 	void (*snapshot_prepare)(struct hisi_hba *hisi_hba);
 	void (*snapshot_restore)(struct hisi_hba *hisi_hba);
+	const struct cpumask *(*get_managed_irq_aff)(struct hisi_hba
+						     *hisi_hba, int queue);
 	int max_command_entries;
 	int complete_hdr_size;
 	struct scsi_host_template *sht;
@@ -369,6 +371,7 @@ struct hisi_hba {
 
 	bool user_ctl_irq;
 	unsigned int reply_map[NR_CPUS];
+	int nvecs;
 };
 
 /* Generic HW DMA host memory structures */
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index bd08776b238d337f3305d570c1837b671d63a5b3..3bdbd04d15cda4ec263339a77e661f4f4609c96e 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -2088,7 +2088,23 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
 		return _hisi_sas_internal_task_abort(hisi_hba, device,
 						     abort_flag, tag, dq);
 	case HISI_SAS_INT_ABT_DEV:
-		for (i = 0; i < hisi_hba->queue_count; i++) {
+		for (i = 0; i < hisi_hba->nvecs; i++) {
+			const struct cpumask *mask = NULL;
+
+			if (hisi_hba->hw->get_managed_irq_aff)
+				mask = hisi_hba->hw->get_managed_irq_aff(
+						hisi_hba, i);
+			/*
+			 * The kernel never leaves an unmanaged IRQ's
+			 * affinity pointing only at offline CPUs, so
+			 * with no managed mask, issue the internal
+			 * abort on every queue. A managed MSI mask
+			 * (PCI_IRQ_AFFINITY) may cover only offline
+			 * CPUs, so first check that an online CPU
+			 * can handle the CQ interrupt.
+			 */
+			if (mask && !cpumask_intersects(cpu_online_mask, mask))
+				continue;
 			dq = &hisi_hba->dq[i];
 			rc = _hisi_sas_internal_task_abort(hisi_hba, device,
 							   abort_flag, tag, dq);
@@ -2171,7 +2187,7 @@ void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
 {
 	int i;
 
-	for (i = 0; i < hisi_hba->queue_count; i++) {
+	for (i = 0; i < hisi_hba->nvecs; i++) {
 		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
 
 		tasklet_kill(&cq->tasklet);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index f19f252bf7448805206681946132e9c44d8e54b5..b22b0ab40d5ddae89f228619b99ed950a9d745dc 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -3416,6 +3416,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
 		tasklet_init(t, cq_tasklet_v2_hw, (unsigned long)cq);
 	}
 
+	hisi_hba->nvecs = hisi_hba->queue_count;
 	return 0;
 
 free_cq_int_irqs:
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index cc0e5ba4d17ff40662a4b7bfe8fc8ae59f2ac4d6..d6ca4f12e87fe67e92c8ba67e61fcbe15c1236e8 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -406,6 +406,7 @@ struct hisi_sas_err_record_v3 {
 #define T10_CHK_MSK_OFF			16
 
 #define HISI_SAS_CQ_INT_BASE_VECTORS_V3_HW	16
+#define HISI_SAS_MIN_VECTORS_V3_HW		17
 
 #define HISI_SAS_IS_RW_CMD(op) \
 	((op == READ_6) || (op == WRITE_6) || \
@@ -2131,16 +2132,15 @@ static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
 	return IRQ_HANDLED;
 }
 
-static void setup_reply_map_v3_hw(struct hisi_hba *hisi_hba)
+static void setup_reply_map_v3_hw(struct hisi_hba *hisi_hba, int nvecs)
 {
 	const struct cpumask *mask;
 	int queue, cpu;
 
-	for (queue = 0; queue < hisi_hba->queue_count; queue++) {
+	for (queue = 0; queue < nvecs; queue++) {
 		mask = pci_irq_get_affinity(hisi_hba->pci_dev, queue + 16);
 		if (!mask)
 			goto fallback;
-
 		for_each_cpu(cpu, mask)
 			hisi_hba->reply_map[cpu] = queue;
 	}
@@ -2167,16 +2167,21 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
 					      max_msi, PCI_IRQ_MSI);
 	} else {
 		vectors = pci_alloc_irq_vectors_affinity(hisi_hba->pci_dev,
-				max_msi, max_msi,
-				PCI_IRQ_MSI | PCI_IRQ_AFFINITY, &desc);
-		setup_reply_map_v3_hw(hisi_hba);
+				HISI_SAS_MIN_VECTORS_V3_HW,
+				max_msi,
+				PCI_IRQ_MSI |
+				PCI_IRQ_AFFINITY,
+				&desc);
+		setup_reply_map_v3_hw(hisi_hba, vectors -
+				HISI_SAS_CQ_INT_BASE_VECTORS_V3_HW);
 	}
 
-	if (vectors < max_msi) {
-		dev_err(dev, "could not allocate all msi (%d)\n", vectors);
+	if (vectors < HISI_SAS_MIN_VECTORS_V3_HW) {
+		dev_err(dev, "could not allocate enough MSI vectors (%d)\n", vectors);
 		return -ENOENT;
 	}
 
+	hisi_hba->nvecs = vectors - HISI_SAS_CQ_INT_BASE_VECTORS_V3_HW;
 	rc = devm_request_irq(dev, pci_irq_vector(pdev, 1),
 			      int_phy_up_down_bcast_v3_hw, 0,
 			      DRV_NAME " phy", hisi_hba);
@@ -2205,7 +2210,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
 	}
 
 	/* Init tasklets for cq only */
-	for (i = 0; i < hisi_hba->queue_count; i++) {
+	for (i = 0; i < hisi_hba->nvecs; i++) {
 		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
 		struct tasklet_struct *t = &cq->tasklet;
 		int nr = hisi_sas_intr_conv ? 16 : 16 + i;
@@ -2701,6 +2706,16 @@ static void debugfs_snapshot_restore_v3_hw(struct hisi_hba *hisi_hba)
 	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
 }
 
+static const struct cpumask *
+get_managed_irq_aff_v3_hw(struct hisi_hba *hisi_hba, int queue)
+{
+	if (user_ctl_irq)
+		return NULL;
+
+	return pci_irq_get_affinity(hisi_hba->pci_dev, queue +
+			HISI_SAS_CQ_INT_BASE_VECTORS_V3_HW);
+}
+
 struct device_attribute *host_attrs_v3_hw[] = {
 	&dev_attr_phy_event_threshold,
 	&dev_attr_intr_conv,
@@ -2764,6 +2779,7 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
 	.debugfs_reg_port = &debugfs_port_reg,
 	.snapshot_prepare = debugfs_snapshot_prepare_v3_hw,
 	.snapshot_restore = debugfs_snapshot_restore_v3_hw,
+	.get_managed_irq_aff = get_managed_irq_aff_v3_hw,
 };
 
 static struct Scsi_Host *
@@ -2937,7 +2953,7 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
 	free_irq(pci_irq_vector(pdev, 1), hisi_hba);
 	free_irq(pci_irq_vector(pdev, 2), hisi_hba);
 	free_irq(pci_irq_vector(pdev, 11), hisi_hba);
-	for (i = 0; i < hisi_hba->queue_count; i++) {
+	for (i = 0; i < hisi_hba->nvecs; i++) {
 		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
 		int nr = hisi_sas_intr_conv ? 16 : 16 + i;
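
For context, below is a minimal standalone sketch (plain userspace C, not driver code) of the two behaviours this patch depends on: building the CPU-to-completion-queue reply map from per-vector affinity masks, as setup_reply_map_v3_hw() now does for the first nvecs CQ vectors, and skipping the internal abort on any queue whose managed-IRQ affinity contains no online CPU. Cpumasks are modeled as plain 64-bit bitmasks, and all names and mask values are hypothetical illustrations; in the driver the masks come from pci_irq_get_affinity() and the check is cpumask_intersects().

/*
 * Toy model: bit n set in a cpumask_t means CPU n is in the mask.
 * Everything here is a hypothetical stand-in for the kernel types.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS	8
#define NVECS	4	/* CQ vectors actually allocated */

typedef uint64_t cpumask_t;

static bool intersects(cpumask_t a, cpumask_t b)
{
	return (a & b) != 0;
}

int main(void)
{
	/* Hypothetical hotplug state: CPUs 0-5 online, 6-7 offline. */
	cpumask_t cpu_online_mask = 0x3F;

	/* Hypothetical per-queue managed-IRQ affinity, 2 CPUs each. */
	cpumask_t irq_aff[NVECS] = { 0x03, 0x0C, 0x30, 0xC0 };

	unsigned int reply_map[NR_CPUS];

	/* 1. CPU -> queue reply map, as the v3 hw init builds it. */
	for (int queue = 0; queue < NVECS; queue++)
		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			if (irq_aff[queue] & (1ULL << cpu))
				reply_map[cpu] = queue;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d completes on queue %u\n", cpu, reply_map[cpu]);

	/*
	 * 2. Mirrors the new abort-path check: a queue whose managed
	 * affinity mask has no online CPU would never see its CQ
	 * interrupt handled, so the abort is not issued there.
	 */
	for (int queue = 0; queue < NVECS; queue++) {
		if (!intersects(cpu_online_mask, irq_aff[queue])) {
			printf("queue %d: no online CPU, skip abort\n", queue);
			continue;
		}
		printf("queue %d: issue internal abort\n", queue);
	}
	return 0;
}

With the masks above, queue 3 maps only to the offline CPUs 6-7, so the sketch skips it while queues 0-2 still receive the internal abort, which is exactly the case the new cpumask_intersects() test in hisi_sas_internal_task_abort() guards against.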