“2d7f5c48c03ee53ad649cbf803dc33730f955234”上不存在“drivers/clocksource/arc_timer.c”
提交 fb5231b3 编写于 作者: C chenxiang 提交者: Xie XiuQi

scsi: hisi_sas: allocate number of CQ irq vectors according to cpu numbers

Currently, if the number of CQ irq vectors (fixed at 16) is less than nr_cpus,
allocating msi interrupts will fail. So allow the number of CQ irq
vectors to be allocated in the range 1 to 16.
Also, if the CPUs associated with a CQ irq vector are all offlined, there is no
need to issue the internal abort command to that queue; otherwise the internal
abort command will time out, as no CPU is processing the CQ interrupt.
Signed-off-by: NXiang Chen <chenxiang66@hisilicon.com>
Signed-off-by: NJohn Garry <john.garry@huawei.com>

Feature or Bugfix:Bugfix
Signed-off-by: Nchenxiang (M) <chenxiang66@hisilicon.com>
Reviewed-by: Ntanxiaofei <tanxiaofei@huawei.com>
Signed-off-by: NYang Yingliang <yangyingliang@huawei.com>
上级 82e9c59c
...@@ -281,6 +281,8 @@ struct hisi_sas_hw { ...@@ -281,6 +281,8 @@ struct hisi_sas_hw {
int delay_ms, int timeout_ms); int delay_ms, int timeout_ms);
void (*snapshot_prepare)(struct hisi_hba *hisi_hba); void (*snapshot_prepare)(struct hisi_hba *hisi_hba);
void (*snapshot_restore)(struct hisi_hba *hisi_hba); void (*snapshot_restore)(struct hisi_hba *hisi_hba);
const struct cpumask *(*get_managed_irq_aff)(struct hisi_hba
*hisi_hba, int queue);
int max_command_entries; int max_command_entries;
int complete_hdr_size; int complete_hdr_size;
struct scsi_host_template *sht; struct scsi_host_template *sht;
...@@ -369,6 +371,7 @@ struct hisi_hba { ...@@ -369,6 +371,7 @@ struct hisi_hba {
bool user_ctl_irq; bool user_ctl_irq;
unsigned int reply_map[NR_CPUS]; unsigned int reply_map[NR_CPUS];
int nvecs;
}; };
/* Generic HW DMA host memory structures */ /* Generic HW DMA host memory structures */
......
...@@ -2088,7 +2088,23 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, ...@@ -2088,7 +2088,23 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
return _hisi_sas_internal_task_abort(hisi_hba, device, return _hisi_sas_internal_task_abort(hisi_hba, device,
abort_flag, tag, dq); abort_flag, tag, dq);
case HISI_SAS_INT_ABT_DEV: case HISI_SAS_INT_ABT_DEV:
for (i = 0; i < hisi_hba->queue_count; i++) { for (i = 0; i < hisi_hba->nvecs; i++) {
const struct cpumask *mask = NULL;
if (hisi_hba->hw->get_managed_irq_aff)
mask = hisi_hba->hw->get_managed_irq_aff(
hisi_hba, i);
/*
* The kernel will not permit unmanaged (MSI are
* managed) IRQ affinity to offline CPUs, so
* always issue internal abort on all queues
* in this case.
* For MSI interrupts, affinity may be set to
* offline CPUs, so ensure that there's an online
* CPU to handle the CQ interrupt.
*/
if (mask && !cpumask_intersects(cpu_online_mask, mask))
continue;
dq = &hisi_hba->dq[i]; dq = &hisi_hba->dq[i];
rc = _hisi_sas_internal_task_abort(hisi_hba, device, rc = _hisi_sas_internal_task_abort(hisi_hba, device,
abort_flag, tag, dq); abort_flag, tag, dq);
...@@ -2171,7 +2187,7 @@ void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba) ...@@ -2171,7 +2187,7 @@ void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
{ {
int i; int i;
for (i = 0; i < hisi_hba->queue_count; i++) { for (i = 0; i < hisi_hba->nvecs; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i]; struct hisi_sas_cq *cq = &hisi_hba->cq[i];
tasklet_kill(&cq->tasklet); tasklet_kill(&cq->tasklet);
......
...@@ -3416,6 +3416,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba) ...@@ -3416,6 +3416,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
tasklet_init(t, cq_tasklet_v2_hw, (unsigned long)cq); tasklet_init(t, cq_tasklet_v2_hw, (unsigned long)cq);
} }
hisi_hba->nvecs = hisi_hba->queue_count;
return 0; return 0;
free_cq_int_irqs: free_cq_int_irqs:
......
...@@ -406,6 +406,7 @@ struct hisi_sas_err_record_v3 { ...@@ -406,6 +406,7 @@ struct hisi_sas_err_record_v3 {
#define T10_CHK_MSK_OFF 16 #define T10_CHK_MSK_OFF 16
#define HISI_SAS_CQ_INT_BASE_VECTORS_V3_HW 16 #define HISI_SAS_CQ_INT_BASE_VECTORS_V3_HW 16
#define HISI_SAS_MIN_VECTORS_V3_HW 17
#define HISI_SAS_IS_RW_CMD(op) \ #define HISI_SAS_IS_RW_CMD(op) \
((op == READ_6) || (op == WRITE_6) || \ ((op == READ_6) || (op == WRITE_6) || \
...@@ -2131,16 +2132,15 @@ static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p) ...@@ -2131,16 +2132,15 @@ static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
return IRQ_HANDLED; return IRQ_HANDLED;
} }
static void setup_reply_map_v3_hw(struct hisi_hba *hisi_hba) static void setup_reply_map_v3_hw(struct hisi_hba *hisi_hba, int nvecs)
{ {
const struct cpumask *mask; const struct cpumask *mask;
int queue, cpu; int queue, cpu;
for (queue = 0; queue < hisi_hba->queue_count; queue++) { for (queue = 0; queue < nvecs; queue++) {
mask = pci_irq_get_affinity(hisi_hba->pci_dev, queue + 16); mask = pci_irq_get_affinity(hisi_hba->pci_dev, queue + 16);
if (!mask) if (!mask)
goto fallback; goto fallback;
for_each_cpu(cpu, mask) for_each_cpu(cpu, mask)
hisi_hba->reply_map[cpu] = queue; hisi_hba->reply_map[cpu] = queue;
} }
...@@ -2167,16 +2167,21 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) ...@@ -2167,16 +2167,21 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
max_msi, PCI_IRQ_MSI); max_msi, PCI_IRQ_MSI);
} else { } else {
vectors = pci_alloc_irq_vectors_affinity(hisi_hba->pci_dev, vectors = pci_alloc_irq_vectors_affinity(hisi_hba->pci_dev,
max_msi, max_msi, HISI_SAS_MIN_VECTORS_V3_HW,
PCI_IRQ_MSI | PCI_IRQ_AFFINITY, &desc); max_msi,
setup_reply_map_v3_hw(hisi_hba); PCI_IRQ_MSI |
PCI_IRQ_AFFINITY,
&desc);
setup_reply_map_v3_hw(hisi_hba, vectors -
HISI_SAS_CQ_INT_BASE_VECTORS_V3_HW);
} }
if (vectors < max_msi) { if (vectors < HISI_SAS_MIN_VECTORS_V3_HW) {
dev_err(dev, "could not allocate all msi (%d)\n", vectors); dev_err(dev, "allocate msi (%d) not enough\n", vectors);
return -ENOENT; return -ENOENT;
} }
hisi_hba->nvecs = vectors - HISI_SAS_CQ_INT_BASE_VECTORS_V3_HW;
rc = devm_request_irq(dev, pci_irq_vector(pdev, 1), rc = devm_request_irq(dev, pci_irq_vector(pdev, 1),
int_phy_up_down_bcast_v3_hw, 0, int_phy_up_down_bcast_v3_hw, 0,
DRV_NAME " phy", hisi_hba); DRV_NAME " phy", hisi_hba);
...@@ -2205,7 +2210,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) ...@@ -2205,7 +2210,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
} }
/* Init tasklets for cq only */ /* Init tasklets for cq only */
for (i = 0; i < hisi_hba->queue_count; i++) { for (i = 0; i < hisi_hba->nvecs; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i]; struct hisi_sas_cq *cq = &hisi_hba->cq[i];
struct tasklet_struct *t = &cq->tasklet; struct tasklet_struct *t = &cq->tasklet;
int nr = hisi_sas_intr_conv ? 16 : 16 + i; int nr = hisi_sas_intr_conv ? 16 : 16 + i;
...@@ -2701,6 +2706,16 @@ static void debugfs_snapshot_restore_v3_hw(struct hisi_hba *hisi_hba) ...@@ -2701,6 +2706,16 @@ static void debugfs_snapshot_restore_v3_hw(struct hisi_hba *hisi_hba)
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
} }
const struct cpumask *
get_managed_irq_aff_v3_hw(struct hisi_hba *hisi_hba, int queue)
{
if (user_ctl_irq)
return NULL;
return pci_irq_get_affinity(hisi_hba->pci_dev, queue +
HISI_SAS_CQ_INT_BASE_VECTORS_V3_HW);
}
struct device_attribute *host_attrs_v3_hw[] = { struct device_attribute *host_attrs_v3_hw[] = {
&dev_attr_phy_event_threshold, &dev_attr_phy_event_threshold,
&dev_attr_intr_conv, &dev_attr_intr_conv,
...@@ -2764,6 +2779,7 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = { ...@@ -2764,6 +2779,7 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
.debugfs_reg_port = &debugfs_port_reg, .debugfs_reg_port = &debugfs_port_reg,
.snapshot_prepare = debugfs_snapshot_prepare_v3_hw, .snapshot_prepare = debugfs_snapshot_prepare_v3_hw,
.snapshot_restore = debugfs_snapshot_restore_v3_hw, .snapshot_restore = debugfs_snapshot_restore_v3_hw,
.get_managed_irq_aff = get_managed_irq_aff_v3_hw,
}; };
static struct Scsi_Host * static struct Scsi_Host *
...@@ -2937,7 +2953,7 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba) ...@@ -2937,7 +2953,7 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
free_irq(pci_irq_vector(pdev, 1), hisi_hba); free_irq(pci_irq_vector(pdev, 1), hisi_hba);
free_irq(pci_irq_vector(pdev, 2), hisi_hba); free_irq(pci_irq_vector(pdev, 2), hisi_hba);
free_irq(pci_irq_vector(pdev, 11), hisi_hba); free_irq(pci_irq_vector(pdev, 11), hisi_hba);
for (i = 0; i < hisi_hba->queue_count; i++) { for (i = 0; i < hisi_hba->nvecs; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i]; struct hisi_sas_cq *cq = &hisi_hba->cq[i];
int nr = hisi_sas_intr_conv ? 16 : 16 + i; int nr = hisi_sas_intr_conv ? 16 : 16 + i;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册