提交 04f9b4fe 编写于 作者: Yupeng Zhou 提交者: Xie XiuQi

hisi_sas: Adjust the DQ selection method.

driver inclusion
category: feature
bugzilla: NA
CVE: NA

Adjust the DQ selection method, select the DQ by numa node ID.
Signed-off-by: Yupeng Zhou <zhouyupeng1@huawei.com>
Reviewed-by: luojian <luojian5@huawei.com>
Reviewed-by: chenxiang <chenxiang66@hisilicon.com>
Reviewed-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
上级 dda7f15e
...@@ -410,8 +410,9 @@ struct hisi_hba { ...@@ -410,8 +410,9 @@ struct hisi_hba {
struct dentry *debugfs_bist_dentry; struct dentry *debugfs_bist_dentry;
bool user_ctl_irq; bool user_ctl_irq;
unsigned int reply_map[NR_CPUS]; unsigned int dq_idx[NR_CPUS];
int nvecs; int nvecs;
unsigned int dq_num_per_node;
}; };
/* Generic HW DMA host memory structures */ /* Generic HW DMA host memory structures */
......
...@@ -467,7 +467,10 @@ static int hisi_sas_task_prep(struct sas_task *task, ...@@ -467,7 +467,10 @@ static int hisi_sas_task_prep(struct sas_task *task,
struct hisi_sas_dq *dq; struct hisi_sas_dq *dq;
unsigned long flags; unsigned long flags;
int wr_q_index; int wr_q_index;
unsigned int dq_index = hisi_hba->reply_map[raw_smp_processor_id()]; unsigned int curr_node_id = numa_node_id();
unsigned int dq_index =
(hisi_hba->dq_idx[curr_node_id] % hisi_hba->dq_num_per_node) +
(hisi_hba->dq_num_per_node * curr_node_id);
if (DEV_IS_GONE(sas_dev)) { if (DEV_IS_GONE(sas_dev)) {
if (sas_dev) if (sas_dev)
...@@ -584,6 +587,7 @@ static int hisi_sas_task_prep(struct sas_task *task, ...@@ -584,6 +587,7 @@ static int hisi_sas_task_prep(struct sas_task *task,
spin_unlock_irqrestore(&task->task_state_lock, flags); spin_unlock_irqrestore(&task->task_state_lock, flags);
++(*pass); ++(*pass);
++hisi_hba->dq_idx[curr_node_id];
WRITE_ONCE(slot->ready, 1); WRITE_ONCE(slot->ready, 1);
return 0; return 0;
...@@ -2328,6 +2332,11 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba) ...@@ -2328,6 +2332,11 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba)
sema_init(&hisi_hba->sem, 1); sema_init(&hisi_hba->sem, 1);
spin_lock_init(&hisi_hba->lock); spin_lock_init(&hisi_hba->lock);
hisi_hba->dq_num_per_node = hisi_hba->queue_count/num_online_nodes();
for (i = 0; i < NR_CPUS; i++)
hisi_hba->dq_idx[i] = 0;
for (i = 0; i < hisi_hba->n_phy; i++) { for (i = 0; i < hisi_hba->n_phy; i++) {
hisi_sas_phy_init(hisi_hba, i); hisi_sas_phy_init(hisi_hba, i);
hisi_hba->port[i].port_attached = 0; hisi_hba->port[i].port_attached = 0;
......
...@@ -2528,27 +2528,6 @@ static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p) ...@@ -2528,27 +2528,6 @@ static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
return IRQ_HANDLED; return IRQ_HANDLED;
} }
/*
 * Build the CPU -> completion-queue lookup table from the MSI affinity
 * masks, so completions are steered to the queue whose interrupt is
 * bound to the submitting CPU.  If affinity information is unavailable
 * for any vector, fall back to spreading all possible CPUs round-robin
 * across the hardware queues.
 */
static void setup_reply_map_v3_hw(struct hisi_hba *hisi_hba, int nvecs)
{
	const struct cpumask *affinity;
	int vec, cpu;

	for (vec = 0; vec < nvecs; vec++) {
		/* CQ vectors start after the base (non-CQ) vectors */
		affinity = pci_irq_get_affinity(hisi_hba->pci_dev,
				HISI_SAS_CQ_INT_BASE_VECTORS_V3_HW + vec);
		if (!affinity) {
			/* No affinity info: round-robin CPUs over queues */
			for_each_possible_cpu(cpu)
				hisi_hba->reply_map[cpu] =
					cpu % hisi_hba->queue_count;
			return;
		}
		for_each_cpu(cpu, affinity)
			hisi_hba->reply_map[cpu] = vec;
	}
}
static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
{ {
struct device *dev = hisi_hba->dev; struct device *dev = hisi_hba->dev;
...@@ -2570,8 +2549,6 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) ...@@ -2570,8 +2549,6 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
PCI_IRQ_MSI | PCI_IRQ_MSI |
PCI_IRQ_AFFINITY, PCI_IRQ_AFFINITY,
&desc); &desc);
setup_reply_map_v3_hw(hisi_hba, vectors -
HISI_SAS_CQ_INT_BASE_VECTORS_V3_HW);
} }
if (vectors < HISI_SAS_MIN_VECTORS_V3_HW) { if (vectors < HISI_SAS_MIN_VECTORS_V3_HW) {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册