Commit a4ab6137 authored by Ron Mercer, committed by David S. Miller

qlge: Allow running MSIx with fewer vectors.

Currently we downshift to MSI/Legacy if we don't get enough vectors for
cpu_count RSS rings plus cpu_count TX completion rings.  This patch
allows running MSIX with the vector count that the platform provides.
Signed-off-by: Ron Mercer <ron.mercer@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent b2014ff8
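The retry loop added by this patch leans on the return convention of the legacy pci_enable_msix(): 0 means all requested vectors were allocated, a negative value is an error, and a positive value means the request was too large and reports how many vectors the platform can actually provide. Below is a minimal standalone sketch of that negotiation pattern under those semantics; the function name example_enable_msix is illustrative and not part of the driver.

    #include <linux/pci.h>

    /* Sketch only: ask for `want` vectors and settle for what the
     * platform grants.  Legacy pci_enable_msix() returns 0 on success,
     * a negative errno on failure, and a positive count when fewer
     * vectors are available; on a positive return we retry with that
     * smaller count.
     */
    static int example_enable_msix(struct pci_dev *pdev,
                                   struct msix_entry *entries, int want)
    {
            int err;

            do {
                    err = pci_enable_msix(pdev, entries, want);
                    if (err > 0)
                            want = err; /* too many requested; retry smaller */
            } while (err > 0);

            return err < 0 ? err : want; /* vectors granted, or -errno */
    }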
@@ -2815,17 +2815,20 @@ static void ql_disable_msix(struct ql_adapter *qdev)
         }
 }
 
+/* We start by trying to get the number of vectors
+ * stored in qdev->intr_count. If we don't get that
+ * many then we reduce the count and try again.
+ */
 static void ql_enable_msix(struct ql_adapter *qdev)
 {
-        int i;
+        int i, err;
 
-        qdev->intr_count = 1;
         /* Get the MSIX vectors. */
         if (irq_type == MSIX_IRQ) {
                 /* Try to alloc space for the msix struct,
                  * if it fails then go to MSI/legacy.
                  */
-                qdev->msi_x_entry = kcalloc(qdev->rx_ring_count,
+                qdev->msi_x_entry = kcalloc(qdev->intr_count,
                                             sizeof(struct msix_entry),
                                             GFP_KERNEL);
                 if (!qdev->msi_x_entry) {
@@ -2833,26 +2836,36 @@ static void ql_enable_msix(struct ql_adapter *qdev)
                         goto msi;
                 }
 
-                for (i = 0; i < qdev->rx_ring_count; i++)
+                for (i = 0; i < qdev->intr_count; i++)
                         qdev->msi_x_entry[i].entry = i;
 
-                if (!pci_enable_msix
-                    (qdev->pdev, qdev->msi_x_entry, qdev->rx_ring_count)) {
-                        set_bit(QL_MSIX_ENABLED, &qdev->flags);
-                        qdev->intr_count = qdev->rx_ring_count;
-                        QPRINTK(qdev, IFUP, DEBUG,
-                                "MSI-X Enabled, got %d vectors.\n",
-                                qdev->intr_count);
-                        return;
-                } else {
+                /* Loop to get our vectors.  We start with
+                 * what we want and settle for what we get.
+                 */
+                do {
+                        err = pci_enable_msix(qdev->pdev,
+                                qdev->msi_x_entry, qdev->intr_count);
+                        if (err > 0)
+                                qdev->intr_count = err;
+                } while (err > 0);
+
+                if (err < 0) {
                         kfree(qdev->msi_x_entry);
                         qdev->msi_x_entry = NULL;
                         QPRINTK(qdev, IFUP, WARNING,
                                 "MSI-X Enable failed, trying MSI.\n");
+                        qdev->intr_count = 1;
                         irq_type = MSI_IRQ;
+                } else if (err == 0) {
+                        set_bit(QL_MSIX_ENABLED, &qdev->flags);
+                        QPRINTK(qdev, IFUP, INFO,
+                                "MSI-X Enabled, got %d vectors.\n",
+                                qdev->intr_count);
+                        return;
                 }
         }
 msi:
+        qdev->intr_count = 1;
         if (irq_type == MSI_IRQ) {
                 if (!pci_enable_msi(qdev->pdev)) {
                         set_bit(QL_MSI_ENABLED, &qdev->flags);
@@ -2876,8 +2889,6 @@ static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
         int i = 0;
         struct intr_context *intr_context = &qdev->intr_context[0];
 
-        ql_enable_msix(qdev);
-
         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
                 /* Each rx_ring has it's
                  * own intr_context since we have separate
@@ -3438,40 +3449,20 @@ static int ql_configure_rings(struct ql_adapter *qdev)
         int i;
         struct rx_ring *rx_ring;
         struct tx_ring *tx_ring;
-        int cpu_cnt = num_online_cpus();
+        int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
 
-        /*
-         * For each processor present we allocate one
-         * rx_ring for outbound completions, and one
-         * rx_ring for inbound completions.  Plus there is
-         * always the one default queue.  For the CPU
-         * counts we end up with the following rx_rings:
-         * rx_ring count =
-         *  one default queue +
-         *  (CPU count * outbound completion rx_ring) +
-         *  (CPU count * inbound (RSS) completion rx_ring)
-         * To keep it simple we limit the total number of
-         * queues to < 32, so we truncate CPU to 8.
-         * This limitation can be removed when requested.
-         */
-
-        if (cpu_cnt > MAX_CPUS)
-                cpu_cnt = MAX_CPUS;
-
-        /*
-         * rx_ring[0] is always the default queue.
-         */
-        /* Allocate outbound completion ring for each CPU. */
+        /* In a perfect world we have one RSS ring for each CPU
+         * and each has it's own vector.  To do that we ask for
+         * cpu_cnt vectors.  ql_enable_msix() will adjust the
+         * vector count to what we actually get.  We then
+         * allocate an RSS ring for each.
+         * Essentially, we are doing min(cpu_count, msix_vector_count).
+         */
+        qdev->intr_count = cpu_cnt;
+        ql_enable_msix(qdev);
+        /* Adjust the RSS ring count to the actual vector count. */
+        qdev->rss_ring_count = qdev->intr_count;
         qdev->tx_ring_count = cpu_cnt;
-        /* Allocate inbound completion (RSS) ring for each CPU. */
-        qdev->rss_ring_count = cpu_cnt;
-
-        /*
-         * qdev->rx_ring_count:
-         * Total number of rx_rings.  This includes the one
-         * default queue, a number of outbound completion
-         * handler rx_rings, and the number of inbound
-         * completion handler rx_rings.
-         */
         qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
 
         for (i = 0; i < qdev->tx_ring_count; i++) {
...
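To make the new ring sizing concrete, a worked example with illustrative numbers (assuming MAX_CPUS is at least 8): on an 8-CPU machine where the platform grants only 5 MSI-X vectors, the do/while loop in ql_enable_msix() settles at qdev->intr_count = 5. ql_configure_rings() then sets rss_ring_count = 5 while tx_ring_count stays at cpu_cnt = 8, so rx_ring_count = 8 + 5 = 13. Before this patch, the same shortfall would have disabled MSI-X entirely and dropped the driver down to MSI or legacy interrupts with a single vector.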