Commit 6c157f6e authored by David S. Miller

Merge branch 'ena-set_channels'

Sameeh Jubran says:

====================
ena: Support ethtool set_channels

Differences from v2:
* ethtool's set/get channels: Switched to using combined instead of
  separate rx/tx
* Fixed error handling in set_channels
* Fixed indentation and cosmetic issues as requested by Jakub Kicinski

Differences from v1:
* Dropped the print from patch 0002 - "net: ena: multiple queue creation
  related cleanups" as requested by David Miller
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
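
The callback pair being wired up here is what ethtool's -l/-L options drive from user space. Below is a minimal user-space sketch of that path through the SIOCETHTOOL ioctl; the interface name "eth0" and the request for 2 combined channels are illustrative assumptions, and error handling is trimmed.

/* Sketch: read and set the combined channel count via the
 * ETHTOOL_GCHANNELS / ETHTOOL_SCHANNELS ioctls -- the same path that
 * "ethtool -l eth0" and "ethtool -L eth0 combined 2" take.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_channels ch;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* assumed name */
        ifr.ifr_data = (void *)&ch;

        memset(&ch, 0, sizeof(ch));
        ch.cmd = ETHTOOL_GCHANNELS;     /* lands in the driver's get_channels */
        if (ioctl(fd, SIOCETHTOOL, &ifr))
                return 1;
        printf("combined %u of max %u\n", ch.combined_count, ch.max_combined);

        ch.cmd = ETHTOOL_SCHANNELS;     /* lands in the driver's set_channels */
        ch.combined_count = 2;          /* assumed request */
        if (ioctl(fd, SIOCETHTOOL, &ifr))
                return 1;

        close(fd);
        return 0;
}

The ethtool core copies the structure in, bounds-checks the request against what get_channels() reported, and only then calls into the driver; the patches below rely on exactly that split.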
@@ -133,7 +133,7 @@ static void ena_queue_stats(struct ena_adapter *adapter, u64 **data)
         u64 *ptr;
         int i, j;
 
-        for (i = 0; i < adapter->num_queues; i++) {
+        for (i = 0; i < adapter->num_io_queues; i++) {
                 /* Tx stats */
                 ring = &adapter->tx_ring[i];
@@ -205,7 +205,7 @@ int ena_get_sset_count(struct net_device *netdev, int sset)
         if (sset != ETH_SS_STATS)
                 return -EOPNOTSUPP;
 
-        return adapter->num_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
+        return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
                 + ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
 }
@@ -214,7 +214,7 @@ static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
         const struct ena_stats *ena_stats;
         int i, j;
 
-        for (i = 0; i < adapter->num_queues; i++) {
+        for (i = 0; i < adapter->num_io_queues; i++) {
                 /* Tx stats */
                 for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
                         ena_stats = &ena_stats_tx_strings[j];
@@ -333,7 +333,7 @@ static void ena_update_tx_rings_intr_moderation(struct ena_adapter *adapter)
         val = ena_com_get_nonadaptive_moderation_interval_tx(adapter->ena_dev);
 
-        for (i = 0; i < adapter->num_queues; i++)
+        for (i = 0; i < adapter->num_io_queues; i++)
                 adapter->tx_ring[i].smoothed_interval = val;
 }
@@ -344,7 +344,7 @@ static void ena_update_rx_rings_intr_moderation(struct ena_adapter *adapter)
         val = ena_com_get_nonadaptive_moderation_interval_rx(adapter->ena_dev);
 
-        for (i = 0; i < adapter->num_queues; i++)
+        for (i = 0; i < adapter->num_io_queues; i++)
                 adapter->rx_ring[i].smoothed_interval = val;
 }
@@ -612,7 +612,7 @@ static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
         switch (info->cmd) {
         case ETHTOOL_GRXRINGS:
-                info->data = adapter->num_queues;
+                info->data = adapter->num_io_queues;
                 rc = 0;
                 break;
         case ETHTOOL_GRXFH:
@@ -734,14 +734,20 @@ static void ena_get_channels(struct net_device *netdev,
 {
         struct ena_adapter *adapter = netdev_priv(netdev);
 
-        channels->max_rx = adapter->num_queues;
-        channels->max_tx = adapter->num_queues;
-        channels->max_other = 0;
-        channels->max_combined = 0;
-        channels->rx_count = adapter->num_queues;
-        channels->tx_count = adapter->num_queues;
-        channels->other_count = 0;
-        channels->combined_count = 0;
+        channels->max_combined = adapter->max_num_io_queues;
+        channels->combined_count = adapter->num_io_queues;
+}
+
+static int ena_set_channels(struct net_device *netdev,
+                            struct ethtool_channels *channels)
+{
+        struct ena_adapter *adapter = netdev_priv(netdev);
+        u32 count = channels->combined_count;
+        /* The check for max value is already done in ethtool */
+        if (count < ENA_MIN_NUM_IO_QUEUES)
+                return -EINVAL;
+
+        return ena_update_queue_count(adapter, count);
 }
 
 static int ena_get_tunable(struct net_device *netdev,
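
The "check for max value is already done in ethtool" comment above refers to the ethtool core, which validates a request against the limits reported by .get_channels before .set_channels is ever invoked. A simplified, compile-only model of that check follows (hedged: the real logic lives in the kernel's ethtool_set_channels() and covers additional cases, such as RSS indirection table conflicts):

#include <errno.h>
#include <linux/ethtool.h>

/* Simplified model of the ethtool-core validation that runs before a
 * driver's .set_channels is called; not the exact upstream code.
 */
static int check_channels(const struct ethtool_channels *req,
                          const struct ethtool_channels *cur)
{
        if (req->combined_count > cur->max_combined ||
            req->rx_count > cur->max_rx ||
            req->tx_count > cur->max_tx)
                return -EINVAL;         /* above the advertised maximums */
        if (!req->combined_count && (!req->rx_count || !req->tx_count))
                return -EINVAL;         /* would leave no usable RX or TX */
        /* lower bounds stay with the driver, hence ENA_MIN_NUM_IO_QUEUES */
        return 0;
}

Since ENA now advertises only combined channels, max_rx and max_tx stay zero, so any request for separate rx/tx channels is rejected by the core before the driver sees it.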
@@ -807,6 +813,7 @@ static const struct ethtool_ops ena_ethtool_ops = {
         .get_rxfh = ena_get_rxfh,
         .set_rxfh = ena_set_rxfh,
         .get_channels = ena_get_channels,
+        .set_channels = ena_set_channels,
         .get_tunable = ena_get_tunable,
         .set_tunable = ena_set_tunable,
 };
......
@@ -101,7 +101,7 @@ static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
 {
         int i;
 
-        for (i = 0; i < adapter->num_queues; i++)
+        for (i = 0; i < adapter->num_io_queues; i++)
                 adapter->rx_ring[i].mtu = mtu;
 }
@@ -129,10 +129,10 @@ static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
         u32 i;
         int rc;
 
-        adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_queues);
+        adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
         if (!adapter->netdev->rx_cpu_rmap)
                 return -ENOMEM;
 
-        for (i = 0; i < adapter->num_queues; i++) {
+        for (i = 0; i < adapter->num_io_queues; i++) {
                 int irq_idx = ENA_IO_IRQ_IDX(i);
 
                 rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
@@ -172,7 +172,7 @@ static void ena_init_io_rings(struct ena_adapter *adapter)
 
         ena_dev = adapter->ena_dev;
 
-        for (i = 0; i < adapter->num_queues; i++) {
+        for (i = 0; i < adapter->num_io_queues; i++) {
                 txr = &adapter->tx_ring[i];
                 rxr = &adapter->rx_ring[i];
@@ -294,7 +294,7 @@ static int ena_setup_all_tx_resources(struct ena_adapter *adapter)
 {
         int i, rc = 0;
 
-        for (i = 0; i < adapter->num_queues; i++) {
+        for (i = 0; i < adapter->num_io_queues; i++) {
                 rc = ena_setup_tx_resources(adapter, i);
                 if (rc)
                         goto err_setup_tx;
@@ -322,7 +322,7 @@ static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
 {
         int i;
 
-        for (i = 0; i < adapter->num_queues; i++)
+        for (i = 0; i < adapter->num_io_queues; i++)
                 ena_free_tx_resources(adapter, i);
 }
@@ -428,7 +428,7 @@ static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
 {
         int i, rc = 0;
 
-        for (i = 0; i < adapter->num_queues; i++) {
+        for (i = 0; i < adapter->num_io_queues; i++) {
                 rc = ena_setup_rx_resources(adapter, i);
                 if (rc)
                         goto err_setup_rx;
@@ -456,7 +456,7 @@ static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
 {
         int i;
 
-        for (i = 0; i < adapter->num_queues; i++)
+        for (i = 0; i < adapter->num_io_queues; i++)
                 ena_free_rx_resources(adapter, i);
 }
@@ -600,7 +600,7 @@ static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
         struct ena_ring *rx_ring;
         int i, rc, bufs_num;
 
-        for (i = 0; i < adapter->num_queues; i++) {
+        for (i = 0; i < adapter->num_io_queues; i++) {
                 rx_ring = &adapter->rx_ring[i];
                 bufs_num = rx_ring->ring_size - 1;
                 rc = ena_refill_rx_bufs(rx_ring, bufs_num);
@@ -616,7 +616,7 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
 {
         int i;
 
-        for (i = 0; i < adapter->num_queues; i++)
+        for (i = 0; i < adapter->num_io_queues; i++)
                 ena_free_rx_bufs(adapter, i);
 }
@@ -688,7 +688,7 @@ static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
         struct ena_ring *tx_ring;
         int i;
 
-        for (i = 0; i < adapter->num_queues; i++) {
+        for (i = 0; i < adapter->num_io_queues; i++) {
                 tx_ring = &adapter->tx_ring[i];
                 ena_free_tx_bufs(tx_ring);
         }
@@ -699,7 +699,7 @@ static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
         u16 ena_qid;
         int i;
 
-        for (i = 0; i < adapter->num_queues; i++) {
+        for (i = 0; i < adapter->num_io_queues; i++) {
                 ena_qid = ENA_IO_TXQ_IDX(i);
                 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
         }
@@ -710,7 +710,7 @@ static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
         u16 ena_qid;
         int i;
 
-        for (i = 0; i < adapter->num_queues; i++) {
+        for (i = 0; i < adapter->num_io_queues; i++) {
                 ena_qid = ENA_IO_RXQ_IDX(i);
                 cancel_work_sync(&adapter->ena_napi[i].dim.work);
                 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
@@ -1331,7 +1331,7 @@ static irqreturn_t ena_intr_msix_io(int irq, void *data)
  * the number of potential io queues is the minimum of what the device
  * supports and the number of vCPUs.
  */
-static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
+static int ena_enable_msix(struct ena_adapter *adapter)
 {
         int msix_vecs, irq_cnt;
@@ -1342,7 +1342,7 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
         }
 
         /* Reserved the max msix vectors we might need */
-        msix_vecs = ENA_MAX_MSIX_VEC(num_queues);
+        msix_vecs = ENA_MAX_MSIX_VEC(adapter->num_io_queues);
         netif_dbg(adapter, probe, adapter->netdev,
                   "trying to enable MSI-X, vectors %d\n", msix_vecs);
@@ -1359,7 +1359,7 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
                 netif_notice(adapter, probe, adapter->netdev,
                              "enable only %d MSI-X (out of %d), reduce the number of queues\n",
                              irq_cnt, msix_vecs);
-                adapter->num_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
+                adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
         }
 
         if (ena_init_rx_cpu_rmap(adapter))
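
The vector accounting behind this hunk: ENA reserves one MSI-X vector for the admin queue plus one per IO queue pair (each TX/RX pair shares a vector), and shrinks num_io_queues when the PCI core grants fewer vectors than requested. A small self-contained illustration of that arithmetic; ENA_ADMIN_MSIX_VEC = 1 and the macro shape are assumptions here, consistent with the irq_cnt - ENA_ADMIN_MSIX_VEC fallback above:

#include <stdio.h>

#define ENA_ADMIN_MSIX_VEC              1       /* assumed, matches the fallback */
#define ENA_MAX_MSIX_VEC(io_queues)     (ENA_ADMIN_MSIX_VEC + (io_queues))

int main(void)
{
        int num_io_queues = 8;
        int msix_vecs = ENA_MAX_MSIX_VEC(num_io_queues);        /* request 9 */
        int irq_cnt = 5;        /* suppose the PCI core grants only 5 */

        if (irq_cnt < msix_vecs)        /* keep the admin vector, shrink queues */
                num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;   /* -> 4 */

        printf("io queues %d, vectors %d\n", num_io_queues, irq_cnt);
        return 0;
}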
@@ -1397,7 +1397,7 @@ static void ena_setup_io_intr(struct ena_adapter *adapter)
 
         netdev = adapter->netdev;
 
-        for (i = 0; i < adapter->num_queues; i++) {
+        for (i = 0; i < adapter->num_io_queues; i++) {
                 irq_idx = ENA_IO_IRQ_IDX(i);
                 cpu = i % num_online_cpus();
@@ -1529,7 +1529,7 @@ static void ena_del_napi(struct ena_adapter *adapter)
 {
         int i;
 
-        for (i = 0; i < adapter->num_queues; i++)
+        for (i = 0; i < adapter->num_io_queues; i++)
                 netif_napi_del(&adapter->ena_napi[i].napi);
 }
@@ -1538,7 +1538,7 @@ static void ena_init_napi(struct ena_adapter *adapter)
         struct ena_napi *napi;
         int i;
 
-        for (i = 0; i < adapter->num_queues; i++) {
+        for (i = 0; i < adapter->num_io_queues; i++) {
                 napi = &adapter->ena_napi[i];
 
                 netif_napi_add(adapter->netdev,
@@ -1555,7 +1555,7 @@ static void ena_napi_disable_all(struct ena_adapter *adapter)
 {
         int i;
 
-        for (i = 0; i < adapter->num_queues; i++)
+        for (i = 0; i < adapter->num_io_queues; i++)
                 napi_disable(&adapter->ena_napi[i].napi);
 }
@@ -1563,7 +1563,7 @@ static void ena_napi_enable_all(struct ena_adapter *adapter)
 {
         int i;
 
-        for (i = 0; i < adapter->num_queues; i++)
+        for (i = 0; i < adapter->num_io_queues; i++)
                 napi_enable(&adapter->ena_napi[i].napi);
 }
@@ -1673,7 +1673,7 @@ static int ena_create_all_io_tx_queues(struct ena_adapter *adapter)
         struct ena_com_dev *ena_dev = adapter->ena_dev;
         int rc, i;
 
-        for (i = 0; i < adapter->num_queues; i++) {
+        for (i = 0; i < adapter->num_io_queues; i++) {
                 rc = ena_create_io_tx_queue(adapter, i);
                 if (rc)
                         goto create_err;
@@ -1741,7 +1741,7 @@ static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
         struct ena_com_dev *ena_dev = adapter->ena_dev;
         int rc, i;
 
-        for (i = 0; i < adapter->num_queues; i++) {
+        for (i = 0; i < adapter->num_io_queues; i++) {
                 rc = ena_create_io_rx_queue(adapter, i);
                 if (rc)
                         goto create_err;
@@ -1764,7 +1764,7 @@ static void set_io_rings_size(struct ena_adapter *adapter,
 {
         int i;
 
-        for (i = 0; i < adapter->num_queues; i++) {
+        for (i = 0; i < adapter->num_io_queues; i++) {
                 adapter->tx_ring[i].ring_size = new_tx_size;
                 adapter->rx_ring[i].ring_size = new_rx_size;
         }
@@ -1902,14 +1902,14 @@ static int ena_up(struct ena_adapter *adapter)
         set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
 
         /* Enable completion queues interrupt */
-        for (i = 0; i < adapter->num_queues; i++)
+        for (i = 0; i < adapter->num_io_queues; i++)
                 ena_unmask_interrupt(&adapter->tx_ring[i],
                                      &adapter->rx_ring[i]);
 
         /* schedule napi in case we had pending packets
          * from the last time we disable napi
          */
-        for (i = 0; i < adapter->num_queues; i++)
+        for (i = 0; i < adapter->num_io_queues; i++)
                 napi_schedule(&adapter->ena_napi[i].napi);
 
         return rc;
@@ -1984,13 +1984,13 @@ static int ena_open(struct net_device *netdev)
         int rc;
 
         /* Notify the stack of the actual queue counts. */
-        rc = netif_set_real_num_tx_queues(netdev, adapter->num_queues);
+        rc = netif_set_real_num_tx_queues(netdev, adapter->num_io_queues);
         if (rc) {
                 netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
                 return rc;
         }
 
-        rc = netif_set_real_num_rx_queues(netdev, adapter->num_queues);
+        rc = netif_set_real_num_rx_queues(netdev, adapter->num_io_queues);
         if (rc) {
                 netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
                 return rc;
@@ -2043,14 +2043,30 @@ int ena_update_queue_sizes(struct ena_adapter *adapter,
                            u32 new_tx_size,
                            u32 new_rx_size)
 {
-        bool dev_up;
+        bool dev_was_up;
 
-        dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+        dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
         ena_close(adapter->netdev);
         adapter->requested_tx_ring_size = new_tx_size;
         adapter->requested_rx_ring_size = new_rx_size;
         ena_init_io_rings(adapter);
-        return dev_up ? ena_up(adapter) : 0;
+        return dev_was_up ? ena_up(adapter) : 0;
+}
+
+int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
+{
+        struct ena_com_dev *ena_dev = adapter->ena_dev;
+        bool dev_was_up;
+
+        dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+        ena_close(adapter->netdev);
+        adapter->num_io_queues = new_channel_count;
+        /* We need to destroy the rss table so that the indirection
+         * table will be reinitialized by ena_up()
+         */
+        ena_com_rss_destroy(ena_dev);
+        ena_init_io_rings(adapter);
+        return dev_was_up ? ena_open(adapter->netdev) : 0;
 }
 
 static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
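
The ena_com_rss_destroy() call is the subtle part of ena_update_queue_count(): the RSS indirection table spreads packets over whatever number of RX queues was active when it was built, so after shrinking the queue count stale entries would steer traffic to queues that no longer exist. Below is a standalone sketch of the refill rule that the subsequent ena_up()/ena_rss_init_default() reapplies; the modulo rule mirrors the kernel's ethtool_rxfh_indir_default(), and the table size of 128 is an assumption matching ENA_RX_RSS_TABLE_SIZE:

#include <stdio.h>

#define RSS_TABLE_SIZE 128      /* assumed, mirrors ENA_RX_RSS_TABLE_SIZE */

static unsigned int indir_default(unsigned int index, unsigned int n_rx_rings)
{
        return index % n_rx_rings;      /* same rule as ethtool_rxfh_indir_default() */
}

int main(void)
{
        unsigned int table[RSS_TABLE_SIZE];
        unsigned int i;

        for (i = 0; i < RSS_TABLE_SIZE; i++)
                table[i] = indir_default(i, 8); /* built with 8 RX queues */
        /* after shrinking to 4 queues, entries such as table[5] == 5 would
         * point at destroyed queues -- hence the destroy + rebuild above
         */
        for (i = 0; i < RSS_TABLE_SIZE; i++)
                table[i] = indir_default(i, 4);

        printf("entry 5 now steers to queue %u\n", table[5]);  /* 5 % 4 == 1 */
        return 0;
}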
@@ -2495,7 +2511,7 @@ static void ena_get_stats64(struct net_device *netdev,
         if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
                 return;
 
-        for (i = 0; i < adapter->num_queues; i++) {
+        for (i = 0; i < adapter->num_io_queues; i++) {
                 u64 bytes, packets;
 
                 tx_ring = &adapter->tx_ring[i];
@@ -2682,14 +2698,13 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
         return rc;
 }
 
-static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
-                                                    int io_vectors)
+static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
 {
         struct ena_com_dev *ena_dev = adapter->ena_dev;
         struct device *dev = &adapter->pdev->dev;
         int rc;
 
-        rc = ena_enable_msix(adapter, io_vectors);
+        rc = ena_enable_msix(adapter);
         if (rc) {
                 dev_err(dev, "Can not reserve msix vectors\n");
                 return rc;
@@ -2782,8 +2797,7 @@ static int ena_restore_device(struct ena_adapter *adapter)
                 goto err_device_destroy;
         }
 
-        rc = ena_enable_msix_and_set_admin_interrupts(adapter,
-                                                      adapter->num_queues);
+        rc = ena_enable_msix_and_set_admin_interrupts(adapter);
         if (rc) {
                 dev_err(&pdev->dev, "Enable MSI-X failed\n");
                 goto err_device_destroy;
@@ -2948,7 +2962,7 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
         budget = ENA_MONITORED_TX_QUEUES;
 
-        for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
+        for (i = adapter->last_monitored_tx_qid; i < adapter->num_io_queues; i++) {
                 tx_ring = &adapter->tx_ring[i];
                 rx_ring = &adapter->rx_ring[i];
@@ -2965,7 +2979,7 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
                         break;
         }
 
-        adapter->last_monitored_tx_qid = i % adapter->num_queues;
+        adapter->last_monitored_tx_qid = i % adapter->num_io_queues;
 }
 
 /* trigger napi schedule after 2 consecutive detections */
@@ -2995,7 +3009,7 @@ static void check_for_empty_rx_ring(struct ena_adapter *adapter)
         if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
                 return;
 
-        for (i = 0; i < adapter->num_queues; i++) {
+        for (i = 0; i < adapter->num_io_queues; i++) {
                 rx_ring = &adapter->rx_ring[i];
 
                 refill_required =
@@ -3137,16 +3151,16 @@ static void ena_timer_service(struct timer_list *t)
         mod_timer(&adapter->timer_service, jiffies + HZ);
 }
 
-static int ena_calc_io_queue_num(struct pci_dev *pdev,
-                                 struct ena_com_dev *ena_dev,
-                                 struct ena_com_dev_get_features_ctx *get_feat_ctx)
+static int ena_calc_max_io_queue_num(struct pci_dev *pdev,
+                                     struct ena_com_dev *ena_dev,
+                                     struct ena_com_dev_get_features_ctx *get_feat_ctx)
 {
-        int io_tx_sq_num, io_tx_cq_num, io_rx_num, io_queue_num;
+        int io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
 
         if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
                 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
                         &get_feat_ctx->max_queue_ext.max_queue_ext;
 
-                io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num,
+                io_rx_num = min_t(u32, max_queue_ext->max_rx_sq_num,
                                   max_queue_ext->max_rx_cq_num);
 
                 io_tx_sq_num = max_queue_ext->max_tx_sq_num;
@@ -3156,25 +3170,25 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev,
                         &get_feat_ctx->max_queues;
                 io_tx_sq_num = max_queues->max_sq_num;
                 io_tx_cq_num = max_queues->max_cq_num;
-                io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num);
+                io_rx_num = min_t(u32, io_tx_sq_num, io_tx_cq_num);
         }
 
         /* In case of LLQ use the llq fields for the tx SQ/CQ */
         if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
                 io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
 
-        io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
-        io_queue_num = min_t(int, io_queue_num, io_rx_num);
-        io_queue_num = min_t(int, io_queue_num, io_tx_sq_num);
-        io_queue_num = min_t(int, io_queue_num, io_tx_cq_num);
+        max_num_io_queues = min_t(u32, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
+        max_num_io_queues = min_t(u32, max_num_io_queues, io_rx_num);
+        max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_sq_num);
+        max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
         /* 1 IRQ for mgmnt and 1 IRQ for each IO direction */
-        io_queue_num = min_t(int, io_queue_num, pci_msix_vec_count(pdev) - 1);
-        if (unlikely(!io_queue_num)) {
+        max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
+        if (unlikely(!max_num_io_queues)) {
                 dev_err(&pdev->dev, "The device doesn't have io queues\n");
                 return -EFAULT;
         }
 
-        return io_queue_num;
+        return max_num_io_queues;
 }
 
 static int ena_set_queues_placement_policy(struct pci_dev *pdev,
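
A worked example of the clamping chain above, with made-up device limits; MIN_U32 stands in for the kernel's min_t(u32, ...) and the constants are illustrative, not real ENA values. With 16 online CPUs but only 9 MSI-X vectors available, the vector budget wins and the device is capped at 8 IO queues:

#include <stdio.h>

#define ENA_MAX_NUM_IO_QUEUES   128     /* value assumed for illustration */
#define MIN_U32(a, b)           ((a) < (b) ? (a) : (b))

int main(void)
{
        unsigned int online_cpus = 16;                  /* num_online_cpus() */
        unsigned int io_rx_num = 32;                    /* min(rx SQs, rx CQs) */
        unsigned int io_tx_sq_num = 32, io_tx_cq_num = 32;
        int msix_vec_count = 9;                         /* pci_msix_vec_count() */
        unsigned int max_num_io_queues;

        max_num_io_queues = MIN_U32(online_cpus, ENA_MAX_NUM_IO_QUEUES);
        max_num_io_queues = MIN_U32(max_num_io_queues, io_rx_num);
        max_num_io_queues = MIN_U32(max_num_io_queues, io_tx_sq_num);
        max_num_io_queues = MIN_U32(max_num_io_queues, io_tx_cq_num);
        /* one vector is reserved for management */
        max_num_io_queues = MIN_U32(max_num_io_queues, msix_vec_count - 1);

        printf("max_num_io_queues = %u\n", max_num_io_queues); /* prints 8 */
        return 0;
}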
@@ -3302,7 +3316,7 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
         }
 
         for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
-                val = ethtool_rxfh_indir_default(i, adapter->num_queues);
+                val = ethtool_rxfh_indir_default(i, adapter->num_io_queues);
                 rc = ena_com_indirect_table_fill_entry(ena_dev, i,
                                                        ENA_IO_RXQ_IDX(val));
                 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
@@ -3349,7 +3363,7 @@ static void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
         llq_config->llq_ring_entry_size_value = 128;
 }
 
-static int ena_calc_queue_size(struct ena_calc_queue_size_ctx *ctx)
+static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
 {
         struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
         struct ena_com_dev *ena_dev = ctx->ena_dev;
@@ -3358,7 +3372,7 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
         u32 max_tx_queue_size;
         u32 max_rx_queue_size;
 
-        if (ctx->ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
+        if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
                 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
                         &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
                 max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
@@ -3432,11 +3446,12 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         struct ena_llq_configurations llq_config;
         struct ena_com_dev *ena_dev = NULL;
         struct ena_adapter *adapter;
-        int io_queue_num, bars, rc;
         struct net_device *netdev;
         static int adapters_found;
+        u32 max_num_io_queues;
         char *queue_type_str;
         bool wd_state;
+        int bars, rc;
 
         dev_dbg(&pdev->dev, "%s\n", __func__);
@@ -3497,27 +3512,20 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         calc_queue_ctx.pdev = pdev;
 
         /* Initial Tx and RX interrupt delay. Assumes 1 usec granularity.
-        * Updated during device initialization with the real granularity
-        */
+         * Updated during device initialization with the real granularity
+         */
         ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
         ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
         ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
-        io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx);
-        rc = ena_calc_queue_size(&calc_queue_ctx);
-        if (rc || io_queue_num <= 0) {
+        max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx);
+        rc = ena_calc_io_queue_size(&calc_queue_ctx);
+        if (rc || !max_num_io_queues) {
                 rc = -EFAULT;
                 goto err_device_destroy;
         }
 
-        dev_info(&pdev->dev, "creating %d io queues. rx queue size: %d tx queue size. %d LLQ is %s\n",
-                 io_queue_num,
-                 calc_queue_ctx.rx_queue_size,
-                 calc_queue_ctx.tx_queue_size,
-                 (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) ?
-                 "ENABLED" : "DISABLED");
-
         /* dev zeroed in init_etherdev */
-        netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num);
+        netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), max_num_io_queues);
         if (!netdev) {
                 dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
                 rc = -ENOMEM;
@@ -3545,7 +3553,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
         adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
 
-        adapter->num_queues = io_queue_num;
+        adapter->num_io_queues = max_num_io_queues;
+        adapter->max_num_io_queues = max_num_io_queues;
+
         adapter->last_monitored_tx_qid = 0;
 
         adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
@@ -3569,7 +3579,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
         u64_stats_init(&adapter->syncp);
 
-        rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num);
+        rc = ena_enable_msix_and_set_admin_interrupts(adapter);
         if (rc) {
                 dev_err(&pdev->dev,
                         "Failed to enable and set the admin interrupts\n");
queue_type_str = "Low Latency";
dev_info(&pdev->dev,
"%s found at mem %lx, mac addr %pM Queues %d, Placement policy: %s\n",
"%s found at mem %lx, mac addr %pM, Placement policy: %s\n",
DEVICE_NAME, (long)pci_resource_start(pdev, 0),
netdev->dev_addr, io_queue_num, queue_type_str);
netdev->dev_addr, queue_type_str);
set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
......
@@ -82,6 +82,8 @@
 #define ENA_DEFAULT_RING_SIZE (1024)
 #define ENA_MIN_RING_SIZE (256)
 
+#define ENA_MIN_NUM_IO_QUEUES (1)
+
 #define ENA_TX_WAKEUP_THRESH (MAX_SKB_FRAGS + 2)
 #define ENA_DEFAULT_RX_COPYBREAK (256 - NET_IP_ALIGN)
@@ -161,10 +163,10 @@ struct ena_calc_queue_size_ctx {
         struct ena_com_dev_get_features_ctx *get_feat_ctx;
         struct ena_com_dev *ena_dev;
         struct pci_dev *pdev;
-        u16 tx_queue_size;
-        u16 rx_queue_size;
-        u16 max_tx_queue_size;
-        u16 max_rx_queue_size;
+        u32 tx_queue_size;
+        u32 rx_queue_size;
+        u32 max_tx_queue_size;
+        u32 max_rx_queue_size;
         u16 max_tx_sgl_size;
         u16 max_rx_sgl_size;
 };
@@ -324,7 +326,8 @@ struct ena_adapter {
         u32 rx_copybreak;
         u32 max_mtu;
 
-        int num_queues;
+        u32 num_io_queues;
+        u32 max_num_io_queues;
 
         int msix_vecs;
@@ -387,6 +390,7 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
 int ena_update_queue_sizes(struct ena_adapter *adapter,
                            u32 new_tx_size,
                            u32 new_rx_size);
+int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count);
 
 int ena_get_sset_count(struct net_device *netdev, int sset);
......