Commit 407257ce authored by David S. Miller

Merge branch 'ibmvnic-Implement-driver-defined-queue-limits'

Thomas Falcon says:

====================
ibmvnic: Implement driver-defined queue limits

In this patch series, update the ibmvnic driver to use driver-defined
queue limits instead of limits imposed by the Virtual I/O Server
management partition. For some devices, the initial maximum queue size
and queue count limits, despite their definition, can actually be
exceeded if the client driver requests it. With this in mind, define a
private ethtool flag that toggles the use of driver-defined limits.
These limits are currently more than what supported hardware will
likely allow, so the driver will attempt to get as close as possible
to the user request but may not fully succeed.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
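
The new "use-server-maxes" flag is driven through ethtool's standard
private-flags interface, so no driver-specific tooling is needed. A
minimal usage sketch, assuming a vNIC interface named env2 (the name is
hypothetical, and the reported maxima depend on the hypervisor
configuration):

  # ethtool --show-priv-flags env2
  Private flags for env2:
  use-server-maxes: off
  # ethtool -g env2                                    # ring maxima: IBMVNIC_MAX_QUEUE_SZ while off
  # ethtool -l env2                                    # channel maxima: IBMVNIC_MAX_QUEUES while off
  # ethtool --set-priv-flags env2 use-server-maxes on  # revert to the VIOS-provided maxima

With the flag off (the default, since priv_flags starts at zero), a
request such as 'ethtool -G env2 rx 4096 tx 4096' may still be trimmed
by the hypervisor; the driver then prints the "Could not match full
ringsize request" message and keeps the closest allowed values.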
@@ -2364,8 +2364,13 @@ static void ibmvnic_get_ringparam(struct net_device *netdev,
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 
-	ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
-	ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
+	if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
+		ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
+		ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
+	} else {
+		ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
+		ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
+	}
 	ring->rx_mini_max_pending = 0;
 	ring->rx_jumbo_max_pending = 0;
 	ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
@@ -2378,21 +2383,23 @@ static int ibmvnic_set_ringparam(struct net_device *netdev,
 				  struct ethtool_ringparam *ring)
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+	int ret;
 
-	if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
-	    ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
-		netdev_err(netdev, "Invalid request.\n");
-		netdev_err(netdev, "Max tx buffers = %llu\n",
-			   adapter->max_rx_add_entries_per_subcrq);
-		netdev_err(netdev, "Max rx buffers = %llu\n",
-			   adapter->max_tx_entries_per_subcrq);
-		return -EINVAL;
-	}
-
+	ret = 0;
 	adapter->desired.rx_entries = ring->rx_pending;
 	adapter->desired.tx_entries = ring->tx_pending;
 
-	return wait_for_reset(adapter);
+	ret = wait_for_reset(adapter);
+
+	if (!ret &&
+	    (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
+	     adapter->req_tx_entries_per_subcrq != ring->tx_pending))
+		netdev_info(netdev,
+			    "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
+			    ring->rx_pending, ring->tx_pending,
+			    adapter->req_rx_add_entries_per_subcrq,
+			    adapter->req_tx_entries_per_subcrq);
+	return ret;
 }
 
 static void ibmvnic_get_channels(struct net_device *netdev,
@@ -2400,8 +2407,14 @@ static void ibmvnic_get_channels(struct net_device *netdev,
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 
-	channels->max_rx = adapter->max_rx_queues;
-	channels->max_tx = adapter->max_tx_queues;
+	if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
+		channels->max_rx = adapter->max_rx_queues;
+		channels->max_tx = adapter->max_tx_queues;
+	} else {
+		channels->max_rx = IBMVNIC_MAX_QUEUES;
+		channels->max_tx = IBMVNIC_MAX_QUEUES;
+	}
+
 	channels->max_other = 0;
 	channels->max_combined = 0;
 	channels->rx_count = adapter->req_rx_queues;
@@ -2414,11 +2427,23 @@ static int ibmvnic_set_channels(struct net_device *netdev,
 				    struct ethtool_channels *channels)
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+	int ret;
 
+	ret = 0;
 	adapter->desired.rx_queues = channels->rx_count;
 	adapter->desired.tx_queues = channels->tx_count;
 
-	return wait_for_reset(adapter);
+	ret = wait_for_reset(adapter);
+
+	if (!ret &&
+	    (adapter->req_rx_queues != channels->rx_count ||
+	     adapter->req_tx_queues != channels->tx_count))
+		netdev_info(netdev,
+			    "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
+			    channels->rx_count, channels->tx_count,
+			    adapter->req_rx_queues, adapter->req_tx_queues);
+
+	return ret;
 }
 
 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -2426,32 +2451,43 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
 	int i;
 
-	if (stringset != ETH_SS_STATS)
-		return;
-
-	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
-		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
-
-	for (i = 0; i < adapter->req_tx_queues; i++) {
-		snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
-		data += ETH_GSTRING_LEN;
-
-		snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
-		data += ETH_GSTRING_LEN;
-
-		snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
-		data += ETH_GSTRING_LEN;
-	}
-
-	for (i = 0; i < adapter->req_rx_queues; i++) {
-		snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
-		data += ETH_GSTRING_LEN;
-
-		snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
-		data += ETH_GSTRING_LEN;
-
-		snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
-		data += ETH_GSTRING_LEN;
-	}
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
+				i++, data += ETH_GSTRING_LEN)
+			memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
+
+		for (i = 0; i < adapter->req_tx_queues; i++) {
+			snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
+			data += ETH_GSTRING_LEN;
+
+			snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
+			data += ETH_GSTRING_LEN;
+
+			snprintf(data, ETH_GSTRING_LEN,
+				 "tx%d_dropped_packets", i);
+			data += ETH_GSTRING_LEN;
+		}
+
+		for (i = 0; i < adapter->req_rx_queues; i++) {
+			snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
+			data += ETH_GSTRING_LEN;
+
+			snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
+			data += ETH_GSTRING_LEN;
+
+			snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
+			data += ETH_GSTRING_LEN;
+		}
+		break;
+
+	case ETH_SS_PRIV_FLAGS:
+		for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
+			strcpy(data + i * ETH_GSTRING_LEN,
+			       ibmvnic_priv_flags[i]);
+		break;
+	default:
+		return;
+	}
 }
 
@@ -2464,6 +2500,8 @@ static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
 		return ARRAY_SIZE(ibmvnic_stats) +
 		       adapter->req_tx_queues * NUM_TX_STATS +
 		       adapter->req_rx_queues * NUM_RX_STATS;
+	case ETH_SS_PRIV_FLAGS:
+		return ARRAY_SIZE(ibmvnic_priv_flags);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -2514,6 +2552,25 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
 	}
 }
 
+static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
+{
+	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+
+	return adapter->priv_flags;
+}
+
+static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+	bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
+
+	if (which_maxes)
+		adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
+	else
+		adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
+	return 0;
+}
+
 static const struct ethtool_ops ibmvnic_ethtool_ops = {
 	.get_drvinfo		= ibmvnic_get_drvinfo,
 	.get_msglevel		= ibmvnic_get_msglevel,
@@ -2527,6 +2584,8 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = {
 	.get_sset_count		= ibmvnic_get_sset_count,
 	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
 	.get_link_ksettings	= ibmvnic_get_link_ksettings,
+	.get_priv_flags		= ibmvnic_get_priv_flags,
+	.set_priv_flags		= ibmvnic_set_priv_flags,
 };
 
 /* Routines for managing CRQs/sCRQs */
...
@@ -39,7 +39,8 @@
 #define IBMVNIC_RX_WEIGHT		16
 /* when changing this, update IBMVNIC_IO_ENTITLEMENT_DEFAULT */
 #define IBMVNIC_BUFFS_PER_POOL	100
-#define IBMVNIC_MAX_QUEUES	10
+#define IBMVNIC_MAX_QUEUES	16
+#define IBMVNIC_MAX_QUEUE_SZ	4096
 
 #define IBMVNIC_TSO_BUF_SZ	65536
 #define IBMVNIC_TSO_BUFS	64
@@ -48,6 +49,11 @@
 #define IBMVNIC_MAX_LTB_SIZE ((1 << (MAX_ORDER - 1)) * PAGE_SIZE)
 #define IBMVNIC_BUFFER_HLEN 500
 
+static const char ibmvnic_priv_flags[][ETH_GSTRING_LEN] = {
+#define IBMVNIC_USE_SERVER_MAXES	0x1
+	"use-server-maxes"
+};
+
 struct ibmvnic_login_buffer {
 	__be32 len;
 	__be32 version;
@@ -969,6 +975,7 @@ struct ibmvnic_adapter {
 	struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
 	dma_addr_t ip_offload_ctrl_tok;
 	u32 msg_enable;
+	u32 priv_flags;
 
 	/* Vital Product Data (VPD) */
 	struct ibmvnic_vpd *vpd;
...