/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: pv-drivers@vmware.com
 *
 */


#include "vmxnet3_int.h"
#include <net/vxlan.h>
#include <net/geneve.h>

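/* Legacy Linux default VXLAN UDP port; the IANA-assigned port is
 * IANA_VXLAN_UDP_PORT (4789).
 */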
#define VXLAN_UDP_PORT 8472

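/* Pairs an ethtool stat name with its byte offset in the backing
 * stats structure.
 */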
struct vmxnet3_stat_desc {
	char desc[ETH_GSTRING_LEN];
	int  offset;
};


/* per tq stats maintained by the device */
static const struct vmxnet3_stat_desc
vmxnet3_tq_dev_stats[] = {
	/* description,         offset */
	{ "Tx Queue#",        0 },
	{ "  TSO pkts tx",	offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
	{ "  TSO bytes tx",	offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
	{ "  ucast pkts tx",	offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
	{ "  ucast bytes tx",	offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
	{ "  mcast pkts tx",	offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
	{ "  mcast bytes tx",	offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
	{ "  bcast pkts tx",	offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
	{ "  bcast bytes tx",	offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
	{ "  pkts tx err",	offsetof(struct UPT1_TxStats, pktsTxError) },
	{ "  pkts tx discard",	offsetof(struct UPT1_TxStats, pktsTxDiscard) },
};

/* per tq stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_tq_driver_stats[] = {
	/* description,         offset */
	{"  drv dropped tx total",	offsetof(struct vmxnet3_tq_driver_stats,
						 drop_total) },
	{ "     too many frags", offsetof(struct vmxnet3_tq_driver_stats,
					  drop_too_many_frags) },
	{ "     giant hdr",	offsetof(struct vmxnet3_tq_driver_stats,
					 drop_oversized_hdr) },
	{ "     hdr err",	offsetof(struct vmxnet3_tq_driver_stats,
					 drop_hdr_inspect_err) },
	{ "     tso",		offsetof(struct vmxnet3_tq_driver_stats,
					 drop_tso) },
	{ "  ring full",	offsetof(struct vmxnet3_tq_driver_stats,
					 tx_ring_full) },
	{ "  pkts linearized",	offsetof(struct vmxnet3_tq_driver_stats,
					 linearized) },
	{ "  hdr cloned",	offsetof(struct vmxnet3_tq_driver_stats,
					 copy_skb_header) },
	{ "  giant hdr",	offsetof(struct vmxnet3_tq_driver_stats,
					 oversized_hdr) },
};

/* per rq stats maintained by the device */
static const struct vmxnet3_stat_desc
vmxnet3_rq_dev_stats[] = {
	{ "Rx Queue#",        0 },
	{ "  LRO pkts rx",	offsetof(struct UPT1_RxStats, LROPktsRxOK) },
	{ "  LRO byte rx",	offsetof(struct UPT1_RxStats, LROBytesRxOK) },
	{ "  ucast pkts rx",	offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
	{ "  ucast bytes rx",	offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
	{ "  mcast pkts rx",	offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
	{ "  mcast bytes rx",	offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
	{ "  bcast pkts rx",	offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
	{ "  bcast bytes rx",	offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
	{ "  pkts rx OOB",	offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
	{ "  pkts rx err",	offsetof(struct UPT1_RxStats, pktsRxError) },
};

/* per rq stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_rq_driver_stats[] = {
	/* description,         offset */
	{ "  drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats,
					     drop_total) },
	{ "     err",		offsetof(struct vmxnet3_rq_driver_stats,
					 drop_err) },
	{ "     fcs",		offsetof(struct vmxnet3_rq_driver_stats,
					 drop_fcs) },
	{ "  rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats,
					  rx_buf_alloc_failure) },
};

/* global stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_global_stats[] = {
	/* description,         offset */
	{ "tx timeout count",	offsetof(struct vmxnet3_adapter,
					 tx_timeout_count) }
};


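/* ndo_get_stats64 hook: ask the device to refresh its counters in the
 * shared memory area, then fold the per-queue device and driver stats
 * into the rtnl_link_stats64 summary.
 */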
void
vmxnet3_get_stats64(struct net_device *netdev,
		   struct rtnl_link_stats64 *stats)
{
	struct vmxnet3_adapter *adapter;
	struct vmxnet3_tq_driver_stats *drvTxStats;
	struct vmxnet3_rq_driver_stats *drvRxStats;
	struct UPT1_TxStats *devTxStats;
	struct UPT1_RxStats *devRxStats;
	unsigned long flags;
	int i;

	adapter = netdev_priv(netdev);

	/* Collect the dev stats into the shared area */
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		devTxStats = &adapter->tqd_start[i].stats;
		drvTxStats = &adapter->tx_queue[i].stats;
		stats->tx_packets += devTxStats->ucastPktsTxOK +
				     devTxStats->mcastPktsTxOK +
				     devTxStats->bcastPktsTxOK;
		stats->tx_bytes += devTxStats->ucastBytesTxOK +
				   devTxStats->mcastBytesTxOK +
				   devTxStats->bcastBytesTxOK;
		stats->tx_errors += devTxStats->pktsTxError;
		stats->tx_dropped += drvTxStats->drop_total;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		devRxStats = &adapter->rqd_start[i].stats;
		drvRxStats = &adapter->rx_queue[i].stats;
		stats->rx_packets += devRxStats->ucastPktsRxOK +
				     devRxStats->mcastPktsRxOK +
				     devRxStats->bcastPktsRxOK;

		stats->rx_bytes += devRxStats->ucastBytesRxOK +
				   devRxStats->mcastBytesRxOK +
				   devRxStats->bcastBytesRxOK;

		stats->rx_errors += devRxStats->pktsRxError;
		stats->rx_dropped += drvRxStats->drop_total;
		stats->multicast +=  devRxStats->mcastPktsRxOK;
	}
}

static int
vmxnet3_get_sset_count(struct net_device *netdev, int sset)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	switch (sset) {
	case ETH_SS_STATS:
		return (ARRAY_SIZE(vmxnet3_tq_dev_stats) +
			ARRAY_SIZE(vmxnet3_tq_driver_stats)) *
		       adapter->num_tx_queues +
		       (ARRAY_SIZE(vmxnet3_rq_dev_stats) +
			ARRAY_SIZE(vmxnet3_rq_driver_stats)) *
		       adapter->num_rx_queues +
			ARRAY_SIZE(vmxnet3_global_stats);
	default:
		return -EOPNOTSUPP;
	}
}


/* This is a version 2 of the vmxnet3 ethtool_regs which goes hand in hand with
 * the version 2 of the vmxnet3 support for ethtool(8) --register-dump.
 * Therefore, if any registers are added, removed or modified, then a version
 * bump and a corresponding change in the vmxnet3 support for ethtool(8)
 * --register-dump would be required.
 */
static int
vmxnet3_get_regs_len(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	return ((9 /* BAR1 registers */ +
		(1 + adapter->intr.num_intrs) +
		(1 + adapter->num_tx_queues * 17 /* Tx queue registers */) +
		(1 + adapter->num_rx_queues * 23 /* Rx queue registers */)) *
		sizeof(u32));
}


static void
vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	strlcpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver));

	strlcpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT,
		sizeof(drvinfo->version));

	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));
}


static void
vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int i, j;

	if (stringset != ETH_SS_STATS)
		return;

	for (j = 0; j < adapter->num_tx_queues; j++) {
		for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
			ethtool_sprintf(&buf, vmxnet3_tq_dev_stats[i].desc);
		for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
			ethtool_sprintf(&buf, vmxnet3_tq_driver_stats[i].desc);
	}

	for (j = 0; j < adapter->num_rx_queues; j++) {
		for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
			ethtool_sprintf(&buf, vmxnet3_rq_dev_stats[i].desc);
		for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
			ethtool_sprintf(&buf, vmxnet3_rq_driver_stats[i].desc);
	}

	for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
		ethtool_sprintf(&buf, vmxnet3_global_stats[i].desc);
}

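/* ndo_fix_features hook: LRO depends on a valid receive checksum, so
 * disabling NETIF_F_RXCSUM also disables NETIF_F_LRO.
 */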
netdev_features_t vmxnet3_fix_features(struct net_device *netdev,
				       netdev_features_t features)
{
	/* If Rx checksum is disabled, then LRO should also be disabled */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_LRO;

	return features;
}

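/* ndo_features_check hook: on version 4+ devices, keep checksum/TSO
 * offload for an encapsulated packet only when the outer transport is
 * UDP and the destination port is one the device recognizes (GENEVE or
 * VXLAN); otherwise mask the offload bits and fall back to software.
 */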
netdev_features_t vmxnet3_features_check(struct sk_buff *skb,
					 struct net_device *netdev,
					 netdev_features_t features)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	/* Validate if the tunneled packet is being offloaded by the device */
	if (VMXNET3_VERSION_GE_4(adapter) &&
	    skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 l4_proto = 0;
		u16 port;
		struct udphdr *udph;

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			l4_proto = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			l4_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
		}

		switch (l4_proto) {
		case IPPROTO_UDP:
			udph = udp_hdr(skb);
			port = be16_to_cpu(udph->dest);
			/* Check if offloaded port is supported */
			if (port != GENEVE_UDP_PORT &&
			    port != IANA_VXLAN_UDP_PORT &&
			    port != VXLAN_UDP_PORT) {
				return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
			}
			break;
		default:
			return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
		}
	}
	return features;
}

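/* Toggle the inner (encapsulated) offloads advertised in
 * netdev->hw_enc_features; both helpers are no-ops on pre-version-4
 * devices.
 */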
static void vmxnet3_enable_encap_offloads(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (VMXNET3_VERSION_GE_4(adapter)) {
		netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_RXCSUM |
			NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
			NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
			NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
			NETIF_F_GSO_UDP_TUNNEL_CSUM;
	}
}

static void vmxnet3_disable_encap_offloads(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (VMXNET3_VERSION_GE_4(adapter)) {
		netdev->hw_enc_features &= ~(NETIF_F_SG | NETIF_F_RXCSUM |
			NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
			NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
			NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
			NETIF_F_GSO_UDP_TUNNEL_CSUM);
	}
}

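/* ndo_set_features hook: translate changed feature bits into UPT1_F_*
 * flags in the shared area and ask the device to re-read them via
 * VMXNET3_CMD_UPDATE_FEATURE.
 */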
int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;
	netdev_features_t changed = features ^ netdev->features;
	netdev_features_t tun_offload_mask = NETIF_F_GSO_UDP_TUNNEL |
					     NETIF_F_GSO_UDP_TUNNEL_CSUM;
	u8 udp_tun_enabled = (netdev->features & tun_offload_mask) != 0;

	if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO |
		       NETIF_F_HW_VLAN_CTAG_RX | tun_offload_mask)) {
		if (features & NETIF_F_RXCSUM)
			adapter->shared->devRead.misc.uptFeatures |=
			UPT1_F_RXCSUM;
		else
			adapter->shared->devRead.misc.uptFeatures &=
			~UPT1_F_RXCSUM;

		/* update hardware LRO capability accordingly */
		if (features & NETIF_F_LRO)
			adapter->shared->devRead.misc.uptFeatures |=
							UPT1_F_LRO;
		else
			adapter->shared->devRead.misc.uptFeatures &=
							~UPT1_F_LRO;

		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			adapter->shared->devRead.misc.uptFeatures |=
			UPT1_F_RXVLAN;
		else
			adapter->shared->devRead.misc.uptFeatures &=
			~UPT1_F_RXVLAN;

		if ((features & tun_offload_mask) != 0 && !udp_tun_enabled) {
			vmxnet3_enable_encap_offloads(netdev);
			adapter->shared->devRead.misc.uptFeatures |=
			UPT1_F_RXINNEROFLD;
		} else if ((features & tun_offload_mask) == 0 &&
			   udp_tun_enabled) {
			vmxnet3_disable_encap_offloads(netdev);
			adapter->shared->devRead.misc.uptFeatures &=
			~UPT1_F_RXINNEROFLD;
		}

		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_FEATURE);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	}
	return 0;
}

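/* ethtool -S: values must be written in exactly the order of the names
 * emitted by vmxnet3_get_strings(), one u64 per counter.
 */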
static void
vmxnet3_get_ethtool_stats(struct net_device *netdev,
			  struct ethtool_stats *stats, u64  *buf)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;
	u8 *base;
	int i;
	int j = 0;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	/* this does assume each counter is 64-bit wide */
	for (j = 0; j < adapter->num_tx_queues; j++) {
		base = (u8 *)&adapter->tqd_start[j].stats;
		*buf++ = (u64)j;
		for (i = 1; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
			*buf++ = *(u64 *)(base +
					  vmxnet3_tq_dev_stats[i].offset);

		base = (u8 *)&adapter->tx_queue[j].stats;
		for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
			*buf++ = *(u64 *)(base +
					  vmxnet3_tq_driver_stats[i].offset);
	}

	for (j = 0; j < adapter->num_rx_queues; j++) {
		base = (u8 *)&adapter->rqd_start[j].stats;
		*buf++ = (u64)j;
		for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
			*buf++ = *(u64 *)(base +
					  vmxnet3_rq_dev_stats[i].offset);

		base = (u8 *)&adapter->rx_queue[j].stats;
		for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
			*buf++ = *(u64 *)(base +
					  vmxnet3_rq_driver_stats[i].offset);
	}

	base = (u8 *)adapter;
	for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
		*buf++ = *(u64 *)(base + vmxnet3_global_stats[i].offset);
}


/* This is a version 2 of the vmxnet3 ethtool_regs which goes hand in hand with
 * the version 2 of the vmxnet3 support for ethtool(8) --register-dump.
 * Therefore, if any registers are added, removed or modified, then a version
 * bump and a corresponding change in the vmxnet3 support for ethtool(8)
 * --register-dump would be required.
 */
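/* ethtool -d: the layout below must stay in sync with
 * vmxnet3_get_regs_len(): BAR1 registers first, then the per-interrupt
 * IMR values, then per-Tx-queue and per-Rx-queue ring state.
 */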
static void
vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	u32 *buf = p;
	int i = 0, j = 0;

	memset(p, 0, vmxnet3_get_regs_len(netdev));

	regs->version = 2;

	/* Update vmxnet3_get_regs_len if we want to dump more registers */

	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DSAL);
	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DSAH);
	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ECR);

	buf[j++] = adapter->intr.num_intrs;
	for (i = 0; i < adapter->intr.num_intrs; i++) {
		buf[j++] = VMXNET3_READ_BAR0_REG(adapter, VMXNET3_REG_IMR
						 + i * VMXNET3_REG_ALIGN);
	}

	buf[j++] = adapter->num_tx_queues;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];

		buf[j++] = VMXNET3_READ_BAR0_REG(adapter, VMXNET3_REG_TXPROD +
						 i * VMXNET3_REG_ALIGN);

		buf[j++] = VMXNET3_GET_ADDR_LO(tq->tx_ring.basePA);
		buf[j++] = VMXNET3_GET_ADDR_HI(tq->tx_ring.basePA);
		buf[j++] = tq->tx_ring.size;
		buf[j++] = tq->tx_ring.next2fill;
		buf[j++] = tq->tx_ring.next2comp;
		buf[j++] = tq->tx_ring.gen;

		buf[j++] = VMXNET3_GET_ADDR_LO(tq->data_ring.basePA);
		buf[j++] = VMXNET3_GET_ADDR_HI(tq->data_ring.basePA);
		buf[j++] = tq->data_ring.size;
		buf[j++] = tq->txdata_desc_size;

		buf[j++] = VMXNET3_GET_ADDR_LO(tq->comp_ring.basePA);
		buf[j++] = VMXNET3_GET_ADDR_HI(tq->comp_ring.basePA);
		buf[j++] = tq->comp_ring.size;
		buf[j++] = tq->comp_ring.next2proc;
		buf[j++] = tq->comp_ring.gen;

		buf[j++] = tq->stopped;
	}

	buf[j++] = adapter->num_rx_queues;
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];

		buf[j++] =  VMXNET3_READ_BAR0_REG(adapter, VMXNET3_REG_RXPROD +
						  i * VMXNET3_REG_ALIGN);
		buf[j++] =  VMXNET3_READ_BAR0_REG(adapter, VMXNET3_REG_RXPROD2 +
						  i * VMXNET3_REG_ALIGN);

		buf[j++] = VMXNET3_GET_ADDR_LO(rq->rx_ring[0].basePA);
		buf[j++] = VMXNET3_GET_ADDR_HI(rq->rx_ring[0].basePA);
		buf[j++] = rq->rx_ring[0].size;
		buf[j++] = rq->rx_ring[0].next2fill;
		buf[j++] = rq->rx_ring[0].next2comp;
		buf[j++] = rq->rx_ring[0].gen;

		buf[j++] = VMXNET3_GET_ADDR_LO(rq->rx_ring[1].basePA);
		buf[j++] = VMXNET3_GET_ADDR_HI(rq->rx_ring[1].basePA);
		buf[j++] = rq->rx_ring[1].size;
		buf[j++] = rq->rx_ring[1].next2fill;
		buf[j++] = rq->rx_ring[1].next2comp;
		buf[j++] = rq->rx_ring[1].gen;

		buf[j++] = VMXNET3_GET_ADDR_LO(rq->data_ring.basePA);
		buf[j++] = VMXNET3_GET_ADDR_HI(rq->data_ring.basePA);
		buf[j++] = rq->rx_ring[0].size;
		buf[j++] = rq->data_ring.desc_size;

		buf[j++] = VMXNET3_GET_ADDR_LO(rq->comp_ring.basePA);
		buf[j++] = VMXNET3_GET_ADDR_HI(rq->comp_ring.basePA);
		buf[j++] = rq->comp_ring.size;
		buf[j++] = rq->comp_ring.next2proc;
		buf[j++] = rq->comp_ring.gen;
	}
}


static void
vmxnet3_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	wol->supported = WAKE_UCAST | WAKE_ARP | WAKE_MAGIC;
	wol->wolopts = adapter->wol;
}


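/* ethtool -s ethX wol uag: only unicast (u), ARP (a) and magic-packet
 * (g) wake-ups are supported; all other WoL options are rejected.
 */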
static int
vmxnet3_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_PHY | WAKE_MCAST | WAKE_BCAST |
			    WAKE_MAGICSECURE)) {
		return -EOPNOTSUPP;
	}

	adapter->wol = wol->wolopts;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}


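/* "ethtool ethX" output: the device always reports a twisted-pair
 * port; speed comes from the device, with duplex forced to full, or
 * both are UNKNOWN when there is no link.
 */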
static int
vmxnet3_get_link_ksettings(struct net_device *netdev,
			   struct ethtool_link_ksettings *ecmd)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	ethtool_link_ksettings_zero_link_mode(ecmd, supported);
	ethtool_link_ksettings_add_link_mode(ecmd, supported, 10000baseT_Full);
	ethtool_link_ksettings_add_link_mode(ecmd, supported, 1000baseT_Full);
	ethtool_link_ksettings_add_link_mode(ecmd, supported, TP);
	ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
	ethtool_link_ksettings_add_link_mode(ecmd, advertising, TP);
	ecmd->base.port = PORT_TP;

	if (adapter->link_speed) {
		ecmd->base.speed = adapter->link_speed;
		ecmd->base.duplex = DUPLEX_FULL;
	} else {
		ecmd->base.speed = SPEED_UNKNOWN;
		ecmd->base.duplex = DUPLEX_UNKNOWN;
	}
	return 0;
}


static void
vmxnet3_get_ringparam(struct net_device *netdev,
		      struct ethtool_ringparam *param)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE;
	param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE;
	param->rx_mini_max_pending = VMXNET3_VERSION_GE_3(adapter) ?
		VMXNET3_RXDATA_DESC_MAX_SIZE : 0;
	param->rx_jumbo_max_pending = VMXNET3_RX_RING2_MAX_SIZE;

	param->rx_pending = adapter->rx_ring_size;
	param->tx_pending = adapter->tx_ring_size;
	param->rx_mini_pending = VMXNET3_VERSION_GE_3(adapter) ?
		adapter->rxdata_desc_size : 0;
	param->rx_jumbo_pending = adapter->rx_ring2_size;
}


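/* ethtool -G handler.  The mapping is slightly unusual: "rx-mini" sets
 * the rx data ring descriptor size (version 3+ devices only) rather
 * than a mini ring, and "rx-jumbo" sizes ring2.  Any change tears down
 * and recreates the queues while the device is quiesced.
 */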
static int
vmxnet3_set_ringparam(struct net_device *netdev,
		      struct ethtool_ringparam *param)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	u32 new_tx_ring_size, new_rx_ring_size, new_rx_ring2_size;
	u16 new_rxdata_desc_size;
	u32 sz;
	int err = 0;

	if (param->tx_pending == 0 || param->tx_pending >
						VMXNET3_TX_RING_MAX_SIZE)
		return -EINVAL;

	if (param->rx_pending == 0 || param->rx_pending >
						VMXNET3_RX_RING_MAX_SIZE)
		return -EINVAL;

	if (param->rx_jumbo_pending == 0 ||
	    param->rx_jumbo_pending > VMXNET3_RX_RING2_MAX_SIZE)
		return -EINVAL;

	/* if adapter not yet initialized, do nothing */
	if (adapter->rx_buf_per_pkt == 0) {
		netdev_err(netdev, "adapter not completely initialized, "
			   "ring size cannot be changed yet\n");
		return -EOPNOTSUPP;
	}

	if (VMXNET3_VERSION_GE_3(adapter)) {
		if (param->rx_mini_pending > VMXNET3_RXDATA_DESC_MAX_SIZE)
			return -EINVAL;
	} else if (param->rx_mini_pending != 0) {
		return -EINVAL;
	}

	/* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */
	new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) &
							~VMXNET3_RING_SIZE_MASK;
	new_tx_ring_size = min_t(u32, new_tx_ring_size,
				 VMXNET3_TX_RING_MAX_SIZE);
	if (new_tx_ring_size > VMXNET3_TX_RING_MAX_SIZE || (new_tx_ring_size %
						VMXNET3_RING_SIZE_ALIGN) != 0)
		return -EINVAL;

	/* ring0 has to be a multiple of
	 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
	 */
	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
	new_rx_ring_size = (param->rx_pending + sz - 1) / sz * sz;
	new_rx_ring_size = min_t(u32, new_rx_ring_size,
				 VMXNET3_RX_RING_MAX_SIZE / sz * sz);
	if (new_rx_ring_size > VMXNET3_RX_RING_MAX_SIZE || (new_rx_ring_size %
							   sz) != 0)
		return -EINVAL;

	/* ring2 has to be a multiple of VMXNET3_RING_SIZE_ALIGN */
	new_rx_ring2_size = (param->rx_jumbo_pending + VMXNET3_RING_SIZE_MASK) &
				~VMXNET3_RING_SIZE_MASK;
	new_rx_ring2_size = min_t(u32, new_rx_ring2_size,
				  VMXNET3_RX_RING2_MAX_SIZE);

	/* rx data ring buffer size has to be a multiple of
	 * VMXNET3_RXDATA_DESC_SIZE_ALIGN
	 */
	new_rxdata_desc_size =
		(param->rx_mini_pending + VMXNET3_RXDATA_DESC_SIZE_MASK) &
		~VMXNET3_RXDATA_DESC_SIZE_MASK;
	new_rxdata_desc_size = min_t(u16, new_rxdata_desc_size,
				     VMXNET3_RXDATA_DESC_MAX_SIZE);

	if (new_tx_ring_size == adapter->tx_ring_size &&
	    new_rx_ring_size == adapter->rx_ring_size &&
	    new_rx_ring2_size == adapter->rx_ring2_size &&
	    new_rxdata_desc_size == adapter->rxdata_desc_size) {
		return 0;
	}

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (netif_running(netdev)) {
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);

		/* recreate the rx queue and the tx queue based on the
		 * new sizes */
		vmxnet3_tq_destroy_all(adapter);
		vmxnet3_rq_destroy_all(adapter);

		err = vmxnet3_create_queues(adapter, new_tx_ring_size,
					    new_rx_ring_size, new_rx_ring2_size,
					    adapter->txdata_desc_size,
					    new_rxdata_desc_size);
		if (err) {
			/* failed, most likely because of OOM, try default
			 * size */
			netdev_err(netdev, "failed to apply new sizes, "
				   "try the default ones\n");
			new_rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
			new_rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
			new_tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
			new_rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
				VMXNET3_DEF_RXDATA_DESC_SIZE : 0;

			err = vmxnet3_create_queues(adapter,
						    new_tx_ring_size,
						    new_rx_ring_size,
						    new_rx_ring2_size,
						    adapter->txdata_desc_size,
						    new_rxdata_desc_size);
			if (err) {
				netdev_err(netdev, "failed to create queues "
					   "with default sizes. Closing it\n");
				goto out;
			}
		}

		err = vmxnet3_activate_dev(adapter);
		if (err)
			netdev_err(netdev, "failed to re-activate, error %d."
				   " Closing it\n", err);
	}
	adapter->tx_ring_size = new_tx_ring_size;
	adapter->rx_ring_size = new_rx_ring_size;
	adapter->rx_ring2_size = new_rx_ring2_size;
	adapter->rxdata_desc_size = new_rxdata_desc_size;

out:
	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
	if (err)
		vmxnet3_force_close(adapter);

	return err;
}

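/* Report which header fields feed the RSS hash for a given flow type,
 * reading the live value from the device when the interface is up.
 */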
static int
vmxnet3_get_rss_hash_opts(struct vmxnet3_adapter *adapter,
			  struct ethtool_rxnfc *info)
{
	enum Vmxnet3_RSSField rss_fields;

	if (netif_running(adapter->netdev)) {
		unsigned long flags;

		spin_lock_irqsave(&adapter->cmd_lock, flags);

		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_RSS_FIELDS);
		rss_fields = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	} else {
		rss_fields = adapter->rss_fields;
	}

	info->data = 0;

	/* Report default options for RSS on vmxnet3 */
	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3 |
			      RXH_IP_SRC | RXH_IP_DST;
		break;
	case UDP_V4_FLOW:
		if (rss_fields & VMXNET3_RSS_FIELDS_UDPIP4)
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		info->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		if (rss_fields & VMXNET3_RSS_FIELDS_ESPIP4)
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V4_FLOW:
	case IPV4_FLOW:
		info->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case UDP_V6_FLOW:
		if (rss_fields & VMXNET3_RSS_FIELDS_UDPIP6)
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		info->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		if (VMXNET3_VERSION_GE_6(adapter) &&
		    (rss_fields & VMXNET3_RSS_FIELDS_ESPIP6))
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V6_FLOW:
	case IPV6_FLOW:
		info->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

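/* ethtool -N ethX rx-flow-hash <flow> <fields> handler: only "sd"
 * (src/dst IP) or "sdfn" (IPs plus both L4 port halves) combinations
 * are accepted, matching what the device can hash on.
 */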
static int
vmxnet3_set_rss_hash_opt(struct net_device *netdev,
			 struct vmxnet3_adapter *adapter,
			 struct ethtool_rxnfc *nfc)
{
	enum Vmxnet3_RSSField rss_fields = adapter->rss_fields;

	/* RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    !(nfc->data & RXH_L4_B_0_1) ||
		    !(nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	case UDP_V4_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			rss_fields &= ~VMXNET3_RSS_FIELDS_UDPIP4;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_fields |= VMXNET3_RSS_FIELDS_UDPIP4;
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			rss_fields &= ~VMXNET3_RSS_FIELDS_UDPIP6;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_fields |= VMXNET3_RSS_FIELDS_UDPIP6;
			break;
		default:
			return -EINVAL;
		}
		break;
	case ESP_V4_FLOW:
	case AH_V4_FLOW:
	case AH_ESP_V4_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			rss_fields &= ~VMXNET3_RSS_FIELDS_ESPIP4;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_fields |= VMXNET3_RSS_FIELDS_ESPIP4;
			break;
		default:
			return -EINVAL;
		}
		break;
	case ESP_V6_FLOW:
	case AH_V6_FLOW:
	case AH_ESP_V6_FLOW:
		if (!VMXNET3_VERSION_GE_6(adapter))
			return -EOPNOTSUPP;
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			rss_fields &= ~VMXNET3_RSS_FIELDS_ESPIP6;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_fields |= VMXNET3_RSS_FIELDS_ESPIP6;
			break;
		default:
			return -EINVAL;
		}
		break;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    (nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* if we changed something we need to update flags */
	if (rss_fields != adapter->rss_fields) {
		adapter->default_rss_fields = false;
		if (netif_running(netdev)) {
			struct Vmxnet3_DriverShared *shared = adapter->shared;
			union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
			unsigned long flags;

			spin_lock_irqsave(&adapter->cmd_lock, flags);
			cmdInfo->setRssFields = rss_fields;
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_SET_RSS_FIELDS);

			/* Not all requested RSS may get applied, so get and
			 * cache what was actually applied.
			 */
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_GET_RSS_FIELDS);
			adapter->rss_fields =
				VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
			spin_unlock_irqrestore(&adapter->cmd_lock, flags);
		} else {
			/* When the device is activated, we will try to apply
			 * these rules and cache the applied value later.
			 */
			adapter->rss_fields = rss_fields;
		}
	}
	return 0;
}

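/* Dispatch for ETHTOOL_GRXRINGS (rx queue count) and ETHTOOL_GRXFH
 * (hash fields); the latter requires a version 4+ device with RSS
 * enabled.
 */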
static int
vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
		  u32 *rules)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = adapter->num_rx_queues;
		break;
	case ETHTOOL_GRXFH:
		if (!VMXNET3_VERSION_GE_4(adapter)) {
			err = -EOPNOTSUPP;
			break;
		}
#ifdef VMXNET3_RSS
		if (!adapter->rss) {
			err = -EOPNOTSUPP;
			break;
		}
#endif
		err = vmxnet3_get_rss_hash_opts(adapter, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int
vmxnet3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	if (!VMXNET3_VERSION_GE_4(adapter)) {
		err = -EOPNOTSUPP;
		goto done;
	}
#ifdef VMXNET3_RSS
	if (!adapter->rss) {
		err = -EOPNOTSUPP;
		goto done;
	}
#endif

	switch (info->cmd) {
	case ETHTOOL_SRXFH:
		err = vmxnet3_set_rss_hash_opt(netdev, adapter, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

done:
	return err;
}

#ifdef VMXNET3_RSS
static u32
vmxnet3_get_rss_indir_size(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct UPT1_RSSConf *rssConf = adapter->rss_conf;

	return rssConf->indTableSize;
}

static int
vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key, u8 *hfunc)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct UPT1_RSSConf *rssConf = adapter->rss_conf;
	unsigned int n = rssConf->indTableSize;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;
	if (!p)
		return 0;
	if (n > UPT1_RSS_MAX_IND_TABLE_SIZE)
		return 0;
	while (n--)
		p[n] = rssConf->indTable[n];
	return 0;
}

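/* ethtool -X handler: only the indirection table may be changed; the
 * hash key and hash function (Toeplitz, ETH_RSS_HASH_TOP) are fixed.
 */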
static int
vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key,
		const u8 hfunc)
{
	unsigned int i;
	unsigned long flags;
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct UPT1_RSSConf *rssConf = adapter->rss_conf;

	/* We do not allow change in unsupported parameters */
	if (key ||
	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;
	if (!p)
		return 0;
	for (i = 0; i < rssConf->indTableSize; i++)
		rssConf->indTable[i] = p[i];

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_RSSIDT);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	return 0;
}
#endif

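/* Interrupt coalescing, version 3+ devices only.  The modes are
 * mutually exclusive: disabled, adaptive rx, static frame counts, or
 * rate-based ("rx-usecs"); vmxnet3_set_coalesce() enforces this.
 */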
static int
vmxnet3_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (!VMXNET3_VERSION_GE_3(adapter))
		return -EOPNOTSUPP;

	switch (adapter->coal_conf->coalMode) {
	case VMXNET3_COALESCE_DISABLED:
		/* struct ethtool_coalesce is already initialized to 0 */
		break;
	case VMXNET3_COALESCE_ADAPT:
		ec->use_adaptive_rx_coalesce = true;
		break;
	case VMXNET3_COALESCE_STATIC:
		ec->tx_max_coalesced_frames =
			adapter->coal_conf->coalPara.coalStatic.tx_comp_depth;
		ec->rx_max_coalesced_frames =
			adapter->coal_conf->coalPara.coalStatic.rx_depth;
		break;
	case VMXNET3_COALESCE_RBC: {
		u32 rbc_rate;

		rbc_rate = adapter->coal_conf->coalPara.coalRbc.rbc_rate;
		ec->rx_coalesce_usecs = VMXNET3_COAL_RBC_USECS(rbc_rate);
	}
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int
vmxnet3_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_DriverShared *shared = adapter->shared;
	union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
	unsigned long flags;

	if (!VMXNET3_VERSION_GE_3(adapter))
		return -EOPNOTSUPP;

	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->use_adaptive_rx_coalesce == 0) &&
	    (ec->tx_max_coalesced_frames == 0) &&
	    (ec->rx_max_coalesced_frames == 0)) {
		memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
		adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
		goto done;
	}

	if (ec->rx_coalesce_usecs != 0) {
		u32 rbc_rate;

		if ((ec->use_adaptive_rx_coalesce != 0) ||
		    (ec->tx_max_coalesced_frames != 0) ||
		    (ec->rx_max_coalesced_frames != 0)) {
			return -EINVAL;
		}

		rbc_rate = VMXNET3_COAL_RBC_RATE(ec->rx_coalesce_usecs);
		if (rbc_rate < VMXNET3_COAL_RBC_MIN_RATE ||
		    rbc_rate > VMXNET3_COAL_RBC_MAX_RATE) {
			return -EINVAL;
		}

		memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
		adapter->coal_conf->coalMode = VMXNET3_COALESCE_RBC;
		adapter->coal_conf->coalPara.coalRbc.rbc_rate = rbc_rate;
		goto done;
	}

	if (ec->use_adaptive_rx_coalesce != 0) {
		if ((ec->rx_coalesce_usecs != 0) ||
		    (ec->tx_max_coalesced_frames != 0) ||
		    (ec->rx_max_coalesced_frames != 0)) {
			return -EINVAL;
		}
		memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
		adapter->coal_conf->coalMode = VMXNET3_COALESCE_ADAPT;
		goto done;
	}

	if ((ec->tx_max_coalesced_frames != 0) ||
	    (ec->rx_max_coalesced_frames != 0)) {
		if ((ec->rx_coalesce_usecs != 0) ||
		    (ec->use_adaptive_rx_coalesce != 0)) {
			return -EINVAL;
		}

		if ((ec->tx_max_coalesced_frames >
		    VMXNET3_COAL_STATIC_MAX_DEPTH) ||
		    (ec->rx_max_coalesced_frames >
		     VMXNET3_COAL_STATIC_MAX_DEPTH)) {
			return -EINVAL;
		}

		memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
		adapter->coal_conf->coalMode = VMXNET3_COALESCE_STATIC;

		adapter->coal_conf->coalPara.coalStatic.tx_comp_depth =
			(ec->tx_max_coalesced_frames ?
			 ec->tx_max_coalesced_frames :
			 VMXNET3_COAL_STATIC_DEFAULT_DEPTH);

		adapter->coal_conf->coalPara.coalStatic.rx_depth =
			(ec->rx_max_coalesced_frames ?
			 ec->rx_max_coalesced_frames :
			 VMXNET3_COAL_STATIC_DEFAULT_DEPTH);

		adapter->coal_conf->coalPara.coalStatic.tx_depth =
			 VMXNET3_COAL_STATIC_DEFAULT_DEPTH;
		goto done;
	}

done:
	adapter->default_coal_mode = false;
	if (netif_running(netdev)) {
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		cmdInfo->varConf.confVer = 1;
		cmdInfo->varConf.confLen =
			cpu_to_le32(sizeof(*adapter->coal_conf));
		cmdInfo->varConf.confPA  = cpu_to_le64(adapter->coal_conf_pa);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_SET_COALESCE);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	}

	return 0;
}

static const struct ethtool_ops vmxnet3_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.get_drvinfo       = vmxnet3_get_drvinfo,
	.get_regs_len      = vmxnet3_get_regs_len,
	.get_regs          = vmxnet3_get_regs,
	.get_wol           = vmxnet3_get_wol,
	.set_wol           = vmxnet3_set_wol,
	.get_link          = ethtool_op_get_link,
	.get_coalesce      = vmxnet3_get_coalesce,
	.set_coalesce      = vmxnet3_set_coalesce,
	.get_strings       = vmxnet3_get_strings,
	.get_sset_count	   = vmxnet3_get_sset_count,
	.get_ethtool_stats = vmxnet3_get_ethtool_stats,
	.get_ringparam     = vmxnet3_get_ringparam,
	.set_ringparam     = vmxnet3_set_ringparam,
	.get_rxnfc         = vmxnet3_get_rxnfc,
	.set_rxnfc         = vmxnet3_set_rxnfc,
#ifdef VMXNET3_RSS
	.get_rxfh_indir_size = vmxnet3_get_rss_indir_size,
	.get_rxfh          = vmxnet3_get_rss,
	.set_rxfh          = vmxnet3_set_rss,
#endif
	.get_link_ksettings = vmxnet3_get_link_ksettings,
};

void vmxnet3_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &vmxnet3_ethtool_ops;
}