/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2016 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
 * will also try to cache the proper offsets if RSS/FCoE are enabled along
 * with VMDq.
 *
 **/
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	int i;
	u16 reg_idx, pool;
	u8 tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
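	/* __ALIGN_MASK(1, ~vmdq->mask) works out to the number of queues in
	 * each VMDq pool (e.g. 8 with IXGBE_82599_VMDQ_8Q_MASK), so scaling
	 * by vmdq->offset points reg_idx at the first queue of the first
	 * pool owned by the PF.
	 */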
	pool = 0;
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs) {
			pool++;
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		}
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
	}

	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* nothing to do if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return true;

	/* The work is already done if the FCoE ring is shared */
	if (fcoe->offset < tcs)
		return true;

	/* The FCoE rings exist separately, we need to move their reg_idx */
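	/* Dedicated FCoE rings start in the first pool past the VMDq
	 * allocation; within each pool the ring sits at the queue offset of
	 * the FCoE traffic class.
	 */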
	if (fcoe->indices) {
		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->rx_ring[i]->reg_idx = reg_idx;
			adapter->rx_ring[i]->netdev = adapter->netdev;
			reg_idx++;
		}

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->tx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}
	}

#endif /* IXGBE_FCOE */
	return true;
}

/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = adapter->hw_tcs;

	*tx = 0;
	*rx = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* TxQs/TC: 4	RxQs/TC: 8 */
		*tx = tc << 2; /* 0, 4,  8, 12, 16, 20, 24, 28 */
		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		if (num_tcs > 4) {
			/*
			 * TCs    : TC0/1 TC2/3 TC4-7
			 * TxQs/TC:    32    16     8
			 * RxQs/TC:    16    16    16
			 */
			*rx = tc << 4;
			if (tc < 3)
				*tx = tc << 5;		/*   0,  32,  64 */
			else if (tc < 5)
				*tx = (tc + 2) << 4;	/*  80,  96 */
			else
				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
		} else {
			/*
			 * TCs    : TC0 TC1 TC2/3
			 * TxQs/TC:  64  32    16
			 * RxQs/TC:  32  32    32
			 */
			*rx = tc << 5;
			if (tc < 2)
				*tx = tc << 6;		/*  0,  64 */
			else
				*tx = (tc + 4) << 4;	/* 96, 112 */
		}
		break;
	default:
		break;
	}
}

/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	u8 num_tcs = adapter->hw_tcs;
	unsigned int tx_idx, rx_idx;
	int tc, offset, rss_i, i;

	/* verify we have DCB queueing enabled before proceeding */
	if (num_tcs <= 1)
		return false;

	rss_i = adapter->ring_feature[RING_F_RSS].indices;

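	/* Each TC owns a contiguous block of rss_i rings; the starting
	 * register index of each block comes from the fixed hardware layout
	 * reported by ixgbe_get_first_reg_idx().
	 */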
	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
			adapter->rx_ring[offset + i]->netdev = adapter->netdev;
			adapter->tx_ring[offset + i]->dcb_tc = tc;
			adapter->rx_ring[offset + i]->dcb_tc = tc;
		}
	}

	return true;
}

#endif
/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
 * @adapter: board private structure to initialize
 *
 * SR-IOV doesn't use any descriptor rings but changes the default if
 * no other mapping is used.
 *
 */
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
	u16 reg_idx, pool;
	int i;

	/* only proceed if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
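	/* Same pool-stride walk as the DCB/SR-IOV case above, except the
	 * per-pool queue count is bounded by the RSS indices instead of the
	 * number of traffic classes.
	 */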
	pool = 0;
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= rss->indices) {
			pool++;
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		}
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_rx_queues; i++, reg_idx++) {
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = adapter->netdev;
	}

#endif
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & rss->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;

#endif

	return true;
}

/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i, reg_idx;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i]->reg_idx = i;
		adapter->rx_ring[i]->netdev = adapter->netdev;
	}
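	/* Tx and XDP rings share a single register index space; XDP rings
	 * simply continue numbering where the regular Tx rings leave off.
	 */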
	for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;
	for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++)
		adapter->xdp_ring[i]->reg_idx = reg_idx;

	return true;
}

/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order of the various feature calls is important.  It must start
 * with the "most" features enabled at the same time, then trickle down to the
 * fewest features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0]->reg_idx = 0;
	adapter->tx_ring[0]->reg_idx = 0;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb_sriov(adapter))
		return;

	if (ixgbe_cache_ring_dcb(adapter))
		return;

#endif
	if (ixgbe_cache_ring_sriov(adapter))
		return;

	ixgbe_cache_ring_rss(adapter);
}

static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter)
{
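	/* one XDP Tx ring per possible CPU so XDP_TX/XDP_REDIRECT can
	 * transmit without contending for a shared ring
	 */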
	return adapter->xdp_prog ? nr_cpu_ids : 0;
}

#define IXGBE_RSS_64Q_MASK	0x3F
#define IXGBE_RSS_16Q_MASK	0xF
#define IXGBE_RSS_8Q_MASK	0x7
#define IXGBE_RSS_4Q_MASK	0x3
#define IXGBE_RSS_2Q_MASK	0x1
#define IXGBE_RSS_DISABLED_MASK	0x0

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  Also assign queues based on DCB
 * priorities and map accordingly.
 *
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
	int i;
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	u8 tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* limit VMDq instances on the PF by number of Tx queues */
	vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs);

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* 16 pools w/ 8 TC per pool */
	if (tcs > 4) {
		vmdq_i = min_t(u16, vmdq_i, 16);
		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
	/* 32 pools w/ 4 TC per pool */
	} else {
		vmdq_i = min_t(u16, vmdq_i, 32);
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
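	/* 128 is the total number of hardware queues; dividing by the pool
	 * size gives the pool count, and each pool left over after the VMDq
	 * allocation can host one FCoE queue.
	 */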
	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/*
	 * We do not support DCB, VMDq, and RSS all simultaneously
	 * so we will disable RSS since it is the lowest priority
	 */
	adapter->ring_feature[RING_F_RSS].indices = 1;
	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = tcs;

	adapter->num_tx_queues = vmdq_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = vmdq_i * tcs;

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * tcs;

			/* add queues to adapter */
			adapter->num_tx_queues += fcoe_i;
			adapter->num_rx_queues += fcoe_i;
		} else if (tcs > 1) {
			/* use queue belonging to FcoE TC */
			fcoe->indices = 1;
			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
		} else {
			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

			fcoe->indices = 0;
			fcoe->offset = 0;
		}
	}

#endif /* IXGBE_FCOE */
	/* configure TC to queue mapping */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(adapter->netdev, i, 1, i);

	return true;
}

static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *f;
	int rss_i, rss_m, i;
	int tcs;

	/* Map queue offset and counts onto allocated tx queues */
	tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* determine the upper limit for our current DCB mode */
	rss_i = dev->num_tx_queues / tcs;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 8 TC w/ 4 queues per TC */
		rss_i = min_t(u16, rss_i, 4);
		rss_m = IXGBE_RSS_4Q_MASK;
	} else if (tcs > 4) {
		/* 8 TC w/ 8 queues per TC */
		rss_i = min_t(u16, rss_i, 8);
		rss_m = IXGBE_RSS_8Q_MASK;
	} else {
		/* 4 TC w/ 16 queues per TC */
		rss_i = min_t(u16, rss_i, 16);
		rss_m = IXGBE_RSS_16Q_MASK;
	}

	/* set RSS mask and indices */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = min_t(int, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = rss_m;

	/* disable ATR as it is not supported when multiple TCs are enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 tc = ixgbe_fcoe_get_tc(adapter);

		f = &adapter->ring_feature[RING_F_FCOE];
		f->indices = min_t(u16, rss_i, f->limit);
		f->offset = rss_i * tc;
	}

#endif /* IXGBE_FCOE */
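	/* each TC is exposed to the stack as a block of rss_i queues; with
	 * 4 TCs and rss_i == 16, for example, TC1 maps to netdev queues
	 * 16-31
	 */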
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	adapter->num_tx_queues = rss_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = rss_i * tcs;

	return true;
}

#endif
/**
 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  If RSS is available, then also try to
 * enable RSS and map accordingly.
 *
 **/
static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif

	/* only proceed if SR-IOV is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* limit l2fwd RSS based on total Tx queue limit */
	rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i);

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* double check we are limited to maximum pools */
	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);

	/* 64 pool mode with 2 queues per pool */
	if (vmdq_i > 32) {
		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
		rss_m = IXGBE_RSS_2Q_MASK;
		rss_i = min_t(u16, rss_i, 2);
	/* 32 pool mode with up to 4 queues per pool */
	} else {
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
		rss_m = IXGBE_RSS_4Q_MASK;
		/* We can support 4, 2, or 1 queues */
		rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
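	/* unlike the DCB case above this is a raw queue count: everything
	 * past the last queue of the last VMDq pool
	 */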
	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/* limit RSS based on user input and save for later use */
	adapter->ring_feature[RING_F_RSS].indices = rss_i;
	adapter->ring_feature[RING_F_RSS].mask = rss_m;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = rss_i;

	adapter->num_rx_queues = vmdq_i * rss_i;
	adapter->num_tx_queues = vmdq_i * rss_i;
	adapter->num_xdp_queues = 0;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/*
	 * FCoE can use rings from adjacent buffers to allow RSS
	 * like behavior.  To account for this we need to add the
	 * FCoE indices to the total ring count.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (vmdq_i > 1 && fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * rss_i;
		} else {
			/* merge FCoE queues with RSS queues */
			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());

			/* limit indices to rss_i if MSI-X is disabled */
			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
				fcoe_i = rss_i;

			/* attempt to reserve some queues for just FCoE */
			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
			fcoe->offset = fcoe_i - fcoe->indices;

			fcoe_i -= rss_i;
		}

		/* add queues to adapter */
		adapter->num_tx_queues += fcoe_i;
		adapter->num_rx_queues += fcoe_i;
	}

#endif
	/* populate TC0 for use by pool 0 */
	netdev_set_tc_queue(adapter->netdev, 0,
			    adapter->num_rx_queues_per_pool, 0);

	return true;
}

/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring_feature *f;
	u16 rss_i;

	/* set mask for 16 queue limit of RSS */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = f->limit;

	f->indices = rss_i;

	if (hw->mac.type < ixgbe_mac_X550)
		f->mask = IXGBE_RSS_16Q_MASK;
	else
		f->mask = IXGBE_RSS_64Q_MASK;

	/* disable ATR by default, it will be configured below */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	/*
	 * Use Flow Director in addition to RSS to ensure the best
	 * distribution of flows across cores, even when an FDIR flow
	 * isn't matched.
	 */
	if (rss_i > 1 && adapter->atr_sample_rate) {
		f = &adapter->ring_feature[RING_F_FDIR];

		rss_i = f->indices = f->limit;

		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
	}

#ifdef IXGBE_FCOE
	/*
	 * FCoE can exist on the same rings as standard network traffic
	 * however it is preferred to avoid that if possible.  In order
	 * to get the best performance we allocate as many FCoE queues
	 * as we can and we place them at the end of the ring array to
	 * avoid sharing queues with standard RSS on systems with 24 or
	 * more CPUs.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct net_device *dev = adapter->netdev;
		u16 fcoe_i;

		f = &adapter->ring_feature[RING_F_FCOE];

		/* merge FCoE queues with RSS queues */
		fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
		fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);

		/* limit indices to rss_i if MSI-X is disabled */
		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
			fcoe_i = rss_i;

		/* attempt to reserve some queues for just FCoE */
		f->indices = min_t(u16, fcoe_i, f->limit);
		f->offset = fcoe_i - f->indices;
		rss_i = max_t(u16, fcoe_i, rss_i);
	}

#endif /* IXGBE_FCOE */
	adapter->num_rx_queues = rss_i;
	adapter->num_tx_queues = rss_i;
	adapter->num_xdp_queues = ixgbe_xdp_queues(adapter);

	return true;
}

/**
 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the largest set of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_pools = 1;
	adapter->num_rx_queues_per_pool = 1;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_sriov_queues(adapter))
		return;

	if (ixgbe_set_dcb_queues(adapter))
		return;

#endif
	if (ixgbe_set_sriov_queues(adapter))
		return;

	ixgbe_set_rss_queues(adapter);
}

/**
 * ixgbe_acquire_msix_vectors - acquire MSI-X vectors
 * @adapter: board private structure
 *
 * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
 * return a negative error code if unable to acquire MSI-X vectors for any
 * reason.
 */
static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i, vectors, vector_threshold;

	/* We start by asking for one vector per queue pair with XDP queues
	 * being stacked with TX queues.
	 */
	vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);
	vectors = max(vectors, adapter->num_xdp_queues);
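	/* e.g. 16 Rx, 16 Tx and 8 XDP rings still request only 16 queue
	 * vectors, since XDP rings piggyback on the Tx interrupts
	 */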

	/* It is easy to be greedy for MSI-X vectors. However, it really
	 * doesn't do much good if we have a lot more vectors than CPUs. We'll
	 * be somewhat conservative and only ask for (roughly) the same number
	 * of vectors as there are CPUs.
	 */
	vectors = min_t(int, vectors, num_online_cpus());

	/* Some vectors are necessary for non-queue interrupts */
	vectors += NON_Q_VECTORS;

	/* Hardware can only support a maximum of hw.mac->max_msix_vectors.
	 * With features such as RSS and VMDq, we can easily surpass the
	 * number of Rx and Tx descriptor queues supported by our device.
	 * Thus, we cap the maximum in the rare cases where the CPU count also
	 * exceeds our vector limit
	 */
	vectors = min_t(int, vectors, hw->mac.max_msix_vectors);

	/* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0]
	 * handler, and (2) an Other (Link Status Change, etc.) handler.
	 */
	vector_threshold = MIN_MSIX_COUNT;

	adapter->msix_entries = kcalloc(vectors,
					sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		return -ENOMEM;

	for (i = 0; i < vectors; i++)
		adapter->msix_entries[i].entry = i;

	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	if (vectors < 0) {
		/* A negative count of allocated vectors indicates an error in
		 * acquiring within the specified range of MSI-X vectors
		 */
		e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n",
			   vectors);

		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;

		return vectors;
	}

	/* we successfully allocated some number of vectors within our
	 * requested range.
	 */
	adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_q_vectors, or the number of vectors we were allocated.
	 */
	vectors -= NON_Q_VECTORS;
	adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);

	return 0;
}

static void ixgbe_add_ring(struct ixgbe_ring *ring,
			   struct ixgbe_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
	head->next_update = jiffies + 1;
}

/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @xdp_count: total number of XDP rings to allocate
 * @xdp_idx: index of first XDP ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
				int v_count, int v_idx,
				int txr_count, int txr_idx,
				int xdp_count, int xdp_idx,
				int rxr_count, int rxr_idx)
{
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int node = NUMA_NO_NODE;
	int cpu = -1;
	int ring_count, size;
	u8 tcs = adapter->hw_tcs;

	ring_count = txr_count + rxr_count + xdp_count;
	size = sizeof(struct ixgbe_q_vector) +
	       (sizeof(struct ixgbe_ring) * ring_count);

	/* customize cpu for Flow Director mapping */
	if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
		if (rss_i > 1 && adapter->atr_sample_rate) {
			if (cpu_online(v_idx)) {
				cpu = v_idx;
				node = cpu_to_node(cpu);
			}
		}
	}

	/* allocate q_vector and rings */
	q_vector = kzalloc_node(size, GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	q_vector->numa_node = node;

#ifdef CONFIG_IXGBE_DCA
	/* initialize CPU for DCA */
	q_vector->cpu = -1;

#endif
	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       ixgbe_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* Initialize setting for adaptive ITR */
	q_vector->tx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
			   IXGBE_ITR_ADAPTIVE_LATENCY;
	q_vector->rx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
			   IXGBE_ITR_ADAPTIVE_LATENCY;

	/* initialize ITR */
	if (txr_count && !rxr_count) {
		/* tx only vector */
		if (adapter->tx_itr_setting == 1)
			q_vector->itr = IXGBE_12K_ITR;
		else
			q_vector->itr = adapter->tx_itr_setting;
	} else {
		/* rx or rx/tx vector */
		if (adapter->rx_itr_setting == 1)
			q_vector->itr = IXGBE_20K_ITR;
		else
			q_vector->itr = adapter->rx_itr_setting;
	}

	/* initialize pointer to rings */
	ring = q_vector->ring;

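	/* Tx and Rx rings are interleaved across q_vectors: each vector
	 * takes rings strided by v_count, so when rings outnumber vectors,
	 * ring i is serviced by q_vector i % v_count.  XDP rings below are
	 * assigned contiguously instead.
	 */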
	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	while (xdp_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = xdp_idx;
		set_ring_xdp(ring);

		/* assign ring to adapter */
		adapter->xdp_ring[xdp_idx] = ring;

		/* update count and index */
		xdp_count--;
		xdp_idx++;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

#ifdef IXGBE_FCOE
		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;
			f = &adapter->ring_feature[RING_F_FCOE];
			if ((rxr_idx >= f->offset) &&
			    (rxr_idx < f->offset + f->indices))
				set_bit(__IXGBE_RX_FCOE, &ring->state);
		}

#endif /* IXGBE_FCOE */
		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}

/**
 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
	struct ixgbe_ring *ring;

	ixgbe_for_each_ring(ring, q_vector->tx) {
		if (ring_is_xdp(ring))
			adapter->xdp_ring[ring->queue_index] = NULL;
		else
			adapter->tx_ring[ring->queue_index] = NULL;
	}

	ixgbe_for_each_ring(ring, q_vector->rx)
		adapter->rx_ring[ring->queue_index] = NULL;

	adapter->q_vector[v_idx] = NULL;
	napi_hash_del(&q_vector->napi);
	netif_napi_del(&q_vector->napi);

	/*
	 * ixgbe_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}

/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int xdp_remaining = adapter->num_xdp_queues;
	int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
	int err;

	/* only one q_vector if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
						   0, 0, 0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

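	/* spread the remaining rings as evenly as possible across the
	 * remaining vectors; DIV_ROUND_UP recomputes each vector's share so
	 * any remainder is absorbed by the earlier vectors
	 */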
	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
		int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx);

		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
					   tqpv, txr_idx,
					   xqpv, xdp_idx,
					   rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		xdp_remaining -= xqpv;
		rxr_idx++;
		txr_idx++;
		xdp_idx += xqpv;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}

/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);
}

static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	int err;

	/* We will try to get MSI-X interrupts first */
	if (!ixgbe_acquire_msix_vectors(adapter))
		return;

	/* At this point, we do not have MSI-X capabilities. We need to
	 * reconfigure or disable various features which require MSI-X
	 * capability.
	 */

	/* Disable DCB unless we only have a single traffic class */
	if (adapter->hw_tcs > 1) {
		e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
		netdev_reset_tc(adapter->netdev);

		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}

	adapter->hw_tcs = 0;
	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;

	/* Disable SR-IOV support */
	e_dev_warn("Disabling SR-IOV support\n");
	ixgbe_disable_sriov(adapter);

	/* Disable RSS */
	e_dev_warn("Disabling RSS support\n");
	adapter->ring_feature[RING_F_RSS].limit = 1;

	/* recalculate number of queues now that many features have been
	 * changed or disabled.
	 */
	ixgbe_set_num_queues(adapter);
	adapter->num_q_vectors = 1;

	err = pci_enable_msi(adapter->pdev);
	if (err)
		e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n",
			   err);
	else
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}

/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	/* Set interrupt mode */
	ixgbe_set_interrupt_capability(adapter);

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	ixgbe_cache_ring_register(adapter);

	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u, XDP Queue count = %u\n",
		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
		   adapter->num_rx_queues, adapter->num_tx_queues,
		   adapter->num_xdp_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
	return err;
}

/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}

void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
		       u32 fceof_saidx, u32 type_tucmd, u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->fceof_saidx	= cpu_to_le32(fceof_saidx);
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
}