// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"

/**
 * ice_setup_rx_ctx - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in RLAN context.
 */
static int ice_setup_rx_ctx(struct ice_ring *ring)
{
	struct ice_vsi *vsi = ring->vsi;
	struct ice_hw *hw = &vsi->back->hw;
	u32 rxdid = ICE_RXDID_FLEX_NIC;
	struct ice_rlan_ctx rlan_ctx;
	u32 regval;
	u16 pf_q;
	int err;

	/* what is Rx queue number in global space of 2K Rx queues */
	pf_q = vsi->rxq_map[ring->q_index];

	/* clear the context structure first */
	memset(&rlan_ctx, 0, sizeof(rlan_ctx));

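	/* the ring base address is programmed in 128-byte units, hence the
	 * right shift of the DMA address by 7
	 */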
	rlan_ctx.base = ring->dma >> 7;

	rlan_ctx.qlen = ring->count;

	/* Receive Packet Data Buffer Size.
	 * The Packet Data Buffer Size is defined in 128 byte units.
	 */
	rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;

	/* use 32 byte descriptors */
	rlan_ctx.dsize = 1;

	/* Strip the Ethernet CRC bytes before the packet is posted to host
	 * memory.
	 */
	rlan_ctx.crcstrip = 1;

	/* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
	rlan_ctx.l2tsel = 1;

	rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
	rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
	rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;

	/* This controls whether VLAN is stripped from inner headers
	 * The VLAN in the inner L2 header is stripped to the receive
	 * descriptor if enabled by this flag.
	 */
	rlan_ctx.showiv = 0;

	/* Max packet size for this queue - must not be set to a larger value
	 * than 5 x DBUF
	 */
	rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
			       ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len);

	/* Rx queue threshold in units of 64 */
	rlan_ctx.lrxqthresh = 1;

	 /* Enable Flexible Descriptors in the queue context which
	  * allows this driver to select a specific receive descriptor format
	  */
	if (vsi->type != ICE_VSI_VF) {
		regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
		regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
			QRXFLXP_CNTXT_RXDID_IDX_M;

		/* increasing context priority to pick up profile ID;
		 * default is 0x01; setting to 0x03 to ensure the profile
		 * is programmed even if a previous context has the same
		 * priority
		 */
		regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
			QRXFLXP_CNTXT_RXDID_PRIO_M;

		wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
	}

	/* Absolute queue number out of 2K needs to be passed */
	err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
	if (err) {
		dev_err(&vsi->back->pdev->dev,
			"Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
			pf_q, err);
		return -EIO;
	}

	if (vsi->type == ICE_VSI_VF)
		return 0;

	/* init queue specific tail register */
	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
	writel(0, ring->tail);
	ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));

	return 0;
}

/**
 * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
 * @ring: The Tx ring to configure
 * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
 * @pf_q: queue index in the PF space
 *
 * Configure the Tx descriptor ring in TLAN context.
 */
static void
ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
{
	struct ice_vsi *vsi = ring->vsi;
	struct ice_hw *hw = &vsi->back->hw;

	tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;

	tlan_ctx->port_num = vsi->port_info->lport;

	/* Transmit Queue Length */
	tlan_ctx->qlen = ring->count;

	ice_set_cgd_num(tlan_ctx, ring);

	/* PF number */
	tlan_ctx->pf_num = hw->pf_id;

	/* queue belongs to a specific VSI type
	 * VF / VM index should be programmed per vmvf_type setting:
	 * for vmvf_type = VF, it is VF number between 0-256
	 * for vmvf_type = VM, it is VM number between 0-767
	 * for PF or EMP this field should be set to zero
	 */
	switch (vsi->type) {
	case ICE_VSI_LB:
		/* fall through */
	case ICE_VSI_PF:
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
		break;
	case ICE_VSI_VF:
		/* Firmware expects vmvf_num to be absolute VF ID */
		tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
		break;
	default:
		return;
	}

	/* make sure the context is associated with the right VSI */
	tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);

	tlan_ctx->tso_ena = ICE_TX_LEGACY;
	tlan_ctx->tso_qnum = pf_q;

	/* Legacy or Advanced Host Interface:
	 * 0: Advanced Host Interface
	 * 1: Legacy Host Interface
	 */
	tlan_ctx->legacy_int = ICE_TX_LEGACY;
}

/**
 * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @ena: enable or disable state of the queue
 *
 * This routine will wait for the given Rx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT in case of failing to reach the requested state after
 * multiple retries; else will return 0 in case of success.
 */
static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
{
	int i;

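	/* each attempt sleeps 20-40 us, so the total wait is bounded by
	 * ICE_Q_WAIT_MAX_RETRY * 40 us
	 */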
	for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
		if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
			      QRX_CTRL_QENA_STAT_M))
			return 0;

		usleep_range(20, 40);
	}

	return -ETIMEDOUT;
}

/**
 * ice_vsi_ctrl_rx_ring - Start or stop a VSI's Rx ring
 * @vsi: the VSI being configured
 * @ena: start or stop the Rx rings
 * @rxq_idx: Rx queue index
 */
#ifndef CONFIG_PCI_IOV
static
#endif /* !CONFIG_PCI_IOV */
int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
{
	int pf_q = vsi->rxq_map[rxq_idx];
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int ret = 0;
	u32 rx_reg;

	rx_reg = rd32(hw, QRX_CTRL(pf_q));

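	/* QENA_REQ carries the driver's enable request while QENA_STAT is
	 * hardware's acknowledgment, so the queue state is judged by the
	 * STAT bit
	 */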
	/* Skip if the queue is already in the requested state */
	if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
		return 0;

	/* turn on/off the queue */
	if (ena)
		rx_reg |= QRX_CTRL_QENA_REQ_M;
	else
		rx_reg &= ~QRX_CTRL_QENA_REQ_M;
	wr32(hw, QRX_CTRL(pf_q), rx_reg);

	/* wait for the change to finish */
	ret = ice_pf_rxq_wait(pf, pf_q, ena);
	if (ret)
		dev_err(&pf->pdev->dev,
			"VSI idx %d Rx ring %d %sable timeout\n",
			vsi->idx, pf_q, (ena ? "en" : "dis"));

	return ret;
}

/**
 * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings
 * @vsi: the VSI being configured
 * @ena: start or stop the Rx rings
 */
static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
{
	int i, ret = 0;

	for (i = 0; i < vsi->num_rxq; i++) {
		ret = ice_vsi_ctrl_rx_ring(vsi, ena, i);
		if (ret)
			break;
	}

	return ret;
}

/**
 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
 * @vsi: VSI pointer
 *
 * On error: returns error code (negative)
 * On success: returns 0
 */
static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;

	/* allocate memory for both Tx and Rx ring pointers */
	vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
				     sizeof(*vsi->tx_rings), GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;

	vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
				     sizeof(*vsi->rx_rings), GFP_KERNEL);
	if (!vsi->rx_rings)
		goto err_rings;

	vsi->txq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
				    sizeof(*vsi->txq_map), GFP_KERNEL);

	if (!vsi->txq_map)
		goto err_txq_map;

	vsi->rxq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
				    sizeof(*vsi->rxq_map), GFP_KERNEL);
	if (!vsi->rxq_map)
		goto err_rxq_map;


	/* There is no need to allocate q_vectors for a loopback VSI. */
	if (vsi->type == ICE_VSI_LB)
		return 0;

	/* allocate memory for q_vector pointers */
	vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, vsi->num_q_vectors,
				      sizeof(*vsi->q_vectors), GFP_KERNEL);
	if (!vsi->q_vectors)
		goto err_vectors;

	return 0;

err_vectors:
	devm_kfree(&pf->pdev->dev, vsi->rxq_map);
err_rxq_map:
	devm_kfree(&pf->pdev->dev, vsi->txq_map);
err_txq_map:
	devm_kfree(&pf->pdev->dev, vsi->rx_rings);
err_rings:
	devm_kfree(&pf->pdev->dev, vsi->tx_rings);
	return -ENOMEM;
}

/**
 * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
	switch (vsi->type) {
	case ICE_VSI_PF:
		/* fall through */
	case ICE_VSI_LB:
		vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
		vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
		break;
	default:
		dev_dbg(&vsi->back->pdev->dev,
			"Not setting number of Tx/Rx descriptors for VSI type %d\n",
			vsi->type);
		break;
	}
}

/**
 * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
 * @vsi: the VSI being configured
 * @vf_id: ID of the VF being configured
 */
static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
{
	struct ice_pf *pf = vsi->back;
	struct ice_vf *vf = NULL;

	if (vsi->type == ICE_VSI_VF)
		vsi->vf_id = vf_id;

	switch (vsi->type) {
	case ICE_VSI_PF:
		vsi->alloc_txq = pf->num_lan_tx;
		vsi->alloc_rxq = pf->num_lan_rx;
		vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
		break;
	case ICE_VSI_VF:
		vf = &pf->vf[vsi->vf_id];
		vsi->alloc_txq = vf->num_vf_qs;
		vsi->alloc_rxq = vf->num_vf_qs;
		/* pf->num_vf_msix includes (VF miscellaneous vector +
		 * data queue interrupts). Since vsi->num_q_vectors is number
		 * of queue vectors, subtract 1 (ICE_NONQ_VECS_VF) from the
		 * original vector count
		 */
		vsi->num_q_vectors = pf->num_vf_msix - ICE_NONQ_VECS_VF;
		break;
	case ICE_VSI_LB:
		vsi->alloc_txq = 1;
		vsi->alloc_rxq = 1;
		break;
	default:
		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
		break;
	}

	ice_vsi_set_num_desc(vsi);
}

/**
 * ice_get_free_slot - get the next non-NULL location index in array
 * @array: array to search
 * @size: size of the array
 * @curr: last known occupied index to be used as a search hint
 *
 * void * is being used to keep the functionality generic. This lets us use this
 * function on any array of pointers.
 */
static int ice_get_free_slot(void *array, int size, int curr)
{
	int **tmp_array = (int **)array;
	int next;

	if (curr < (size - 1) && !tmp_array[curr + 1]) {
		next = curr + 1;
	} else {
		int i = 0;

		while ((i < size) && (tmp_array[i]))
			i++;
		if (i == size)
			next = ICE_NO_VSI;
		else
			next = i;
	}
	return next;
}

/**
 * ice_vsi_delete - delete a VSI from the switch
 * @vsi: pointer to VSI being removed
 */
void ice_vsi_delete(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;

	ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return;

	if (vsi->type == ICE_VSI_VF)
		ctxt->vf_num = vsi->vf_id;
	ctxt->vsi_num = vsi->vsi_num;

	memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));

	status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
	if (status)
		dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n",
			vsi->vsi_num);

	devm_kfree(&pf->pdev->dev, ctxt);
}

/**
 * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI
 * @vsi: pointer to VSI being cleared
 */
static void ice_vsi_free_arrays(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;

	/* free the ring and vector containers */
439
	if (vsi->q_vectors) {
		devm_kfree(&pf->pdev->dev, vsi->q_vectors);
		vsi->q_vectors = NULL;
	}
	if (vsi->tx_rings) {
		devm_kfree(&pf->pdev->dev, vsi->tx_rings);
		vsi->tx_rings = NULL;
	}
	if (vsi->rx_rings) {
		devm_kfree(&pf->pdev->dev, vsi->rx_rings);
		vsi->rx_rings = NULL;
	}
	if (vsi->txq_map) {
		devm_kfree(&pf->pdev->dev, vsi->txq_map);
		vsi->txq_map = NULL;
	}
	if (vsi->rxq_map) {
		devm_kfree(&pf->pdev->dev, vsi->rxq_map);
		vsi->rxq_map = NULL;
	}
}

/**
 * ice_vsi_clear - clean up and deallocate the provided VSI
 * @vsi: pointer to VSI being cleared
 *
 * This deallocates the VSI's queue resources, removes it from the PF's
 * VSI array if necessary, and deallocates the VSI
 *
 * Returns 0 on success, negative on failure
 */
int ice_vsi_clear(struct ice_vsi *vsi)
{
	struct ice_pf *pf = NULL;

	if (!vsi)
		return 0;

	if (!vsi->back)
		return -EINVAL;

	pf = vsi->back;

	if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
		dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n",
			vsi->idx);
		return -EINVAL;
	}

	mutex_lock(&pf->sw_mutex);
	/* updates the PF for this cleared VSI */

	pf->vsi[vsi->idx] = NULL;
	if (vsi->idx < pf->next_vsi)
		pf->next_vsi = vsi->idx;

	ice_vsi_free_arrays(vsi);
	mutex_unlock(&pf->sw_mutex);
	devm_kfree(&pf->pdev->dev, vsi);

	return 0;
}

/**
 * ice_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
{
	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * ice_vsi_alloc - Allocates the next available struct VSI in the PF
 * @pf: board private structure
 * @type: type of VSI
 * @vf_id: ID of the VF being configured
 *
 * returns a pointer to a VSI on success, NULL on failure.
 */
static struct ice_vsi *
ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
{
	struct ice_vsi *vsi = NULL;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->sw_mutex);

	/* If we have already allocated our maximum number of VSIs,
	 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
	 * is available to be populated
	 */
	if (pf->next_vsi == ICE_NO_VSI) {
		dev_dbg(&pf->pdev->dev, "out of VSI slots!\n");
		goto unlock_pf;
	}

	vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL);
	if (!vsi)
		goto unlock_pf;

	vsi->type = type;
	vsi->back = pf;
	set_bit(__ICE_DOWN, vsi->state);
	vsi->idx = pf->next_vsi;
	vsi->work_lmt = ICE_DFLT_IRQ_WORK;

	if (type == ICE_VSI_VF)
		ice_vsi_set_num_qs(vsi, vf_id);
	else
		ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);

	switch (vsi->type) {
	case ICE_VSI_PF:
		if (ice_vsi_alloc_arrays(vsi))
			goto err_rings;

		/* Setup default MSIX irq handler for VSI */
		vsi->irq_handler = ice_msix_clean_rings;
		break;
	case ICE_VSI_VF:
		if (ice_vsi_alloc_arrays(vsi))
			goto err_rings;
		break;
	case ICE_VSI_LB:
		if (ice_vsi_alloc_arrays(vsi))
			goto err_rings;
		break;
	default:
		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
		goto unlock_pf;
	}

	/* fill VSI slot in the PF struct */
	pf->vsi[pf->next_vsi] = vsi;

	/* prepare pf->next_vsi for next use */
	pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
					 pf->next_vsi);
	goto unlock_pf;

err_rings:
	devm_kfree(&pf->pdev->dev, vsi);
	vsi = NULL;
unlock_pf:
	mutex_unlock(&pf->sw_mutex);
	return vsi;
}

/**
 * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM when there is no space left in the PF queue
 * bitmap
 */
static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
{
	int offset, i;

	mutex_lock(qs_cfg->qs_mutex);
	offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
					    0, qs_cfg->q_count, 0);
	if (offset >= qs_cfg->pf_map_size) {
		mutex_unlock(qs_cfg->qs_mutex);
		return -ENOMEM;
	}

	bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
	for (i = 0; i < qs_cfg->q_count; i++)
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset;
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
}

/**
 * __ice_vsi_get_qs_sc - Assign scattered queues from PF to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM when there is no space left in the PF queue
 * bitmap
 */
static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
{
	int i, index = 0;

	mutex_lock(qs_cfg->qs_mutex);
	for (i = 0; i < qs_cfg->q_count; i++) {
		index = find_next_zero_bit(qs_cfg->pf_map,
					   qs_cfg->pf_map_size, index);
		if (index >= qs_cfg->pf_map_size)
			goto err_scatter;
		set_bit(index, qs_cfg->pf_map);
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
err_scatter:
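	/* roll back: release every queue claimed before the failure */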
	for (index = 0; index < i; index++) {
		clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
		qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return -ENOMEM;
}

/**
 * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * This function first tries to find contiguous space. If it is not successful,
 * it tries with the scatter approach.
 *
 * Return 0 on success and -ENOMEM when there is no space left in the PF queue
 * bitmap
 */
static int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
{
	int ret = 0;

	ret = __ice_vsi_get_qs_contig(qs_cfg);
	if (ret) {
		/* contig failed, so try with scatter approach */
		qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
		qs_cfg->q_count = min_t(u16, qs_cfg->q_count,
					qs_cfg->scatter_count);
		ret = __ice_vsi_get_qs_sc(qs_cfg);
	}
	return ret;
}

/**
 * ice_vsi_get_qs - Assign queues from PF to VSI
 * @vsi: the VSI to assign queues to
 *
 * Returns 0 on success and a negative value on error
 */
static int ice_vsi_get_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_qs_cfg tx_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_txqs,
		.pf_map_size = pf->max_pf_txqs,
		.q_count = vsi->alloc_txq,
		.scatter_count = ICE_MAX_SCATTER_TXQS,
		.vsi_map = vsi->txq_map,
		.vsi_map_offset = 0,
		.mapping_mode = vsi->tx_mapping_mode
	};
	struct ice_qs_cfg rx_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_rxqs,
		.pf_map_size = pf->max_pf_rxqs,
		.q_count = vsi->alloc_rxq,
		.scatter_count = ICE_MAX_SCATTER_RXQS,
		.vsi_map = vsi->rxq_map,
		.vsi_map_offset = 0,
		.mapping_mode = vsi->rx_mapping_mode
	};
	int ret = 0;

	vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
	vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;

	ret = __ice_vsi_get_qs(&tx_qs_cfg);
	if (!ret)
		ret = __ice_vsi_get_qs(&rx_qs_cfg);

	return ret;
}

/**
 * ice_vsi_put_qs - Release queues from VSI to PF
 * @vsi: the VSI that is going to release queues
 */
void ice_vsi_put_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int i;

	mutex_lock(&pf->avail_q_mutex);

	for (i = 0; i < vsi->alloc_txq; i++) {
		clear_bit(vsi->txq_map[i], pf->avail_txqs);
		vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
	}

	for (i = 0; i < vsi->alloc_rxq; i++) {
		clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
		vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
	}

	mutex_unlock(&pf->avail_q_mutex);
}

/**
 * ice_rss_clean - Delete RSS related VSI structures that hold user inputs
 * @vsi: the VSI being removed
 */
static void ice_rss_clean(struct ice_vsi *vsi)
{
	struct ice_pf *pf;

	pf = vsi->back;

	if (vsi->rss_hkey_user)
		devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user);
	if (vsi->rss_lut_user)
		devm_kfree(&pf->pdev->dev, vsi->rss_lut_user);
}

/**
 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
{
	struct ice_hw_common_caps *cap;
	struct ice_pf *pf = vsi->back;

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		vsi->rss_size = 1;
		return;
	}

	cap = &pf->hw.func_caps.common_cap;
	switch (vsi->type) {
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		vsi->rss_table_size = cap->rss_table_size;
		vsi->rss_size = min_t(int, num_online_cpus(),
				      BIT(cap->rss_table_entry_width));
		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
		break;
	case ICE_VSI_VF:
		/* VF VSI will get a small RSS table.
		 * For VSI_LUT, LUT size should be set to 64 bytes
		 */
		vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
		vsi->rss_size = min_t(int, num_online_cpus(),
				      BIT(cap->rss_table_entry_width));
		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
		break;
	case ICE_VSI_LB:
		break;
	default:
		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n",
			 vsi->type);
		break;
	}
}

/**
 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
 * @ctxt: the VSI context being set
 *
 * This initializes a default VSI context for all sections except the Queues.
 */
static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
{
	u32 table = 0;

	memset(&ctxt->info, 0, sizeof(ctxt->info));
	/* VSI's should be allocated from shared pool */
	ctxt->alloc_from_pool = true;
	/* Src pruning enabled by default */
	ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
	/* Traffic from VSI can be sent to LAN */
	ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
	/* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
	 * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
	 * packets untagged/tagged.
	 */
	ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
				  ICE_AQ_VSI_VLAN_MODE_M) >>
				 ICE_AQ_VSI_VLAN_MODE_S);
	/* Have 1:1 UP mapping for both ingress/egress tables */
	table |= ICE_UP_TABLE_TRANSLATE(0, 0);
	table |= ICE_UP_TABLE_TRANSLATE(1, 1);
	table |= ICE_UP_TABLE_TRANSLATE(2, 2);
	table |= ICE_UP_TABLE_TRANSLATE(3, 3);
	table |= ICE_UP_TABLE_TRANSLATE(4, 4);
	table |= ICE_UP_TABLE_TRANSLATE(5, 5);
	table |= ICE_UP_TABLE_TRANSLATE(6, 6);
	table |= ICE_UP_TABLE_TRANSLATE(7, 7);
	ctxt->info.ingress_table = cpu_to_le32(table);
	ctxt->info.egress_table = cpu_to_le32(table);
	/* Have 1:1 UP mapping for outer to inner UP table */
	ctxt->info.outer_up_table = cpu_to_le32(table);
	/* No Outer tag support; outer_tag_flags remains zero */
}

/**
 * ice_vsi_setup_q_map - Setup a VSI queue map
 * @vsi: the VSI being configured
 * @ctxt: VSI context structure
 */
static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
	u16 offset = 0, qmap = 0, tx_count = 0;
	u16 qcount_tx = vsi->alloc_txq;
	u16 qcount_rx = vsi->alloc_rxq;
	u16 tx_numq_tc, rx_numq_tc;
	u16 pow = 0, max_rss = 0;
	bool ena_tc0 = false;
	u8 netdev_tc = 0;
	int i;

	/* at least TC0 should be enabled by default */
	if (vsi->tc_cfg.numtc) {
		if (!(vsi->tc_cfg.ena_tc & BIT(0)))
			ena_tc0 = true;
	} else {
		ena_tc0 = true;
	}

	if (ena_tc0) {
		vsi->tc_cfg.numtc++;
		vsi->tc_cfg.ena_tc |= 1;
	}

	rx_numq_tc = qcount_rx / vsi->tc_cfg.numtc;
	if (!rx_numq_tc)
		rx_numq_tc = 1;
	tx_numq_tc = qcount_tx / vsi->tc_cfg.numtc;
	if (!tx_numq_tc)
		tx_numq_tc = 1;

	/* TC mapping is a function of the number of Rx queues assigned to the
	 * VSI for each traffic class and the offset of these queues.
	 * The first 10 bits are the queue offset for TC0, and the next 4 bits
	 * are the number of queues allocated to TC0, expressed as a power of 2.
	 *
	 * If TC is not enabled, the queue offset is set to 0, and allocate one
	 * queue, this way, traffic for the given TC will be sent to the default
	 * queue.
	 *
	 * Setup number and offset of Rx queues for all TCs for the VSI
	 */

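	/* e.g. a TC whose first queue sits at offset 16 and which owns 8
	 * queues is encoded below as (16 << ICE_AQ_VSI_TC_Q_OFFSET_S) |
	 * (3 << ICE_AQ_VSI_TC_Q_NUM_S), since the queue count is programmed
	 * as a power of 2 (2^3 = 8)
	 */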
	qcount_rx = rx_numq_tc;

	/* qcount will change if RSS is enabled */
	if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
		if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
			if (vsi->type == ICE_VSI_PF)
				max_rss = ICE_MAX_LG_RSS_QS;
			else
				max_rss = ICE_MAX_SMALL_RSS_QS;
			qcount_rx = min_t(int, rx_numq_tc, max_rss);
			qcount_rx = min_t(int, qcount_rx, vsi->rss_size);
		}
	}

	/* find the (rounded up) power-of-2 of qcount */
	pow = order_base_2(qcount_rx);

	ice_for_each_traffic_class(i) {
		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
			/* TC is not enabled */
			vsi->tc_cfg.tc_info[i].qoffset = 0;
			vsi->tc_cfg.tc_info[i].qcount_rx = 1;
			vsi->tc_cfg.tc_info[i].qcount_tx = 1;
			vsi->tc_cfg.tc_info[i].netdev_tc = 0;
			ctxt->info.tc_mapping[i] = 0;
			continue;
		}

		/* TC is enabled */
		vsi->tc_cfg.tc_info[i].qoffset = offset;
		vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
		vsi->tc_cfg.tc_info[i].qcount_tx = tx_numq_tc;
		vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;

		qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
			ICE_AQ_VSI_TC_Q_OFFSET_M) |
			((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
			 ICE_AQ_VSI_TC_Q_NUM_M);
		offset += qcount_rx;
		tx_count += tx_numq_tc;
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* if offset is non-zero, it means it was calculated correctly based
	 * on the enabled TCs for a given VSI; otherwise qcount_rx will
	 * always be correct and non-zero because it is based on the VSI's
	 * allocated Rx queues, which is at least 1 (hence qcount_tx will be
	 * at least 1)
	 */
	if (offset)
		vsi->num_rxq = offset;
	else
		vsi->num_rxq = qcount_rx;

	vsi->num_txq = tx_count;

	if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
		dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
		/* since there is a chance that num_rxq could have been changed
		 * in the above for loop, make num_txq equal to num_rxq.
		 */
		vsi->num_txq = vsi->num_rxq;
	}

	/* Rx queue mapping */
	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
	/* q_mapping buffer holds the info for the first queue allocated for
	 * this VSI in the PF space and also the number of queues associated
	 * with this VSI.
	 */
	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
	ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
}

/**
 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
 * @ctxt: the VSI context being set
 * @vsi: the VSI being configured
 */
static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
	u8 lut_type, hash_type;
	struct ice_pf *pf;

	pf = vsi->back;

	switch (vsi->type) {
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		break;
	case ICE_VSI_VF:
		/* VF VSI will get a small RSS table which is a VSI LUT type */
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		break;
	case ICE_VSI_LB:
		dev_dbg(&pf->pdev->dev, "Unsupported VSI type %d\n", vsi->type);
		return;
	default:
		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
		return;
	}

	ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
				ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
				((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
				 ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
}

/**
 * ice_vsi_init - Create and initialize a VSI
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command to create a new VSI.
 */
static int ice_vsi_init(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi_ctx *ctxt;
	int ret = 0;

	ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	ctxt->info = vsi->info;
	switch (vsi->type) {
	case ICE_VSI_LB:
		/* fall through */
	case ICE_VSI_PF:
		ctxt->flags = ICE_AQ_VSI_TYPE_PF;
		break;
	case ICE_VSI_VF:
		ctxt->flags = ICE_AQ_VSI_TYPE_VF;
		/* VF number here is the absolute VF number (0-255) */
		ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
		break;
	default:
		return -ENODEV;
	}

	ice_set_dflt_vsi_ctx(ctxt);
	/* if the switch is in VEB mode, allow VSI loopback */
	if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;

	/* Set LUT type and HASH type if RSS is enabled */
	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
		ice_set_rss_vsi_ctx(ctxt, vsi);

	ctxt->info.sw_id = vsi->port_info->sw_id;
	ice_vsi_setup_q_map(vsi, ctxt);

	/* Enable MAC Antispoof with new VSI being initialized or updated */
	if (vsi->type == ICE_VSI_VF && pf->vf[vsi->vf_id].spoofchk) {
		ctxt->info.valid_sections |=
			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
		ctxt->info.sec_flags |=
			ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
	}

	/* Allow control frames out of main VSI */
	if (vsi->type == ICE_VSI_PF) {
		ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
		ctxt->info.valid_sections |=
			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
	}

	ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Add VSI failed, err %d\n", ret);
		return -EIO;
	}

	/* keep context for update VSI operations */
	vsi->info = ctxt->info;

	/* record VSI number returned */
	vsi->vsi_num = ctxt->vsi_num;

	devm_kfree(&pf->pdev->dev, ctxt);
	return ret;
}

/**
 * ice_free_q_vector - Free memory allocated for a specific interrupt vector
 * @vsi: VSI having the memory freed
 * @v_idx: index of the vector to be freed
 */
static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
{
	struct ice_q_vector *q_vector;
	struct ice_pf *pf = vsi->back;
	struct ice_ring *ring;

	if (!vsi->q_vectors[v_idx]) {
		dev_dbg(&pf->pdev->dev, "Queue vector at index %d not found\n",
			v_idx);
		return;
	}
	q_vector = vsi->q_vectors[v_idx];

	ice_for_each_ring(ring, q_vector->tx)
		ring->q_vector = NULL;
	ice_for_each_ring(ring, q_vector->rx)
		ring->q_vector = NULL;

	/* only VSI with an associated netdev is set up with NAPI */
	if (vsi->netdev)
		netif_napi_del(&q_vector->napi);

	devm_kfree(&pf->pdev->dev, q_vector);
	vsi->q_vectors[v_idx] = NULL;
}

/**
 * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
 * @vsi: the VSI having memory freed
 */
void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
{
	int v_idx;

	ice_for_each_q_vector(vsi, v_idx)
		ice_free_q_vector(vsi, v_idx);
}

/**
 * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the VSI struct
 *
 * We allocate one q_vector. If allocation fails we return -ENOMEM.
 */
static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;

	/* allocate q_vector */
	q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	q_vector->vsi = vsi;
	q_vector->v_idx = v_idx;
	if (vsi->type == ICE_VSI_VF)
		goto out;
	/* only set affinity_mask if the CPU is online */
	if (cpu_online(v_idx))
		cpumask_set_cpu(v_idx, &q_vector->affinity_mask);

	/* This will not be called in the driver load path because the netdev
	 * will not be created yet. All other cases will register the NAPI
	 * handler here (i.e. resume, reset/rebuild, etc.)
	 */
	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
			       NAPI_POLL_WEIGHT);

out:
	/* tie q_vector and VSI together */
	vsi->q_vectors[v_idx] = q_vector;

	return 0;
}

/**
 * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 */
static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int v_idx = 0, num_q_vectors;
	int err;

	if (vsi->q_vectors[0]) {
		dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
			vsi->vsi_num);
		return -EEXIST;
	}

	num_q_vectors = vsi->num_q_vectors;

	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
		err = ice_vsi_alloc_q_vector(vsi, v_idx);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	while (v_idx--)
		ice_free_q_vector(vsi, v_idx);

	dev_err(&pf->pdev->dev,
		"Failed to allocate %d q_vector for VSI %d, ret=%d\n",
		vsi->num_q_vectors, vsi->vsi_num, err);
	vsi->num_q_vectors = 0;
	return err;
}

/**
 * ice_vsi_setup_vector_base - Set up the base vector for the given VSI
 * @vsi: ptr to the VSI
 *
 * This should only be called after ice_vsi_alloc() which allocates the
 * corresponding SW VSI structure and initializes num_queue_pairs for the
 * newly allocated VSI.
 *
 * Returns 0 on success or negative on failure
 */
static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	u16 num_q_vectors;

	/* SRIOV doesn't grab irq_tracker entries for each VSI */
	if (vsi->type == ICE_VSI_VF)
		return 0;

	if (vsi->base_vector) {
		dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
			vsi->vsi_num, vsi->base_vector);
		return -EEXIST;
	}

	num_q_vectors = vsi->num_q_vectors;
	/* reserve slots from OS requested IRQs */
	vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
				       vsi->idx);
	if (vsi->base_vector < 0) {
		dev_err(&pf->pdev->dev,
			"Failed to get tracking for %d vectors for VSI %d, err=%d\n",
			num_q_vectors, vsi->vsi_num, vsi->base_vector);
		return -ENOENT;
	}
	pf->num_avail_sw_msix -= num_q_vectors;

	return 0;
}

/**
 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
 * @vsi: the VSI having rings deallocated
 */
static void ice_vsi_clear_rings(struct ice_vsi *vsi)
{
	int i;

	if (vsi->tx_rings) {
		for (i = 0; i < vsi->alloc_txq; i++) {
			if (vsi->tx_rings[i]) {
				kfree_rcu(vsi->tx_rings[i], rcu);
				vsi->tx_rings[i] = NULL;
			}
		}
	}
	if (vsi->rx_rings) {
		for (i = 0; i < vsi->alloc_rxq; i++) {
			if (vsi->rx_rings[i]) {
				kfree_rcu(vsi->rx_rings[i], rcu);
				vsi->rx_rings[i] = NULL;
			}
		}
	}
}

/**
 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
 * @vsi: VSI which is having rings allocated
 */
static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int i;

	/* Allocate Tx rings */
	for (i = 0; i < vsi->alloc_txq; i++) {
		struct ice_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);

		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->txq_map[i];
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_tx_desc;
		vsi->tx_rings[i] = ring;
	}

	/* Allocate Rx rings */
	for (i = 0; i < vsi->alloc_rxq; i++) {
		struct ice_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->rxq_map[i];
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_rx_desc;
		vsi->rx_rings[i] = ring;
	}

	return 0;

err_out:
	ice_vsi_clear_rings(vsi);
	return -ENOMEM;
}

1320 1321 1322 1323 1324 1325 1326 1327
/**
 * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors allotted
 * through the MSI-X enabling code. On a constrained vector budget, we map Tx
 * and Rx rings to the vector as "efficiently" as possible.
 */
#ifdef CONFIG_DCB
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
#else
static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
#endif /* CONFIG_DCB */
{
	int q_vectors = vsi->num_q_vectors;
	int tx_rings_rem, rx_rings_rem;
	int v_id;

	/* initially set the remaining ring counts to the VSI's queue counts */
	tx_rings_rem = vsi->num_txq;
	rx_rings_rem = vsi->num_rxq;

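	/* DIV_ROUND_UP spreads rings evenly with any remainder absorbed by
	 * the first vectors, e.g. 8 Tx rings across 3 vectors map as 3/3/2
	 */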
	for (v_id = 0; v_id < q_vectors; v_id++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
		int tx_rings_per_v, rx_rings_per_v, q_id, q_base;

		/* Tx rings mapping to vector */
		tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
		q_vector->num_ring_tx = tx_rings_per_v;
		q_vector->tx.ring = NULL;
		q_vector->tx.itr_idx = ICE_TX_ITR;
		q_base = vsi->num_txq - tx_rings_rem;

		for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
			struct ice_ring *tx_ring = vsi->tx_rings[q_id];

			tx_ring->q_vector = q_vector;
			tx_ring->next = q_vector->tx.ring;
			q_vector->tx.ring = tx_ring;
		}
		tx_rings_rem -= tx_rings_per_v;

		/* Rx rings mapping to vector */
		rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
		q_vector->num_ring_rx = rx_rings_per_v;
		q_vector->rx.ring = NULL;
		q_vector->rx.itr_idx = ICE_RX_ITR;
		q_base = vsi->num_rxq - rx_rings_rem;

		for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
			struct ice_ring *rx_ring = vsi->rx_rings[q_id];

			rx_ring->q_vector = q_vector;
			rx_ring->next = q_vector->rx.ring;
			q_vector->rx.ring = rx_ring;
		}
		rx_rings_rem -= rx_rings_per_v;
	}
}

/**
 * ice_vsi_manage_rss_lut - disable/enable RSS
 * @vsi: the VSI being changed
 * @ena: boolean value indicating if this is an enable or disable request
 *
 * In the event of disable request for RSS, this function will zero out RSS
 * LUT, while in the event of enable request for RSS, it will reconfigure RSS
 * LUT.
 */
int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
{
	int err = 0;
	u8 *lut;

	lut = devm_kzalloc(&vsi->back->pdev->dev, vsi->rss_table_size,
			   GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	if (ena) {
		if (vsi->rss_lut_user)
			memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
		else
			ice_fill_rss_lut(lut, vsi->rss_table_size,
					 vsi->rss_size);
	}

	err = ice_set_rss(vsi, NULL, lut, vsi->rss_table_size);
	devm_kfree(&vsi->back->pdev->dev, lut);
	return err;
}

/**
 * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
 * @vsi: VSI to be configured
 */
static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
{
	struct ice_aqc_get_set_rss_keys *key;
	struct ice_pf *pf = vsi->back;
	enum ice_status status;
	int err = 0;
	u8 *lut;

	vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq);

	lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);

	status = ice_aq_set_rss_lut(&pf->hw, vsi->idx, vsi->rss_lut_type, lut,
				    vsi->rss_table_size);

	if (status) {
		dev_err(&pf->pdev->dev,
			"set_rss_lut failed, error %d\n", status);
		err = -EIO;
		goto ice_vsi_cfg_rss_exit;
	}

	key = devm_kzalloc(&pf->pdev->dev, sizeof(*key), GFP_KERNEL);
	if (!key) {
		err = -ENOMEM;
		goto ice_vsi_cfg_rss_exit;
	}

	if (vsi->rss_hkey_user)
		memcpy(key,
		       (struct ice_aqc_get_set_rss_keys *)vsi->rss_hkey_user,
		       ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
	else
		netdev_rss_key_fill((void *)key,
				    ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);

	status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key);

	if (status) {
		dev_err(&pf->pdev->dev, "set_rss_key failed, error %d\n",
			status);
		err = -EIO;
	}

	devm_kfree(&pf->pdev->dev, key);
ice_vsi_cfg_rss_exit:
	devm_kfree(&pf->pdev->dev, lut);
	return err;
}

/**
 * ice_add_mac_to_list - Add a MAC address filter entry to the list
 * @vsi: the VSI to be forwarded to
 * @add_list: pointer to the list which contains MAC filter entries
 * @macaddr: the MAC address to be added.
 *
 * Adds MAC address filter entry to the temp list
 *
 * Returns 0 on success or ENOMEM on failure.
 */
int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
			const u8 *macaddr)
{
	struct ice_fltr_list_entry *tmp;
	struct ice_pf *pf = vsi->back;

	tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC);
	if (!tmp)
		return -ENOMEM;

	tmp->fltr_info.flag = ICE_FLTR_TX;
	tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
	tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	tmp->fltr_info.vsi_handle = vsi->idx;
	ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr);

	INIT_LIST_HEAD(&tmp->list_entry);
	list_add(&tmp->list_entry, add_list);

	return 0;
}

/**
 * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
 * @vsi: the VSI to be updated
 */
void ice_update_eth_stats(struct ice_vsi *vsi)
{
	struct ice_eth_stats *prev_es, *cur_es;
	struct ice_hw *hw = &vsi->back->hw;
	u16 vsi_num = vsi->vsi_num;    /* HW absolute index of a VSI */

	prev_es = &vsi->eth_stats_prev;
	cur_es = &vsi->eth_stats;

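	/* the stat helpers fold the delta since the last snapshot into the
	 * running counters; stat_offsets_loaded, set at the end of this
	 * function, tells them whether a baseline snapshot exists yet
	 */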
	ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_bytes, &cur_es->rx_bytes);

	ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_unicast, &cur_es->rx_unicast);

	ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_multicast, &cur_es->rx_multicast);

	ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_broadcast, &cur_es->rx_broadcast);

	ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_discards, &cur_es->rx_discards);

	ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_bytes, &cur_es->tx_bytes);

	ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_unicast, &cur_es->tx_unicast);

	ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_multicast, &cur_es->tx_multicast);

	ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_broadcast, &cur_es->tx_broadcast);

	ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_errors, &cur_es->tx_errors);

	vsi->stat_offsets_loaded = true;
}

/**
 * ice_free_fltr_list - free filter lists helper
 * @dev: pointer to the device struct
 * @h: pointer to the list head to be freed
 *
 * Helper function to free filter lists previously created using
 * ice_add_mac_to_list
 */
void ice_free_fltr_list(struct device *dev, struct list_head *h)
{
	struct ice_fltr_list_entry *e, *tmp;

	list_for_each_entry_safe(e, tmp, h, list_entry) {
		list_del(&e->list_entry);
		devm_kfree(dev, e);
	}
}

/**
 * ice_vsi_add_vlan - Add VSI membership for given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN ID to be added
 */
int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
{
	struct ice_fltr_list_entry *tmp;
	struct ice_pf *pf = vsi->back;
	LIST_HEAD(tmp_add_list);
	enum ice_status status;
	int err = 0;

	tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	tmp->fltr_info.flag = ICE_FLTR_TX;
	tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
	tmp->fltr_info.vsi_handle = vsi->idx;
	tmp->fltr_info.l_data.vlan.vlan_id = vid;

	INIT_LIST_HEAD(&tmp->list_entry);
	list_add(&tmp->list_entry, &tmp_add_list);

	status = ice_add_vlan(&pf->hw, &tmp_add_list);
	if (status) {
		err = -ENODEV;
		dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n",
			vid, vsi->vsi_num);
	}

	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
	return err;
}

/**
 * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN ID to be removed
 *
 * Returns 0 on success and negative on failure
 */
int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
{
	struct ice_fltr_list_entry *list;
	struct ice_pf *pf = vsi->back;
	LIST_HEAD(tmp_add_list);
	enum ice_status status;
	int err = 0;

	list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
	if (!list)
		return -ENOMEM;

	list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	list->fltr_info.vsi_handle = vsi->idx;
	list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	list->fltr_info.l_data.vlan.vlan_id = vid;
	list->fltr_info.flag = ICE_FLTR_TX;
	list->fltr_info.src_id = ICE_SRC_ID_VSI;

	INIT_LIST_HEAD(&list->list_entry);
	list_add(&list->list_entry, &tmp_add_list);

	status = ice_remove_vlan(&pf->hw, &tmp_add_list);
	if (status == ICE_ERR_DOES_NOT_EXIST) {
		dev_dbg(&pf->pdev->dev,
			"Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
			vid, vsi->vsi_num, status);
	} else if (status) {
		dev_err(&pf->pdev->dev,
			"Error removing VLAN %d on vsi %i error: %d\n",
			vid, vsi->vsi_num, status);
		err = -EIO;
	}

	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
	return err;
}

/**
 * ice_vsi_cfg_rxqs - Configure the VSI for Rx
 * @vsi: the VSI being configured
 *
 * Return 0 on success and a negative value on error
 * Configure the Rx VSI for operation.
 */
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
{
	u16 i;

	if (vsi->type == ICE_VSI_VF)
		goto setup_rings;

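	/* the netdev MTU covers only the L3 payload, so reserve extra room
	 * for the Ethernet header, FCS, and a single VLAN tag
	 */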
	if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
		vsi->max_frame = vsi->netdev->mtu +
			ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	else
		vsi->max_frame = ICE_RXBUF_2048;

	vsi->rx_buf_len = ICE_RXBUF_2048;
setup_rings:
	/* set up individual rings */
	for (i = 0; i < vsi->num_rxq; i++) {
		int err;

		err = ice_setup_rx_ctx(vsi->rx_rings[i]);
		if (err) {
			dev_err(&vsi->back->pdev->dev,
				"ice_setup_rx_ctx failed for RxQ %d, err %d\n",
				i, err);
			return err;
		}
	}

	return 0;
}

/**
 * ice_vsi_cfg_txq - Configure single Tx queue
 * @vsi: the VSI that queue belongs to
 * @ring: Tx ring to be configured
 * @tc_q_idx: queue index within given TC
 * @qg_buf: queue group buffer
 * @tc: TC that Tx ring belongs to
 */
static int
ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, u16 tc_q_idx,
		struct ice_aqc_add_tx_qgrp *qg_buf, u8 tc)
{
	struct ice_tlan_ctx tlan_ctx = { 0 };
	struct ice_aqc_add_txqs_perq *txq;
	struct ice_pf *pf = vsi->back;
	u8 buf_len = sizeof(*qg_buf);
	enum ice_status status;
	u16 pf_q;

	pf_q = ring->reg_idx;
	ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
	/* copy context contents into the qg_buf */
	qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
	ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
		    ice_tlan_ctx_info);

	/* init queue specific tail reg. It is referred as
	 * transmit comm scheduler queue doorbell.
	 */
	ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);

	/* Add unique software queue handle of the Tx queue per
	 * TC into the VSI Tx ring
	 */
	ring->q_handle = tc_q_idx;

	status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
				 1, qg_buf, buf_len, NULL);
	if (status) {
		dev_err(&pf->pdev->dev,
			"Failed to set LAN Tx queue context, error: %d\n",
			status);
		return -ENODEV;
	}

	/* Add Tx Queue TEID into the VSI Tx ring from the
	 * response. This will complete configuring and
	 * enabling the queue.
	 */
	txq = &qg_buf->txqs[0];
	if (pf_q == le16_to_cpu(txq->txq_id))
		ring->txq_teid = le32_to_cpu(txq->q_teid);

	return 0;
}

/**
 * ice_vsi_cfg_txqs - Configure the VSI for Tx
 * @vsi: the VSI being configured
 * @rings: Tx ring array to be configured
 * @offset: offset within vsi->txq_map
 *
 * Return 0 on success and a negative value on error
 * Configure the Tx VSI for operation.
 */
static int
ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
{
	struct ice_aqc_add_tx_qgrp *qg_buf;
	struct ice_pf *pf = vsi->back;
	u16 q_idx = 0, i;
	int err = 0;
	u8 tc;

	qg_buf = devm_kzalloc(&pf->pdev->dev, sizeof(*qg_buf), GFP_KERNEL);
	if (!qg_buf)
		return -ENOMEM;

	qg_buf->num_txqs = 1;

	/* set up and configure the Tx queues for each enabled TC */
	ice_for_each_traffic_class(tc) {
		if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
			break;

		for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
			err = ice_vsi_cfg_txq(vsi, rings[q_idx], i + offset,
					      qg_buf, tc);
			if (err)
				goto err_cfg_txqs;

			q_idx++;
		}
	}
err_cfg_txqs:
	devm_kfree(&pf->pdev->dev, qg_buf);
	return err;
}

/**
 * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
 * @vsi: the VSI being configured
 *
 * Return 0 on success and a negative value on error
 * Configure the Tx VSI for operation.
 */
int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
{
	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, 0);
}

/**
 * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
 * @intrl: interrupt rate limit in usecs
 * @gran: interrupt rate limit granularity in usecs
 *
 * This function converts a decimal interrupt rate limit in usecs to the format
 * expected by firmware.
 */
u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
{
	u32 val = intrl / gran;

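	/* e.g. intrl = 8 us with gran = 2 us programs a value of 4 with the
	 * enable bit set; a zero value leaves rate limiting disabled
	 */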
	if (val)
		return val | GLINT_RATE_INTRL_ENA_M;
	return 0;
}

/**
 * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
 * @hw: board specific structure
 */
static void ice_cfg_itr_gran(struct ice_hw *hw)
{
	u32 regval = rd32(hw, GLINT_CTL);

	/* no need to update global register if ITR gran is already set */
	if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
	    (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
	     GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
	     GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
	     GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
	      GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
		return;

	regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
		  GLINT_CTL_ITR_GRAN_200_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
		  GLINT_CTL_ITR_GRAN_100_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
		  GLINT_CTL_ITR_GRAN_50_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
		  GLINT_CTL_ITR_GRAN_25_M);
	wr32(hw, GLINT_CTL, regval);
}

/**
 * ice_cfg_itr - configure the initial interrupt throttle values
 * @hw: pointer to the HW structure
 * @q_vector: interrupt vector that's being configured
 *
 * Configure interrupt throttling values for the ring containers that are
 * associated with the interrupt vector passed in.
 */
static void
ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
{
	ice_cfg_itr_gran(hw);

	if (q_vector->num_ring_rx) {
		struct ice_ring_container *rc = &q_vector->rx;

		/* if this value is set then don't overwrite with default */
		if (!rc->itr_setting)
			rc->itr_setting = ICE_DFLT_RX_ITR;

		rc->target_itr = ITR_TO_REG(rc->itr_setting);
		rc->next_update = jiffies + 1;
		rc->current_itr = rc->target_itr;
		wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
		     ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
	}

	if (q_vector->num_ring_tx) {
		struct ice_ring_container *rc = &q_vector->tx;

		/* if this value is set then don't overwrite with default */
		if (!rc->itr_setting)
			rc->itr_setting = ICE_DFLT_TX_ITR;

		rc->target_itr = ITR_TO_REG(rc->itr_setting);
		rc->next_update = jiffies + 1;
		rc->current_itr = rc->target_itr;
		wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
		     ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
	}
}

/**
 * ice_cfg_txq_interrupt - configure interrupt on Tx queue
 * @vsi: the VSI being configured
 * @txq: Tx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * @itr_idx: ITR index of the interrupt cause
 *
 * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
 * within the function space.
 */
#ifdef CONFIG_PCI_IOV
void
ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
#else
static void
ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
#endif /* CONFIG_PCI_IOV */
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;

	itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;

	val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
	      ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);

	wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
}
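
/* For example (indices hypothetical): mapping Tx queue 3 to MSI-X vector 5
 * with ITR index 1 writes
 *	QINT_TQCTL_CAUSE_ENA_M | (1 << QINT_TQCTL_ITR_INDX_S) |
 *	(5 << QINT_TQCTL_MSIX_INDX_S)
 * to QINT_TQCTL(vsi->txq_map[3]). ice_cfg_rxq_interrupt() below does the
 * equivalent for Rx queues via the QINT_RQCTL registers.
 */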

/**
 * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
 * @vsi: the VSI being configured
 * @rxq: Rx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * @itr_idx: ITR index of the interrupt cause
 *
 * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
 * within the function space.
 */
#ifdef CONFIG_PCI_IOV
void
ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
#else
static void
ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
#endif /* CONFIG_PCI_IOV */
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;

	itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;

	val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
	      ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);

	wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);

	ice_flush(hw);
}

/**
 * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 *
 * This configures MSIX mode interrupts for the PF VSI, and should not be used
 * for the VF VSI.
 */
void ice_vsi_cfg_msix(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 txq = 0, rxq = 0;
	int i, q;

	for (i = 0; i < vsi->num_q_vectors; i++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];
		u16 reg_idx = q_vector->reg_idx;
		ice_cfg_itr(hw, q_vector);
		wr32(hw, GLINT_RATE(reg_idx),
		     ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));

		/* Both Transmit Queue Interrupt Cause Control register
		 * and Receive Queue Interrupt Cause control register
		 * expects MSIX_INDX field to be the vector index
		 * within the function space and not the absolute
		 * vector index across PF or across device.
		 * For SR-IOV VF VSIs queue vector index always starts
		 * with 1 since the first vector index (0) is used for OICR
		 * in VF space. Since VMDq and other PF VSIs are within
		 * the PF function space, use the vector index that is
		 * tracked for this PF.
		 */
		for (q = 0; q < q_vector->num_ring_tx; q++) {
			ice_cfg_txq_interrupt(vsi, txq, reg_idx,
					      q_vector->tx.itr_idx);
			txq++;
		}

		for (q = 0; q < q_vector->num_ring_rx; q++) {
			ice_cfg_rxq_interrupt(vsi, rxq, reg_idx,
					      q_vector->rx.itr_idx);
			rxq++;
		}
	}
}

/**
 * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
 * @vsi: the VSI being changed
 */
int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
{
	struct device *dev = &vsi->back->pdev->dev;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;
	int ret = 0;

	ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	/* Here we are configuring the VSI to let the driver add VLAN tags by
	 * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
	 * insertion happens in the Tx hot path, in ice_tx_map.
	 */
	ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;

	/* Preserve existing VLAN strip setting */
	ctxt->info.vlan_flags |= (vsi->info.vlan_flags &
				  ICE_AQ_VSI_VLAN_EMOD_M);

	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);

	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (status) {
		dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
			status, hw->adminq.sq_last_status);
		ret = -EIO;
		goto out;
	}

	vsi->info.vlan_flags = ctxt->info.vlan_flags;
out:
	devm_kfree(dev, ctxt);
	return ret;
}

/**
 * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
 * @vsi: the VSI being changed
 * @ena: boolean value indicating if this is an enable or disable request
 */
int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
{
	struct device *dev = &vsi->back->pdev->dev;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;
	int ret = 0;

	ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	/* Here we are configuring what the VSI should do with the VLAN tag in
	 * the Rx packet. We can either leave the tag in the packet or put it in
	 * the Rx descriptor.
	 */
	if (ena)
		/* Strip VLAN tag from Rx packet and put it in the desc */
		ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
	else
		/* Disable stripping. Leave tag in packet */
		ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;

	/* Allow all packets untagged/tagged */
	ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;

	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);

	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (status) {
		dev_err(dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
			ena, status, hw->adminq.sq_last_status);
		ret = -EIO;
		goto out;
	}

	vsi->info.vlan_flags = ctxt->info.vlan_flags;
out:
	devm_kfree(dev, ctxt);
	return ret;
}

/**
 * ice_vsi_start_rx_rings - start VSI's Rx rings
 * @vsi: the VSI whose rings are to be started
 *
 * Returns 0 on success and a negative value on error
 */
int ice_vsi_start_rx_rings(struct ice_vsi *vsi)
{
	return ice_vsi_ctrl_rx_rings(vsi, true);
}

/**
 * ice_vsi_stop_rx_rings - stop VSI's Rx rings
 * @vsi: the VSI whose Rx rings are to be stopped
 *
 * Returns 0 on success and a negative value on error
 */
int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
{
	return ice_vsi_ctrl_rx_rings(vsi, false);
}

/**
 * ice_trigger_sw_intr - trigger a software interrupt
 * @hw: pointer to the HW structure
 * @q_vector: interrupt vector to trigger the software interrupt for
 */
void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
{
	wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
	     (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
	     GLINT_DYN_CTL_SWINT_TRIG_M |
	     GLINT_DYN_CTL_INTENA_M);
}

/**
 * ice_vsi_stop_tx_ring - Disable single Tx ring
 * @vsi: the VSI being configured
 * @rst_src: reset source
 * @rel_vmvf_num: Relative ID of VF/VM
 * @ring: Tx ring to be stopped
 * @txq_meta: Meta data of Tx ring to be stopped
 */
#ifndef CONFIG_PCI_IOV
static
#endif /* !CONFIG_PCI_IOV */
int
ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
		     u16 rel_vmvf_num, struct ice_ring *ring,
		     struct ice_txq_meta *txq_meta)
{
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;
	u32 val;

	/* clear cause_ena bit for disabled queues */
	val = rd32(hw, QINT_TQCTL(ring->reg_idx));
	val &= ~QINT_TQCTL_CAUSE_ENA_M;
	wr32(hw, QINT_TQCTL(ring->reg_idx), val);
	/* software is expected to wait for 100 ns */
	ndelay(100);
	/* trigger a software interrupt for the vector
	 * associated to the queue to schedule NAPI handler
	 */
	q_vector = ring->q_vector;
	if (q_vector)
		ice_trigger_sw_intr(hw, q_vector);

	status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
				 txq_meta->tc, 1, &txq_meta->q_handle,
				 &txq_meta->q_id, &txq_meta->q_teid, rst_src,
				 rel_vmvf_num, NULL);

	/* if the disable queue command was exercised during an
	 * active reset flow, ICE_ERR_RESET_ONGOING is returned.
	 * This is not an error as the reset operation disables
	 * queues at the hardware level anyway.
	 */
	if (status == ICE_ERR_RESET_ONGOING) {
		dev_dbg(&vsi->back->pdev->dev,
			"Reset in progress. LAN Tx queues already disabled\n");
	} else if (status == ICE_ERR_DOES_NOT_EXIST) {
		dev_dbg(&vsi->back->pdev->dev,
			"LAN Tx queues do not exist, nothing to disable\n");
	} else if (status) {
		dev_err(&vsi->back->pdev->dev,
			"Failed to disable LAN Tx queues, error: %d\n", status);
		return -ENODEV;
	}

	return 0;
}

/**
 * ice_fill_txq_meta - Prepare the Tx queue's meta data
 * @vsi: VSI that ring belongs to
 * @ring: ring that txq_meta will be based on
 * @txq_meta: a helper struct that wraps Tx queue's information
 *
 * Set up a helper struct that will contain all the necessary fields that
 * are needed for stopping Tx queue
 */
#ifndef CONFIG_PCI_IOV
static
#endif /* !CONFIG_PCI_IOV */
void
ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
		  struct ice_txq_meta *txq_meta)
{
	u8 tc = 0;

#ifdef CONFIG_DCB
	tc = ring->dcb_tc;
#endif /* CONFIG_DCB */
	txq_meta->q_id = ring->reg_idx;
	txq_meta->q_teid = ring->txq_teid;
	txq_meta->q_handle = ring->q_handle;
	txq_meta->vsi_idx = vsi->idx;
	txq_meta->tc = tc;
}
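
/* Minimal usage sketch, mirroring ice_vsi_stop_tx_rings() below: populate
 * the meta data for a ring, then pass both to ice_vsi_stop_tx_ring():
 *
 *	struct ice_txq_meta txq_meta = { };
 *
 *	ice_fill_txq_meta(vsi, ring, &txq_meta);
 *	err = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num, ring,
 *				   &txq_meta);
 */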

/**
 * ice_vsi_stop_tx_rings - Disable Tx rings
 * @vsi: the VSI being configured
 * @rst_src: reset source
 * @rel_vmvf_num: Relative ID of VF/VM
 * @rings: Tx ring array to be stopped
 */
static int
ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
		      u16 rel_vmvf_num, struct ice_ring **rings)
{
	u16 i, q_idx = 0;
	int status;
	u8 tc;

	if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
		return -EINVAL;
	/* set up the Tx queue list to be disabled for each enabled TC */
	ice_for_each_traffic_class(tc) {
		if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
			break;

		for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
			struct ice_txq_meta txq_meta = { };

			if (!rings || !rings[q_idx])
				return -EINVAL;
			ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
			status = ice_vsi_stop_tx_ring(vsi, rst_src,
						      rel_vmvf_num,
						      rings[q_idx], &txq_meta);
			if (status)
				return status;
			q_idx++;
		}
	}

	return 0;
}

/**
 * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
 * @vsi: the VSI being configured
 * @rst_src: reset source
 * @rel_vmvf_num: Relative ID of VF/VM
 */
int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
			  u16 rel_vmvf_num)
{
	return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings);
}

/**
 * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
 * @vsi: VSI to enable or disable VLAN pruning on
 * @ena: set to true to enable VLAN pruning and false to disable it
 * @vlan_promisc: enable valid security flags if not in VLAN promiscuous mode
 *
 * returns 0 if VSI is updated, negative otherwise
 */
int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
{
	struct ice_vsi_ctx *ctxt;
	struct device *dev;
	struct ice_pf *pf;
	int status;

	if (!vsi)
		return -EINVAL;

	pf = vsi->back;
	dev = &pf->pdev->dev;
	ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	ctxt->info = vsi->info;

	if (ena) {
		ctxt->info.sec_flags |=
			ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
			ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
		ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	} else {
		ctxt->info.sec_flags &=
			~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
			  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
		ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	}

	if (!vlan_promisc)
		ctxt->info.valid_sections =
			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID |
				    ICE_AQ_VSI_PROP_SW_VALID);

	status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
	if (status) {
		netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n",
			   ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
			   pf->hw.adminq.sq_last_status);
		goto err_out;
	}

	vsi->info.sec_flags = ctxt->info.sec_flags;
	vsi->info.sw_flags2 = ctxt->info.sw_flags2;

	devm_kfree(dev, ctxt);
	return 0;

err_out:
	devm_kfree(dev, ctxt);
	return -EIO;
}

/**
 * ice_vsi_set_tc_cfg - set VSI TC configuration from the local DCBX config
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
{
	struct ice_dcbx_cfg *cfg = &vsi->port_info->local_dcbx_cfg;

	vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
	vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
}

/**
 * ice_vsi_set_q_vectors_reg_idx - set the HW register index for all q_vectors
 * @vsi: VSI to set the q_vectors register index on
 */
static int
ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi)
{
	u16 i;

	if (!vsi || !vsi->q_vectors)
		return -EINVAL;

	ice_for_each_q_vector(vsi, i) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];

		if (!q_vector) {
			dev_err(&vsi->back->pdev->dev,
				"Failed to set reg_idx on q_vector %d VSI %d\n",
				i, vsi->vsi_num);
			goto clear_reg_idx;
		}

		if (vsi->type == ICE_VSI_VF) {
			struct ice_vf *vf = &vsi->back->vf[vsi->vf_id];

			q_vector->reg_idx = ice_calc_vf_reg_idx(vf, q_vector);
		} else {
			q_vector->reg_idx =
				q_vector->v_idx + vsi->base_vector;
		}
	}

	return 0;

clear_reg_idx:
	ice_for_each_q_vector(vsi, i) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];

		if (q_vector)
			q_vector->reg_idx = 0;
	}

	return -EINVAL;
}
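
/* For instance (numbers illustrative), a PF VSI with base_vector 10 maps
 * q_vectors with v_idx 0, 1, 2 to reg_idx 10, 11, 12. A VF VSI instead
 * derives reg_idx via ice_calc_vf_reg_idx(), since vector 0 in VF space
 * is reserved for OICR.
 */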

/**
 * ice_vsi_add_rem_eth_mac - Program VSI ethertype based filter with rule
 * @vsi: the VSI being configured
 * @add_rule: boolean value to add or remove ethertype filter rule
 */
static void
ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
{
	struct ice_fltr_list_entry *list;
	struct ice_pf *pf = vsi->back;
	LIST_HEAD(tmp_add_list);
	enum ice_status status;

	list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
	if (!list)
		return;

	list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
	list->fltr_info.fltr_act = ICE_DROP_PACKET;
	list->fltr_info.flag = ICE_FLTR_TX;
	list->fltr_info.src_id = ICE_SRC_ID_VSI;
	list->fltr_info.vsi_handle = vsi->idx;
	list->fltr_info.l_data.ethertype_mac.ethertype = vsi->ethtype;

	INIT_LIST_HEAD(&list->list_entry);
	list_add(&list->list_entry, &tmp_add_list);

	if (add_rule)
		status = ice_add_eth_mac(&pf->hw, &tmp_add_list);
	else
		status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);

	if (status)
		dev_err(&pf->pdev->dev,
			"Failure Adding or Removing Ethertype on VSI %i error: %d\n",
			vsi->vsi_num, status);

	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
}

/**
 * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
 * @vsi: the VSI being configured
 * @tx: bool to determine Tx or Rx rule
 * @create: bool to determine create or remove Rule
 */
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
{
	struct ice_fltr_list_entry *list;
	struct ice_pf *pf = vsi->back;
	LIST_HEAD(tmp_add_list);
	enum ice_status status;

	list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
	if (!list)
		return;

	list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
	list->fltr_info.vsi_handle = vsi->idx;
	list->fltr_info.l_data.ethertype_mac.ethertype = ETH_P_LLDP;

	if (tx) {
		list->fltr_info.fltr_act = ICE_DROP_PACKET;
		list->fltr_info.flag = ICE_FLTR_TX;
		list->fltr_info.src_id = ICE_SRC_ID_VSI;
	} else {
		list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
		list->fltr_info.flag = ICE_FLTR_RX;
		list->fltr_info.src_id = ICE_SRC_ID_LPORT;
	}

	INIT_LIST_HEAD(&list->list_entry);
	list_add(&list->list_entry, &tmp_add_list);

	if (create)
		status = ice_add_eth_mac(&pf->hw, &tmp_add_list);
	else
		status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);

	if (status)
		dev_err(&pf->pdev->dev,
			"Fail %s %s LLDP rule on VSI %i error: %d\n",
			create ? "adding" : "removing", tx ? "TX" : "RX",
			vsi->vsi_num, status);

	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
}

/**
 * ice_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 * @type: VSI type
 * @vf_id: defines VF ID to which this VSI connects. This field is meant to be
 *         used only for ICE_VSI_VF VSI type. For other VSI types, should
 *         fill-in ICE_INVAL_VFID as input.
 *
 * This allocates the sw VSI structure and its queue resources.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct on
 * success, NULL on failure.
 */
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
	      enum ice_vsi_type type, u16 vf_id)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	struct device *dev = &pf->pdev->dev;
	enum ice_status status;
	struct ice_vsi *vsi;
	int ret, i;

	if (type == ICE_VSI_VF)
		vsi = ice_vsi_alloc(pf, type, vf_id);
	else
		vsi = ice_vsi_alloc(pf, type, ICE_INVAL_VFID);

	if (!vsi) {
		dev_err(dev, "could not allocate VSI\n");
		return NULL;
	}

	vsi->port_info = pi;
	vsi->vsw = pf->first_sw;
	if (vsi->type == ICE_VSI_PF)
		vsi->ethtype = ETH_P_PAUSE;

	if (vsi->type == ICE_VSI_VF)
		vsi->vf_id = vf_id;

	if (ice_vsi_get_qs(vsi)) {
		dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
			vsi->idx);
		goto unroll_get_qs;
	}

	/* set RSS capabilities */
	ice_vsi_set_rss_params(vsi);

	/* set TC configuration */
	ice_vsi_set_tc_cfg(vsi);

	/* create the VSI */
	ret = ice_vsi_init(vsi);
	if (ret)
		goto unroll_get_qs;

	switch (vsi->type) {
	case ICE_VSI_PF:
		ret = ice_vsi_alloc_q_vectors(vsi);
		if (ret)
			goto unroll_vsi_init;

		ret = ice_vsi_setup_vector_base(vsi);
		if (ret)
			goto unroll_alloc_q_vector;

		ret = ice_vsi_set_q_vectors_reg_idx(vsi);
		if (ret)
			goto unroll_vector_base;

		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto unroll_vector_base;

		ice_vsi_map_rings_to_vectors(vsi);

		/* Do not exit if configuring RSS had an issue, at least
		 * receive traffic on first queue. Hence no need to capture
		 * return value
		 */
		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
			ice_vsi_cfg_rss_lut_key(vsi);
		break;
	case ICE_VSI_VF:
		/* VF driver will take care of creating netdev for this type and
		 * map queues to vectors through Virtchnl, PF driver only
		 * creates a VSI and corresponding structures for bookkeeping
		 * purpose
		 */
		ret = ice_vsi_alloc_q_vectors(vsi);
		if (ret)
			goto unroll_vsi_init;

		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto unroll_alloc_q_vector;

		ret = ice_vsi_set_q_vectors_reg_idx(vsi);
		if (ret)
			goto unroll_vector_base;

		pf->q_left_tx -= vsi->alloc_txq;
		pf->q_left_rx -= vsi->alloc_rxq;

		/* Do not exit if configuring RSS had an issue, at least
		 * receive traffic on first queue. Hence no need to capture
		 * return value
		 */
		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
			ice_vsi_cfg_rss_lut_key(vsi);
		break;
	case ICE_VSI_LB:
		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto unroll_vsi_init;
		break;
	default:
		/* clean up the resources and exit */
		goto unroll_vsi_init;
	}

	/* configure VSI nodes based on number of queues and TC's */
	for (i = 0; i < vsi->tc_cfg.numtc; i++)
		max_txqs[i] = vsi->alloc_txq;

	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				 max_txqs);
	if (status) {
		dev_err(&pf->pdev->dev,
			"VSI %d failed lan queue config, error %d\n",
			vsi->vsi_num, status);
		goto unroll_vector_base;
	}

	/* Add switch rule to drop all Tx Flow Control Frames, of lookup
	 * type ETHERTYPE from VSIs, and restrict malicious VF from sending
	 * out PAUSE or PFC frames. If enabled, FW can still send FC frames.
	 * The rule is added once for PF VSI in order to create appropriate
	 * recipe, since VSI/VSI list is ignored with drop action...
	 * Also add rules to handle LLDP Tx and Rx packets.  Tx LLDP packets
	 * need to be dropped so that VFs cannot send LLDP packets to reconfig
	 * DCB settings in the HW.  Also, if the FW DCBX engine is not running
	 * then Rx LLDP packets need to be redirected up the stack.
	 */
	if (vsi->type == ICE_VSI_PF) {
		ice_vsi_add_rem_eth_mac(vsi, true);

		/* Tx LLDP packets */
		ice_cfg_sw_lldp(vsi, true, true);

		/* Rx LLDP packets */
		if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
			ice_cfg_sw_lldp(vsi, false, true);
	}

	return vsi;

unroll_vector_base:
	/* reclaim SW interrupts back to the common pool */
	ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
	pf->num_avail_sw_msix += vsi->num_q_vectors;
unroll_alloc_q_vector:
	ice_vsi_free_q_vectors(vsi);
unroll_vsi_init:
	ice_vsi_delete(vsi);
unroll_get_qs:
	ice_vsi_put_qs(vsi);
	pf->q_left_tx += vsi->alloc_txq;
	pf->q_left_rx += vsi->alloc_rxq;
	ice_vsi_clear(vsi);

	return NULL;
}

/**
 * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
 * @vsi: the VSI being cleaned up
 */
static void ice_vsi_release_msix(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 txq = 0;
	u32 rxq = 0;
	int i, q;

	for (i = 0; i < vsi->num_q_vectors; i++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];
		u16 reg_idx = q_vector->reg_idx;
		wr32(hw, GLINT_ITR(ICE_IDX_ITR0, reg_idx), 0);
		wr32(hw, GLINT_ITR(ICE_IDX_ITR1, reg_idx), 0);
		for (q = 0; q < q_vector->num_ring_tx; q++) {
			wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
			txq++;
		}

		for (q = 0; q < q_vector->num_ring_rx; q++) {
			wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
			rxq++;
		}
	}

	ice_flush(hw);
}

/**
 * ice_vsi_free_irq - Free the IRQ association with the OS
 * @vsi: the VSI being configured
 */
void ice_vsi_free_irq(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int base = vsi->base_vector;
	int i;

	if (!vsi->q_vectors || !vsi->irqs_ready)
		return;
	ice_vsi_release_msix(vsi);
	if (vsi->type == ICE_VSI_VF)
		return;
	vsi->irqs_ready = false;
	ice_for_each_q_vector(vsi, i) {
		u16 vector = i + base;
		int irq_num;

		irq_num = pf->msix_entries[vector].vector;

		/* free only the irqs that were actually requested */
		if (!vsi->q_vectors[i] ||
		    !(vsi->q_vectors[i]->num_ring_tx ||
		      vsi->q_vectors[i]->num_ring_rx))
			continue;
		/* clear the affinity notifier in the IRQ descriptor */
		irq_set_affinity_notifier(irq_num, NULL);
		/* clear the affinity_mask in the IRQ descriptor */
		irq_set_affinity_hint(irq_num, NULL);
		synchronize_irq(irq_num);
		devm_free_irq(&pf->pdev->dev, irq_num,
			      vsi->q_vectors[i]);
	}
}

/**
 * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
 * @vsi: the VSI having resources freed
 */
void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
{
	int i;

	if (!vsi->tx_rings)
		return;

	ice_for_each_txq(vsi, i)
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
			ice_free_tx_ring(vsi->tx_rings[i]);
}

/**
 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
 * @vsi: the VSI having resources freed
 */
void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
{
	int i;

	if (!vsi->rx_rings)
		return;

	ice_for_each_rxq(vsi, i)
		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
			ice_free_rx_ring(vsi->rx_rings[i]);
}

/**
 * ice_vsi_close - Shut down a VSI
 * @vsi: the VSI being shut down
 */
void ice_vsi_close(struct ice_vsi *vsi)
{
	if (!test_and_set_bit(__ICE_DOWN, vsi->state))
		ice_down(vsi);

	ice_vsi_free_irq(vsi);
	ice_vsi_free_tx_rings(vsi);
	ice_vsi_free_rx_rings(vsi);
}

/**
 * ice_free_res - free a block of resources
 * @res: pointer to the resource
 * @index: starting index previously returned by ice_get_res
 * @id: identifier to track owner
 *
 * Returns number of resources freed
 */
int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
{
	int count = 0;
	int i;

	if (!res || index >= res->end)
		return -EINVAL;

	id |= ICE_RES_VALID_BIT;
	for (i = index; i < res->end && res->list[i] == id; i++) {
		res->list[i] = 0;
		count++;
	}

	return count;
}

/**
 * ice_search_res - Search the tracker for a block of resources
 * @res: pointer to the resource
 * @needed: size of the block needed
 * @id: identifier to track owner
 *
 * Returns the base item index of the block, or -ENOMEM for error
 */
static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
{
	int start = 0, end = 0;

	if (needed > res->end)
		return -ENOMEM;

	id |= ICE_RES_VALID_BIT;

	do {
		/* skip already allocated entries */
		if (res->list[end++] & ICE_RES_VALID_BIT) {
			start = end;
			if ((start + needed) > res->end)
				break;
		}

		if (end == (start + needed)) {
			int i = start;

			/* there was enough, so assign it to the requestor */
			while (i != end)
				res->list[i++] = id;

			return start;
		}
	} while (end < res->end);

	return -ENOMEM;
}

/**
 * ice_get_res - get a block of resources
 * @pf: board private structure
 * @res: pointer to the resource
 * @needed: size of the block needed
 * @id: identifier to track owner
 *
 * Returns the base item index of the block, or negative for error
 */
int
ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
{
	if (!res || !pf)
		return -EINVAL;

	if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
		dev_err(&pf->pdev->dev,
			"param err: needed=%d, num_entries = %d id=0x%04x\n",
			needed, res->num_entries, id);
		return -EINVAL;
	}

	return ice_search_res(res, needed, id);
}
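
/* Usage sketch (size illustrative): reserve a block of 8 tracker entries
 * for a VSI and later release it. While owned, each entry in res->list
 * holds the owner ID OR'ed with ICE_RES_VALID_BIT:
 *
 *	int base = ice_get_res(pf, pf->irq_tracker, 8, vsi->idx);
 *
 *	if (base < 0)
 *		return base;
 *	...
 *	ice_free_res(pf->irq_tracker, (u16)base, vsi->idx);
 */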

/**
 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 */
void ice_vsi_dis_irq(struct ice_vsi *vsi)
{
	int base = vsi->base_vector;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;
	int i;

	/* disable interrupt causation from each queue */
	if (vsi->tx_rings) {
		ice_for_each_txq(vsi, i) {
			if (vsi->tx_rings[i]) {
				u16 reg;

				reg = vsi->tx_rings[i]->reg_idx;
				val = rd32(hw, QINT_TQCTL(reg));
				val &= ~QINT_TQCTL_CAUSE_ENA_M;
				wr32(hw, QINT_TQCTL(reg), val);
			}
		}
	}

	if (vsi->rx_rings) {
		ice_for_each_rxq(vsi, i) {
			if (vsi->rx_rings[i]) {
				u16 reg;

				reg = vsi->rx_rings[i]->reg_idx;
				val = rd32(hw, QINT_RQCTL(reg));
				val &= ~QINT_RQCTL_CAUSE_ENA_M;
				wr32(hw, QINT_RQCTL(reg), val);
			}
		}
	}

	/* disable each interrupt */
	ice_for_each_q_vector(vsi, i)
		wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);

	ice_flush(hw);

	/* don't call synchronize_irq() for VF's from the host */
	if (vsi->type == ICE_VSI_VF)
		return;

	ice_for_each_q_vector(vsi, i)
		synchronize_irq(pf->msix_entries[i + base].vector);
}

/**
 * ice_napi_del - Remove NAPI handler for the VSI
 * @vsi: VSI for which NAPI handler is to be removed
 */
void ice_napi_del(struct ice_vsi *vsi)
{
	int v_idx;

	if (!vsi->netdev)
		return;

	ice_for_each_q_vector(vsi, v_idx)
		netif_napi_del(&vsi->q_vectors[v_idx]->napi);
}

/**
 * ice_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Returns 0 on success or < 0 on error
 */
int ice_vsi_release(struct ice_vsi *vsi)
{
	struct ice_pf *pf;

	if (!vsi->back)
		return -ENODEV;
	pf = vsi->back;
	/* do not unregister while driver is in the reset recovery pending
	 * state. Since reset/rebuild happens through PF service task workqueue,
	 * it's not a good idea to unregister netdev that is associated to the
	 * PF that is running the work queue items currently. This is done to
	 * avoid check_flush_dependency() warning on this wq
	 */
	if (vsi->netdev && !ice_is_reset_in_progress(pf->state))
		unregister_netdev(vsi->netdev);

	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
		ice_rss_clean(vsi);

	/* Disable VSI and free resources */
	if (vsi->type != ICE_VSI_LB)
		ice_vsi_dis_irq(vsi);
	ice_vsi_close(vsi);

	/* SR-IOV determines needed MSIX resources all at once instead of per
	 * VSI since when VFs are spawned we know how many VFs there are and how
	 * many interrupts each VF needs. SR-IOV MSIX resources are also
	 * cleared in the same manner.
	 */
	if (vsi->type != ICE_VSI_VF) {
		/* reclaim SW interrupts back to the common pool */
		ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
		pf->num_avail_sw_msix += vsi->num_q_vectors;
	}

	if (vsi->type == ICE_VSI_PF) {
		ice_vsi_add_rem_eth_mac(vsi, false);
		ice_cfg_sw_lldp(vsi, true, false);
		/* The Rx rule will only exist to remove if the LLDP FW
		 * engine is currently stopped
		 */
		if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
			ice_cfg_sw_lldp(vsi, false, false);
	}

	ice_remove_vsi_fltr(&pf->hw, vsi->idx);
	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
	ice_vsi_delete(vsi);
	ice_vsi_free_q_vectors(vsi);

	/* make sure unregister_netdev() was called by checking __ICE_DOWN */
	if (vsi->netdev && test_bit(__ICE_DOWN, vsi->state)) {
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}

	ice_vsi_clear_rings(vsi);

	ice_vsi_put_qs(vsi);
	pf->q_left_tx += vsi->alloc_txq;
	pf->q_left_rx += vsi->alloc_rxq;

	/* retain SW VSI data structure since it is needed to unregister and
	 * free VSI netdev when PF is not in reset recovery pending state,
	 * for ex: during rmmod.
	 */
	if (!ice_is_reset_in_progress(pf->state))
		ice_vsi_clear(vsi);

	return 0;
}

/**
 * ice_vsi_rebuild - Rebuild VSI after reset
 * @vsi: VSI to be rebuild
 *
 * Returns 0 on success and negative value on failure
 */
int ice_vsi_rebuild(struct ice_vsi *vsi)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	struct ice_vf *vf = NULL;
	enum ice_status status;
	struct ice_pf *pf;
	int ret, i;

	if (!vsi)
		return -EINVAL;

	pf = vsi->back;
	if (vsi->type == ICE_VSI_VF)
		vf = &pf->vf[vsi->vf_id];

	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
	ice_vsi_free_q_vectors(vsi);

	/* SR-IOV determines needed MSIX resources all at once instead of per
	 * VSI since when VFs are spawned we know how many VFs there are and how
	 * many interrupts each VF needs. SR-IOV MSIX resources are also
	 * cleared in the same manner.
	 */
	if (vsi->type != ICE_VSI_VF) {
		/* reclaim SW interrupts back to the common pool */
		ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
		pf->num_avail_sw_msix += vsi->num_q_vectors;
		vsi->base_vector = 0;
	}

	ice_vsi_put_qs(vsi);
	ice_vsi_clear_rings(vsi);
	ice_vsi_free_arrays(vsi);
	ice_dev_onetime_setup(&pf->hw);
	if (vsi->type == ICE_VSI_VF)
		ice_vsi_set_num_qs(vsi, vf->vf_id);
	else
		ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);

	ret = ice_vsi_alloc_arrays(vsi);
	if (ret < 0)
		goto err_vsi;

	ice_vsi_get_qs(vsi);
	ice_vsi_set_tc_cfg(vsi);

	/* Initialize VSI struct elements and create VSI in FW */
	ret = ice_vsi_init(vsi);
	if (ret < 0)
		goto err_vsi;

	switch (vsi->type) {
	case ICE_VSI_PF:
		ret = ice_vsi_alloc_q_vectors(vsi);
		if (ret)
			goto err_rings;

		ret = ice_vsi_setup_vector_base(vsi);
		if (ret)
			goto err_vectors;

		ret = ice_vsi_set_q_vectors_reg_idx(vsi);
		if (ret)
			goto err_vectors;

		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto err_vectors;

		ice_vsi_map_rings_to_vectors(vsi);
		/* Do not exit if configuring RSS had an issue, at least
		 * receive traffic on first queue. Hence no need to capture
		 * return value
		 */
		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
			ice_vsi_cfg_rss_lut_key(vsi);
		break;
	case ICE_VSI_VF:
		ret = ice_vsi_alloc_q_vectors(vsi);
		if (ret)
			goto err_rings;

		ret = ice_vsi_set_q_vectors_reg_idx(vsi);
		if (ret)
			goto err_vectors;

		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto err_vectors;

		pf->q_left_tx -= vsi->alloc_txq;
		pf->q_left_rx -= vsi->alloc_rxq;
		break;
	default:
		break;
	}

	/* configure VSI nodes based on number of queues and TC's */
	for (i = 0; i < vsi->tc_cfg.numtc; i++)
		max_txqs[i] = vsi->alloc_txq;

	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				 max_txqs);
	if (status) {
		dev_err(&pf->pdev->dev,
			"VSI %d failed lan queue config, error %d\n",
			vsi->vsi_num, status);
		goto err_vectors;
	}
	return 0;

err_vectors:
	ice_vsi_free_q_vectors(vsi);
err_rings:
	if (vsi->netdev) {
		vsi->current_netdev_flags = 0;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
err_vsi:
	ice_vsi_clear(vsi);
	set_bit(__ICE_RESET_FAILED, pf->state);
	return ret;
}

/**
 * ice_is_reset_in_progress - check for a reset in progress
 * @state: PF state field
 */
bool ice_is_reset_in_progress(unsigned long *state)
{
	return test_bit(__ICE_RESET_OICR_RECV, state) ||
	       test_bit(__ICE_PFR_REQ, state) ||
	       test_bit(__ICE_CORER_REQ, state) ||
	       test_bit(__ICE_GLOBR_REQ, state);
}

#ifdef CONFIG_DCB
/**
 * ice_vsi_update_q_map - update our copy of the VSI info with new queue map
 * @vsi: VSI being configured
 * @ctx: the context buffer returned from AQ VSI update command
 */
static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
{
	vsi->info.mapping_flags = ctx->info.mapping_flags;
	memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping,
	       sizeof(vsi->info.q_mapping));
	memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));
}

/**
 * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
 * @vsi: the VSI being configured
 * @ena_tc: TC map to be enabled
 */
static void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
{
	struct net_device *netdev = vsi->netdev;
	struct ice_pf *pf = vsi->back;
	struct ice_dcbx_cfg *dcbcfg;
	u8 netdev_tc;
	int i;

	if (!netdev)
		return;

	if (!ena_tc) {
		netdev_reset_tc(netdev);
		return;
	}

	if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc))
		return;

	dcbcfg = &pf->hw.port_info->local_dcbx_cfg;

	ice_for_each_traffic_class(i)
		if (vsi->tc_cfg.ena_tc & BIT(i))
			netdev_set_tc_queue(netdev,
					    vsi->tc_cfg.tc_info[i].netdev_tc,
					    vsi->tc_cfg.tc_info[i].qcount_tx,
					    vsi->tc_cfg.tc_info[i].qoffset);

	for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
		u8 ets_tc = dcbcfg->etscfg.prio_table[i];

		/* Get the mapped netdev TC# for the UP */
		netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
		netdev_set_prio_tc_map(netdev, i, netdev_tc);
	}
}

/**
 * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
 * @vsi: VSI to be configured
 * @ena_tc: TC bitmap
 *
 * VSI queues expected to be quiesced before calling this function
 */
int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	struct ice_vsi_ctx *ctx;
	struct ice_pf *pf = vsi->back;
	enum ice_status status;
	int i, ret = 0;
	u8 num_tc = 0;

	ice_for_each_traffic_class(i) {
		/* build bitmap of enabled TCs */
		if (ena_tc & BIT(i))
			num_tc++;
		/* populate max_txqs per TC */
		max_txqs[i] = vsi->alloc_txq;
	}

	vsi->tc_cfg.ena_tc = ena_tc;
	vsi->tc_cfg.numtc = num_tc;

	ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->vf_num = 0;
	ctx->info = vsi->info;

	ice_vsi_setup_q_map(vsi, ctx);

	/* must indicate which sections of the VSI context are being modified */
	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
	if (status) {
		dev_info(&pf->pdev->dev, "Failed VSI Update\n");
		ret = -EIO;
		goto out;
	}

	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				 max_txqs);

	if (status) {
		dev_err(&pf->pdev->dev,
			"VSI %d failed TC config, error %d\n",
			vsi->vsi_num, status);
		ret = -EIO;
		goto out;
	}
	ice_vsi_update_q_map(vsi, ctx);
	vsi->info.valid_sections = 0;

	ice_vsi_cfg_netdev_tc(vsi, ena_tc);
out:
	devm_kfree(&pf->pdev->dev, ctx);
	return ret;
}
#endif /* CONFIG_DCB */

/**
 * ice_vsi_cfg_mac_fltr - Add or remove a MAC address filter for a VSI
 * @vsi: the VSI being configured MAC filter
 * @macaddr: the MAC address to be added.
 * @set: Add or delete a MAC filter
 *
 * Adds or removes MAC address filter entry for VF VSI
 */
enum ice_status
ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set)
{
	LIST_HEAD(tmp_add_list);
	enum ice_status status;

	/* Update MAC filter list to be added or removed for a VSI */
	if (ice_add_mac_to_list(vsi, &tmp_add_list, macaddr)) {
		status = ICE_ERR_NO_MEMORY;
		goto cfg_mac_fltr_exit;
	}

	if (set)
		status = ice_add_mac(&vsi->back->hw, &tmp_add_list);
	else
		status = ice_remove_mac(&vsi->back->hw, &tmp_add_list);

cfg_mac_fltr_exit:
	ice_free_fltr_list(&vsi->back->pdev->dev, &tmp_add_list);
	return status;
}