// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"

/**
 * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings
 * @vsi: the VSI being configured
 * @ena: start or stop the Rx rings
 */
static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
{
	int i, ret = 0;

	for (i = 0; i < vsi->num_rxq; i++) {
		ret = ice_vsi_ctrl_rx_ring(vsi, ena, i);
		if (ret)
			break;
	}

	return ret;
}

/**
 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
 * @vsi: VSI pointer
 *
 * On error: returns error code (negative)
 * On success: returns 0
 */
static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;

	/* allocate memory for both Tx and Rx ring pointers */
	vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
				     sizeof(*vsi->tx_rings), GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;

	vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
				     sizeof(*vsi->rx_rings), GFP_KERNEL);
	if (!vsi->rx_rings)
		goto err_rings;

	vsi->txq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
				    sizeof(*vsi->txq_map), GFP_KERNEL);

	if (!vsi->txq_map)
		goto err_txq_map;

	vsi->rxq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
				    sizeof(*vsi->rxq_map), GFP_KERNEL);
	if (!vsi->rxq_map)
		goto err_rxq_map;

	/* There is no need to allocate q_vectors for a loopback VSI. */
	if (vsi->type == ICE_VSI_LB)
		return 0;

	/* allocate memory for q_vector pointers */
	vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, vsi->num_q_vectors,
				      sizeof(*vsi->q_vectors), GFP_KERNEL);
	if (!vsi->q_vectors)
		goto err_vectors;

	return 0;

err_vectors:
	devm_kfree(&pf->pdev->dev, vsi->rxq_map);
err_rxq_map:
	devm_kfree(&pf->pdev->dev, vsi->txq_map);
err_txq_map:
	devm_kfree(&pf->pdev->dev, vsi->rx_rings);
err_rings:
	devm_kfree(&pf->pdev->dev, vsi->tx_rings);
	return -ENOMEM;
}

/**
 * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
	switch (vsi->type) {
	case ICE_VSI_PF:
		/* fall through */
	case ICE_VSI_LB:
		vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
		vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
		break;
	default:
		dev_dbg(&vsi->back->pdev->dev,
			"Not setting number of Tx/Rx descriptors for VSI type %d\n",
			vsi->type);
		break;
	}
}

/**
 * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
 * @vsi: the VSI being configured
 * @vf_id: ID of the VF being configured
 *
 * The number of queues, descriptors and vectors is determined by the VSI
 * type; this function does not return a value.
 */
static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
{
	struct ice_pf *pf = vsi->back;
	struct ice_vf *vf = NULL;

	if (vsi->type == ICE_VSI_VF)
		vsi->vf_id = vf_id;

	switch (vsi->type) {
	case ICE_VSI_PF:
		vsi->alloc_txq = min_t(int, ice_get_avail_txq_count(pf),
				       num_online_cpus());

		pf->num_lan_tx = vsi->alloc_txq;

		/* only 1 Rx queue unless RSS is enabled */
		if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
			vsi->alloc_rxq = 1;
		else
			vsi->alloc_rxq = min_t(int, ice_get_avail_rxq_count(pf),
					       num_online_cpus());

		pf->num_lan_rx = vsi->alloc_rxq;

		vsi->num_q_vectors = max_t(int, vsi->alloc_rxq, vsi->alloc_txq);
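		/* e.g. (illustrative) 8 Tx and 4 Rx queues here would yield
		 * 8 q_vectors, since one q_vector can service one Tx and one
		 * Rx ring
		 */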
		break;
	case ICE_VSI_VF:
		vf = &pf->vf[vsi->vf_id];
		vsi->alloc_txq = vf->num_vf_qs;
		vsi->alloc_rxq = vf->num_vf_qs;
		/* pf->num_vf_msix includes (VF miscellaneous vector +
		 * data queue interrupts). Since vsi->num_q_vectors is number
		 * of queues vectors, subtract 1 (ICE_NONQ_VECS_VF) from the
		 * original vector count
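	 *
	 * e.g. (illustrative) with 5 MSI-X vectors per VF, 1 is reserved as
	 * the non-queue (OICR) vector and 4 remain for queue interrupts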
		 */
		vsi->num_q_vectors = pf->num_vf_msix - ICE_NONQ_VECS_VF;
		break;
	case ICE_VSI_LB:
		vsi->alloc_txq = 1;
		vsi->alloc_rxq = 1;
		break;
	default:
		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
		break;
	}

	ice_vsi_set_num_desc(vsi);
}

/**
 * ice_get_free_slot - get the next free (NULL) location index in array
 * @array: array to search
 * @size: size of the array
 * @curr: last known occupied index to be used as a search hint
 *
 * void * is being used to keep the functionality generic. This lets us use this
 * function on any array of pointers.
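 *
 * Example (illustrative): with entries { used, NULL, used } and curr = 0,
 * index 1 is returned as the next free slot.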
 */
static int ice_get_free_slot(void *array, int size, int curr)
{
	int **tmp_array = (int **)array;
	int next;

	if (curr < (size - 1) && !tmp_array[curr + 1]) {
		next = curr + 1;
	} else {
		int i = 0;

		while ((i < size) && (tmp_array[i]))
			i++;
		if (i == size)
			next = ICE_NO_VSI;
		else
			next = i;
	}
	return next;
}

/**
 * ice_vsi_delete - delete a VSI from the switch
 * @vsi: pointer to VSI being removed
 */
void ice_vsi_delete(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;

	ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return;

	if (vsi->type == ICE_VSI_VF)
		ctxt->vf_num = vsi->vf_id;
	ctxt->vsi_num = vsi->vsi_num;

	memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));

	status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
	if (status)
		dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n",
			vsi->vsi_num);

	devm_kfree(&pf->pdev->dev, ctxt);
}

/**
 * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI
 * @vsi: pointer to VSI being cleared
 */
static void ice_vsi_free_arrays(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;

	/* free the ring and vector containers */
	if (vsi->q_vectors) {
		devm_kfree(&pf->pdev->dev, vsi->q_vectors);
		vsi->q_vectors = NULL;
	}
	if (vsi->tx_rings) {
		devm_kfree(&pf->pdev->dev, vsi->tx_rings);
		vsi->tx_rings = NULL;
	}
	if (vsi->rx_rings) {
		devm_kfree(&pf->pdev->dev, vsi->rx_rings);
		vsi->rx_rings = NULL;
	}
	if (vsi->txq_map) {
		devm_kfree(&pf->pdev->dev, vsi->txq_map);
		vsi->txq_map = NULL;
	}
	if (vsi->rxq_map) {
		devm_kfree(&pf->pdev->dev, vsi->rxq_map);
		vsi->rxq_map = NULL;
	}
}

/**
 * ice_vsi_clear - clean up and deallocate the provided VSI
 * @vsi: pointer to VSI being cleared
 *
 * This deallocates the VSI's queue resources, removes it from the PF's
 * VSI array if necessary, and deallocates the VSI
 *
 * Returns 0 on success, negative on failure
 */
int ice_vsi_clear(struct ice_vsi *vsi)
{
	struct ice_pf *pf = NULL;

	if (!vsi)
		return 0;

	if (!vsi->back)
		return -EINVAL;

	pf = vsi->back;

	if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
		dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n",
			vsi->idx);
		return -EINVAL;
	}

	mutex_lock(&pf->sw_mutex);
	/* updates the PF for this cleared VSI */

	pf->vsi[vsi->idx] = NULL;
	if (vsi->idx < pf->next_vsi)
		pf->next_vsi = vsi->idx;

	ice_vsi_free_arrays(vsi);
	mutex_unlock(&pf->sw_mutex);
	devm_kfree(&pf->pdev->dev, vsi);

	return 0;
}

/**
 * ice_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
{
	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * ice_vsi_alloc - Allocates the next available struct VSI in the PF
 * @pf: board private structure
 * @type: type of VSI
 * @vf_id: ID of the VF being configured
 *
 * returns a pointer to a VSI on success, NULL on failure.
 */
static struct ice_vsi *
ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
{
	struct ice_vsi *vsi = NULL;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->sw_mutex);

	/* If we have already allocated our maximum number of VSIs,
	 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
	 * is available to be populated
	 */
	if (pf->next_vsi == ICE_NO_VSI) {
		dev_dbg(&pf->pdev->dev, "out of VSI slots!\n");
		goto unlock_pf;
	}

	vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL);
	if (!vsi)
		goto unlock_pf;

	vsi->type = type;
	vsi->back = pf;
	set_bit(__ICE_DOWN, vsi->state);

	vsi->idx = pf->next_vsi;

	if (type == ICE_VSI_VF)
		ice_vsi_set_num_qs(vsi, vf_id);
	else
		ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);

	switch (vsi->type) {
	case ICE_VSI_PF:
		if (ice_vsi_alloc_arrays(vsi))
			goto err_rings;

		/* Setup default MSIX irq handler for VSI */
		vsi->irq_handler = ice_msix_clean_rings;
		break;
	case ICE_VSI_VF:
		if (ice_vsi_alloc_arrays(vsi))
			goto err_rings;
		break;
	case ICE_VSI_LB:
		if (ice_vsi_alloc_arrays(vsi))
			goto err_rings;
		break;
	default:
		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
		goto unlock_pf;
	}

	/* fill VSI slot in the PF struct */
	pf->vsi[pf->next_vsi] = vsi;

	/* prepare pf->next_vsi for next use */
	pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
					 pf->next_vsi);
	goto unlock_pf;

err_rings:
	devm_kfree(&pf->pdev->dev, vsi);
	vsi = NULL;
unlock_pf:
	mutex_unlock(&pf->sw_mutex);
	return vsi;
}

/**
 * ice_vsi_get_qs - Assign queues from PF to VSI
 * @vsi: the VSI to assign queues to
 *
 * Returns 0 on success and a negative value on error
 */
static int ice_vsi_get_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_qs_cfg tx_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_txqs,
		.pf_map_size = pf->max_pf_txqs,
		.q_count = vsi->alloc_txq,
		.scatter_count = ICE_MAX_SCATTER_TXQS,
		.vsi_map = vsi->txq_map,
		.vsi_map_offset = 0,
		.mapping_mode = vsi->tx_mapping_mode
	};
	struct ice_qs_cfg rx_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_rxqs,
		.pf_map_size = pf->max_pf_rxqs,
		.q_count = vsi->alloc_rxq,
		.scatter_count = ICE_MAX_SCATTER_RXQS,
		.vsi_map = vsi->rxq_map,
		.vsi_map_offset = 0,
		.mapping_mode = vsi->rx_mapping_mode
	};
	int ret = 0;

	vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
	vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;

	ret = __ice_vsi_get_qs(&tx_qs_cfg);
	if (!ret)
		ret = __ice_vsi_get_qs(&rx_qs_cfg);

	return ret;
}

/**
 * ice_vsi_put_qs - Release queues from VSI to PF
 * @vsi: the VSI that is going to release queues
 */
void ice_vsi_put_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int i;

	mutex_lock(&pf->avail_q_mutex);

	for (i = 0; i < vsi->alloc_txq; i++) {
		clear_bit(vsi->txq_map[i], pf->avail_txqs);
		vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
	}

	for (i = 0; i < vsi->alloc_rxq; i++) {
		clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
		vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
	}

	mutex_unlock(&pf->avail_q_mutex);
}

/**
 * ice_is_safe_mode
 * @pf: pointer to the PF struct
 *
 * returns true if driver is in safe mode, false otherwise
 */
bool ice_is_safe_mode(struct ice_pf *pf)
{
	return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
}

/**
 * ice_rss_clean - Delete RSS related VSI structures that hold user inputs
 * @vsi: the VSI being removed
 */
static void ice_rss_clean(struct ice_vsi *vsi)
{
	struct ice_pf *pf;

	pf = vsi->back;

	if (vsi->rss_hkey_user)
		devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user);
	if (vsi->rss_lut_user)
		devm_kfree(&pf->pdev->dev, vsi->rss_lut_user);
}

/**
 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
{
	struct ice_hw_common_caps *cap;
	struct ice_pf *pf = vsi->back;

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		vsi->rss_size = 1;
		return;
	}

	cap = &pf->hw.func_caps.common_cap;
	switch (vsi->type) {
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		vsi->rss_table_size = cap->rss_table_size;
		vsi->rss_size = min_t(int, num_online_cpus(),
				      BIT(cap->rss_table_entry_width));
		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
		break;
	case ICE_VSI_VF:
		/* VF VSI will get a small RSS table
		 * For VSI_LUT, LUT size should be set to 64 bytes
		 */
		vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
		vsi->rss_size = min_t(int, num_online_cpus(),
				      BIT(cap->rss_table_entry_width));
		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
		break;
	case ICE_VSI_LB:
		break;
	default:
		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n",
			 vsi->type);
		break;
	}
}

/**
 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
 * @ctxt: the VSI context being set
 *
 * This initializes a default VSI context for all sections except the Queues.
 */
static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
{
	u32 table = 0;

	memset(&ctxt->info, 0, sizeof(ctxt->info));
	/* VSIs should be allocated from the shared pool */
	ctxt->alloc_from_pool = true;
	/* Src pruning enabled by default */
	ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
	/* Traffic from VSI can be sent to LAN */
	ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
	/* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
	 * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
	 * packets untagged/tagged.
	 */
	ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
				  ICE_AQ_VSI_VLAN_MODE_M) >>
				 ICE_AQ_VSI_VLAN_MODE_S);
	/* Have 1:1 UP mapping for both ingress/egress tables */
	table |= ICE_UP_TABLE_TRANSLATE(0, 0);
	table |= ICE_UP_TABLE_TRANSLATE(1, 1);
	table |= ICE_UP_TABLE_TRANSLATE(2, 2);
	table |= ICE_UP_TABLE_TRANSLATE(3, 3);
	table |= ICE_UP_TABLE_TRANSLATE(4, 4);
	table |= ICE_UP_TABLE_TRANSLATE(5, 5);
	table |= ICE_UP_TABLE_TRANSLATE(6, 6);
	table |= ICE_UP_TABLE_TRANSLATE(7, 7);
	ctxt->info.ingress_table = cpu_to_le32(table);
	ctxt->info.egress_table = cpu_to_le32(table);
	/* Have 1:1 UP mapping for outer to inner UP table */
	ctxt->info.outer_up_table = cpu_to_le32(table);
	/* No Outer tag support outer_tag_flags remains to zero */
}

/**
 * ice_vsi_setup_q_map - Setup a VSI queue map
 * @vsi: the VSI being configured
 * @ctxt: VSI context structure
 */
static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
	u16 offset = 0, qmap = 0, tx_count = 0;
	u16 qcount_tx = vsi->alloc_txq;
	u16 qcount_rx = vsi->alloc_rxq;
	u16 tx_numq_tc, rx_numq_tc;
	u16 pow = 0, max_rss = 0;
	bool ena_tc0 = false;
	u8 netdev_tc = 0;
	int i;

	/* at least TC0 should be enabled by default */
	if (vsi->tc_cfg.numtc) {
		if (!(vsi->tc_cfg.ena_tc & BIT(0)))
			ena_tc0 = true;
	} else {
		ena_tc0 = true;
	}

	if (ena_tc0) {
		vsi->tc_cfg.numtc++;
		vsi->tc_cfg.ena_tc |= 1;
	}

	rx_numq_tc = qcount_rx / vsi->tc_cfg.numtc;
	if (!rx_numq_tc)
		rx_numq_tc = 1;
	tx_numq_tc = qcount_tx / vsi->tc_cfg.numtc;
	if (!tx_numq_tc)
		tx_numq_tc = 1;

	/* TC mapping is a function of the number of Rx queues assigned to the
	 * VSI for each traffic class and the offset of these queues.
	 * The first 10 bits are for queue offset for TC0, next 4 bits for no:of
	 * queues allocated to TC0. No:of queues is a power-of-2.
	 *
	 * If TC is not enabled, the queue offset is set to 0, and allocate one
	 * queue, this way, traffic for the given TC will be sent to the default
	 * queue.
	 *
	 * Setup number and offset of Rx queues for all TCs for the VSI
	 */
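	/* Example (illustrative): with a single enabled TC, offset 0 and
	 * qcount_rx = 4, pow = order_base_2(4) = 2, so the TC 0 entry below
	 * encodes "start at queue 0, use 2^2 = 4 queues"
	 */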

	qcount_rx = rx_numq_tc;

	/* qcount will change if RSS is enabled */
	if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
		if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
			if (vsi->type == ICE_VSI_PF)
				max_rss = ICE_MAX_LG_RSS_QS;
			else
				max_rss = ICE_MAX_SMALL_RSS_QS;
			qcount_rx = min_t(int, rx_numq_tc, max_rss);
			qcount_rx = min_t(int, qcount_rx, vsi->rss_size);
		}
	}

	/* find the (rounded up) power-of-2 of qcount */
	pow = order_base_2(qcount_rx);

	ice_for_each_traffic_class(i) {
		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
			/* TC is not enabled */
			vsi->tc_cfg.tc_info[i].qoffset = 0;
			vsi->tc_cfg.tc_info[i].qcount_rx = 1;
			vsi->tc_cfg.tc_info[i].qcount_tx = 1;
			vsi->tc_cfg.tc_info[i].netdev_tc = 0;
			ctxt->info.tc_mapping[i] = 0;
			continue;
		}

		/* TC is enabled */
		vsi->tc_cfg.tc_info[i].qoffset = offset;
		vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
		vsi->tc_cfg.tc_info[i].qcount_tx = tx_numq_tc;
		vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;

		qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
			ICE_AQ_VSI_TC_Q_OFFSET_M) |
			((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
			 ICE_AQ_VSI_TC_Q_NUM_M);
		offset += qcount_rx;
		tx_count += tx_numq_tc;
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* If offset is non-zero, it was calculated above from the enabled TCs
	 * for this VSI; otherwise fall back to qcount_rx, which is always
	 * valid and non-zero because it is derived from the VSI's allocated
	 * Rx queues (at least 1), so the Tx queue count is likewise at least 1
	 */
	if (offset)
		vsi->num_rxq = offset;
	else
		vsi->num_rxq = qcount_rx;

	vsi->num_txq = tx_count;

	if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
		dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
		/* since there is a chance that num_rxq could have been changed
		 * in the above for loop, make num_txq equal to num_rxq.
		 */
		vsi->num_txq = vsi->num_rxq;
	}

	/* Rx queue mapping */
	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
	/* q_mapping buffer holds the info for the first queue allocated for
	 * this VSI in the PF space and also the number of queues associated
	 * with this VSI.
	 */
	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
	ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
}

/**
 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
 * @ctxt: the VSI context being set
 * @vsi: the VSI being configured
 */
static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
	u8 lut_type, hash_type;
	struct ice_pf *pf;

	pf = vsi->back;

	switch (vsi->type) {
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		break;
	case ICE_VSI_VF:
		/* VF VSI will get a small RSS table which is a VSI LUT type */
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		break;
	case ICE_VSI_LB:
		dev_dbg(&pf->pdev->dev, "Unsupported VSI type %d\n", vsi->type);
		return;
	default:
		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
		return;
	}

	ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
				ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
				((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
				 ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
}

/**
 * ice_vsi_init - Create and initialize a VSI
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command to create a new VSI.
 */
static int ice_vsi_init(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi_ctx *ctxt;
	int ret = 0;

	ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	ctxt->info = vsi->info;
	switch (vsi->type) {
	case ICE_VSI_LB:
		/* fall through */
	case ICE_VSI_PF:
		ctxt->flags = ICE_AQ_VSI_TYPE_PF;
		break;
	case ICE_VSI_VF:
		ctxt->flags = ICE_AQ_VSI_TYPE_VF;
		/* VF number here is the absolute VF number (0-255) */
		ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
		break;
	default:
		return -ENODEV;
	}

	ice_set_dflt_vsi_ctx(ctxt);
	/* if the switch is in VEB mode, allow VSI loopback */
	if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;

	/* Set LUT type and HASH type if RSS is enabled */
	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
		ice_set_rss_vsi_ctx(ctxt, vsi);

	ctxt->info.sw_id = vsi->port_info->sw_id;
	ice_vsi_setup_q_map(vsi, ctxt);

	/* Enable MAC Antispoof with new VSI being initialized or updated */
	if (vsi->type == ICE_VSI_VF && pf->vf[vsi->vf_id].spoofchk) {
		ctxt->info.valid_sections |=
			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
		ctxt->info.sec_flags |=
			ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
	}

	/* Allow control frames out of main VSI */
	if (vsi->type == ICE_VSI_PF) {
		ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
		ctxt->info.valid_sections |=
			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
	}

	ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Add VSI failed, err %d\n", ret);
		return -EIO;
	}

	/* keep context for update VSI operations */
	vsi->info = ctxt->info;

	/* record VSI number returned */
	vsi->vsi_num = ctxt->vsi_num;

	devm_kfree(&pf->pdev->dev, ctxt);
	return ret;
}

/**
 * ice_vsi_setup_vector_base - Set up the base vector for the given VSI
 * @vsi: ptr to the VSI
 *
 * This should only be called after ice_vsi_alloc() which allocates the
 * corresponding SW VSI structure and initializes num_queue_pairs for the
 * newly allocated VSI.
 *
 * Returns 0 on success or negative on failure
 */
static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	u16 num_q_vectors;

	/* SRIOV doesn't grab irq_tracker entries for each VSI */
	if (vsi->type == ICE_VSI_VF)
		return 0;

	if (vsi->base_vector) {
		dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
			vsi->vsi_num, vsi->base_vector);
		return -EEXIST;
	}

	num_q_vectors = vsi->num_q_vectors;
	/* reserve slots from OS requested IRQs */
	vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
				       vsi->idx);
	if (vsi->base_vector < 0) {
		dev_err(&pf->pdev->dev,
			"Failed to get tracking for %d vectors for VSI %d, err=%d\n",
			num_q_vectors, vsi->vsi_num, vsi->base_vector);
		return -ENOENT;
	}
	pf->num_avail_sw_msix -= num_q_vectors;

	return 0;
}

/**
 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
 * @vsi: the VSI having rings deallocated
 */
static void ice_vsi_clear_rings(struct ice_vsi *vsi)
{
	int i;

	if (vsi->tx_rings) {
		for (i = 0; i < vsi->alloc_txq; i++) {
			if (vsi->tx_rings[i]) {
				kfree_rcu(vsi->tx_rings[i], rcu);
				vsi->tx_rings[i] = NULL;
			}
		}
	}
	if (vsi->rx_rings) {
		for (i = 0; i < vsi->alloc_rxq; i++) {
			if (vsi->rx_rings[i]) {
				kfree_rcu(vsi->rx_rings[i], rcu);
				vsi->rx_rings[i] = NULL;
			}
		}
	}
}

/**
 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
 * @vsi: VSI which is having rings allocated
 */
static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int i;

	/* Allocate Tx rings */
	for (i = 0; i < vsi->alloc_txq; i++) {
		struct ice_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);

		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->txq_map[i];
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_tx_desc;
		vsi->tx_rings[i] = ring;
	}

	/* Allocate Rx rings */
	for (i = 0; i < vsi->alloc_rxq; i++) {
		struct ice_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->rxq_map[i];
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_rx_desc;
		vsi->rx_rings[i] = ring;
	}

	return 0;

err_out:
	ice_vsi_clear_rings(vsi);
	return -ENOMEM;
}

/**
 * ice_vsi_manage_rss_lut - disable/enable RSS
 * @vsi: the VSI being changed
 * @ena: boolean value indicating if this is an enable or disable request
 *
 * In the event of disable request for RSS, this function will zero out RSS
 * LUT, while in the event of enable request for RSS, it will reconfigure RSS
 * LUT.
 */
int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
{
	int err = 0;
	u8 *lut;

	lut = devm_kzalloc(&vsi->back->pdev->dev, vsi->rss_table_size,
			   GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	if (ena) {
		if (vsi->rss_lut_user)
			memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
		else
			ice_fill_rss_lut(lut, vsi->rss_table_size,
					 vsi->rss_size);
	}

	err = ice_set_rss(vsi, NULL, lut, vsi->rss_table_size);
	devm_kfree(&vsi->back->pdev->dev, lut);
	return err;
}

/**
 * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
 * @vsi: VSI to be configured
 */
static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
{
	struct ice_aqc_get_set_rss_keys *key;
	struct ice_pf *pf = vsi->back;
	enum ice_status status;
	int err = 0;
	u8 *lut;

	vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq);

	lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);

	status = ice_aq_set_rss_lut(&pf->hw, vsi->idx, vsi->rss_lut_type, lut,
				    vsi->rss_table_size);

	if (status) {
		dev_err(&pf->pdev->dev,
			"set_rss_lut failed, error %d\n", status);
		err = -EIO;
		goto ice_vsi_cfg_rss_exit;
	}

	key = devm_kzalloc(&pf->pdev->dev, sizeof(*key), GFP_KERNEL);
	if (!key) {
		err = -ENOMEM;
		goto ice_vsi_cfg_rss_exit;
	}

	if (vsi->rss_hkey_user)
		memcpy(key,
		       (struct ice_aqc_get_set_rss_keys *)vsi->rss_hkey_user,
		       ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
	else
		netdev_rss_key_fill((void *)key,
				    ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);

	status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key);

	if (status) {
		dev_err(&pf->pdev->dev, "set_rss_key failed, error %d\n",
			status);
		err = -EIO;
	}

	devm_kfree(&pf->pdev->dev, key);
ice_vsi_cfg_rss_exit:
	devm_kfree(&pf->pdev->dev, lut);
	return err;
}

/**
 * ice_add_mac_to_list - Add a MAC address filter entry to the list
 * @vsi: the VSI to be forwarded to
 * @add_list: pointer to the list which contains MAC filter entries
 * @macaddr: the MAC address to be added.
 *
 * Adds MAC address filter entry to the temp list
 *
 * Returns 0 on success or ENOMEM on failure.
 */
int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
			const u8 *macaddr)
{
	struct ice_fltr_list_entry *tmp;
	struct ice_pf *pf = vsi->back;

	tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC);
	if (!tmp)
		return -ENOMEM;

	tmp->fltr_info.flag = ICE_FLTR_TX;
	tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
	tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	tmp->fltr_info.vsi_handle = vsi->idx;
	ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr);

	INIT_LIST_HEAD(&tmp->list_entry);
	list_add(&tmp->list_entry, add_list);

	return 0;
}

/**
 * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
 * @vsi: the VSI to be updated
 */
void ice_update_eth_stats(struct ice_vsi *vsi)
{
	struct ice_eth_stats *prev_es, *cur_es;
	struct ice_hw *hw = &vsi->back->hw;
	u16 vsi_num = vsi->vsi_num;    /* HW absolute index of a VSI */

	prev_es = &vsi->eth_stats_prev;
	cur_es = &vsi->eth_stats;

	ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_bytes, &cur_es->rx_bytes);

	ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_unicast, &cur_es->rx_unicast);

	ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_multicast, &cur_es->rx_multicast);

	ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_broadcast, &cur_es->rx_broadcast);

	ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_discards, &cur_es->rx_discards);

	ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_bytes, &cur_es->tx_bytes);

	ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_unicast, &cur_es->tx_unicast);

	ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_multicast, &cur_es->tx_multicast);

	ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_broadcast, &cur_es->tx_broadcast);

	ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_errors, &cur_es->tx_errors);

	vsi->stat_offsets_loaded = true;
}

/**
 * ice_free_fltr_list - free filter lists helper
 * @dev: pointer to the device struct
 * @h: pointer to the list head to be freed
 *
 * Helper function to free filter lists previously created using
 * ice_add_mac_to_list
 */
void ice_free_fltr_list(struct device *dev, struct list_head *h)
{
	struct ice_fltr_list_entry *e, *tmp;

	list_for_each_entry_safe(e, tmp, h, list_entry) {
		list_del(&e->list_entry);
		devm_kfree(dev, e);
	}
}

/**
 * ice_vsi_add_vlan - Add VSI membership for given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN ID to be added
 */
int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
{
	struct ice_fltr_list_entry *tmp;
	struct ice_pf *pf = vsi->back;
	LIST_HEAD(tmp_add_list);
	enum ice_status status;
	int err = 0;

	tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	tmp->fltr_info.flag = ICE_FLTR_TX;
	tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
	tmp->fltr_info.vsi_handle = vsi->idx;
	tmp->fltr_info.l_data.vlan.vlan_id = vid;

	INIT_LIST_HEAD(&tmp->list_entry);
	list_add(&tmp->list_entry, &tmp_add_list);

	status = ice_add_vlan(&pf->hw, &tmp_add_list);
	if (status) {
		err = -ENODEV;
		dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n",
			vid, vsi->vsi_num);
	}

	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
	return err;
}

/**
 * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN ID to be removed
 *
 * Returns 0 on success and negative on failure
 */
int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
{
	struct ice_fltr_list_entry *list;
	struct ice_pf *pf = vsi->back;
	LIST_HEAD(tmp_add_list);
	enum ice_status status;
	int err = 0;

	list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
	if (!list)
		return -ENOMEM;

	list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	list->fltr_info.vsi_handle = vsi->idx;
	list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	list->fltr_info.l_data.vlan.vlan_id = vid;
	list->fltr_info.flag = ICE_FLTR_TX;
	list->fltr_info.src_id = ICE_SRC_ID_VSI;

	INIT_LIST_HEAD(&list->list_entry);
	list_add(&list->list_entry, &tmp_add_list);

	status = ice_remove_vlan(&pf->hw, &tmp_add_list);
	if (status == ICE_ERR_DOES_NOT_EXIST) {
		dev_dbg(&pf->pdev->dev,
			"Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
			vid, vsi->vsi_num, status);
	} else if (status) {
		dev_err(&pf->pdev->dev,
			"Error removing VLAN %d on vsi %i error: %d\n",
			vid, vsi->vsi_num, status);
		err = -EIO;
	}

	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
	return err;
}

/**
 * ice_vsi_cfg_rxqs - Configure the VSI for Rx
 * @vsi: the VSI being configured
 *
 * Return 0 on success and a negative value on error
 * Configure the Rx VSI for operation.
 */
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
{
	u16 i;

	if (vsi->type == ICE_VSI_VF)
		goto setup_rings;

	if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
		vsi->max_frame = vsi->netdev->mtu +
			ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	else
		vsi->max_frame = ICE_RXBUF_2048;

	vsi->rx_buf_len = ICE_RXBUF_2048;
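	/* e.g. (illustrative) a 9000 byte netdev MTU above yields a max_frame
	 * of 9000 + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN = 9022 bytes
	 */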
setup_rings:
	/* set up individual rings */
	for (i = 0; i < vsi->num_rxq; i++) {
		int err;

		err = ice_setup_rx_ctx(vsi->rx_rings[i]);
		if (err) {
			dev_err(&vsi->back->pdev->dev,
				"ice_setup_rx_ctx failed for RxQ %d, err %d\n",
				i, err);
			return err;
		}
	}

	return 0;
}

/**
 * ice_vsi_cfg_txqs - Configure the VSI for Tx
 * @vsi: the VSI being configured
 * @rings: Tx ring array to be configured
 *
 * Return 0 on success and a negative value on error
 * Configure the Tx VSI for operation.
 */
static int
ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
{
	struct ice_aqc_add_tx_qgrp *qg_buf;
	u16 q_idx = 0;
	int err = 0;

	qg_buf = kzalloc(sizeof(*qg_buf), GFP_KERNEL);
	if (!qg_buf)
		return -ENOMEM;

	qg_buf->num_txqs = 1;

	for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
		err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
		if (err)
			goto err_cfg_txqs;
	}

err_cfg_txqs:
	kfree(qg_buf);
	return err;
}

/**
 * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
 * @vsi: the VSI being configured
 *
 * Return 0 on success and a negative value on error
 * Configure the Tx VSI for operation.
 */
int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
{
	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings);
}

/**
 * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
 * @intrl: interrupt rate limit in usecs
 * @gran: interrupt rate limit granularity in usecs
 *
 * This function converts a decimal interrupt rate limit in usecs to the format
 * expected by firmware.
 */
u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
{
	u32 val = intrl / gran;
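	/* e.g. an 8 usec limit with 2 usec granularity gives val = 4, which
	 * is returned below with the rate-limit enable bit set
	 */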

	if (val)
		return val | GLINT_RATE_INTRL_ENA_M;
	return 0;
}

/**
 * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 *
 * This configures MSIX mode interrupts for the PF VSI, and should not be used
 * for the VF VSI.
 */
void ice_vsi_cfg_msix(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 txq = 0, rxq = 0;
	int i, q;

	for (i = 0; i < vsi->num_q_vectors; i++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];
		u16 reg_idx = q_vector->reg_idx;

		ice_cfg_itr(hw, q_vector);

		wr32(hw, GLINT_RATE(reg_idx),
		     ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));

		/* Both Transmit Queue Interrupt Cause Control register
		 * and Receive Queue Interrupt Cause control register
		 * expects MSIX_INDX field to be the vector index
		 * within the function space and not the absolute
		 * vector index across PF or across device.
		 * For SR-IOV VF VSIs queue vector index always starts
		 * with 1 since first vector index(0) is used for OICR
		 * in VF space. Since VMDq and other PF VSIs are within
		 * the PF function space, use the vector index that is
		 * tracked for this PF.
		 */
		for (q = 0; q < q_vector->num_ring_tx; q++) {
			ice_cfg_txq_interrupt(vsi, txq, reg_idx,
					      q_vector->tx.itr_idx);
			txq++;
		}

		for (q = 0; q < q_vector->num_ring_rx; q++) {
			ice_cfg_rxq_interrupt(vsi, rxq, reg_idx,
					      q_vector->rx.itr_idx);
			rxq++;
		}
	}
}

/**
 * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
 * @vsi: the VSI being changed
 */
int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
{
	struct device *dev = &vsi->back->pdev->dev;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;
	int ret = 0;

	ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	/* Here we are configuring the VSI to let the driver add VLAN tags by
	 * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
	 * insertion happens in the Tx hot path, in ice_tx_map.
	 */
	ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;

	/* Preserve existing VLAN strip setting */
	ctxt->info.vlan_flags |= (vsi->info.vlan_flags &
				  ICE_AQ_VSI_VLAN_EMOD_M);

	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);

	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (status) {
		dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
			status, hw->adminq.sq_last_status);
		ret = -EIO;
		goto out;
	}

	vsi->info.vlan_flags = ctxt->info.vlan_flags;
out:
	devm_kfree(dev, ctxt);
	return ret;
}

/**
 * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
 * @vsi: the VSI being changed
 * @ena: boolean value indicating if this is an enable or disable request
 */
int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
{
	struct device *dev = &vsi->back->pdev->dev;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;
	int ret = 0;

	ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	/* Here we are configuring what the VSI should do with the VLAN tag in
	 * the Rx packet. We can either leave the tag in the packet or put it in
	 * the Rx descriptor.
	 */
	if (ena)
		/* Strip VLAN tag from Rx packet and put it in the desc */
		ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
	else
		/* Disable stripping. Leave tag in packet */
		ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;

	/* Allow all packets untagged/tagged */
	ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;

	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);

	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (status) {
		dev_err(dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
			ena, status, hw->adminq.sq_last_status);
		ret = -EIO;
		goto out;
	}

	vsi->info.vlan_flags = ctxt->info.vlan_flags;
out:
	devm_kfree(dev, ctxt);
	return ret;
}

/**
 * ice_vsi_start_rx_rings - start VSI's Rx rings
 * @vsi: the VSI whose rings are to be started
 *
 * Returns 0 on success and a negative value on error
 */
int ice_vsi_start_rx_rings(struct ice_vsi *vsi)
{
	return ice_vsi_ctrl_rx_rings(vsi, true);
}

/**
 * ice_vsi_stop_rx_rings - stop VSI's Rx rings
 * @vsi: the VSI
 *
 * Returns 0 on success and a negative value on error
 */
int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
{
	return ice_vsi_ctrl_rx_rings(vsi, false);
}

/**
 * ice_vsi_stop_tx_rings - Disable Tx rings
 * @vsi: the VSI being configured
 * @rst_src: reset source
 * @rel_vmvf_num: Relative ID of VF/VM
 * @rings: Tx ring array to be stopped
 */
static int
ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
		      u16 rel_vmvf_num, struct ice_ring **rings)
{
	u16 q_idx;

	if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
		return -EINVAL;

	for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
		struct ice_txq_meta txq_meta = { };
		int status;

		if (!rings || !rings[q_idx])
			return -EINVAL;

		ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
		status = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num,
					      rings[q_idx], &txq_meta);

		if (status)
			return status;
	}

	return 0;
}

/**
 * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
 * @vsi: the VSI being configured
 * @rst_src: reset source
 * @rel_vmvf_num: Relative ID of VF/VM
 */
int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
			  u16 rel_vmvf_num)
{
	return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings);
}

/**
 * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
 * @vsi: VSI to enable or disable VLAN pruning on
 * @ena: set to true to enable VLAN pruning and false to disable it
 * @vlan_promisc: enable valid security flags if not in VLAN promiscuous mode
 *
 * returns 0 if VSI is updated, negative otherwise
 */
int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
{
	struct ice_vsi_ctx *ctxt;
	struct device *dev;
	struct ice_pf *pf;
	int status;

	if (!vsi)
		return -EINVAL;

	pf = vsi->back;
	dev = &pf->pdev->dev;
	ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	ctxt->info = vsi->info;

	if (ena) {
		ctxt->info.sec_flags |=
			ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
			ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
		ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	} else {
		ctxt->info.sec_flags &=
			~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
			  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
		ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	}

	if (!vlan_promisc)
		ctxt->info.valid_sections =
			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID |
				    ICE_AQ_VSI_PROP_SW_VALID);

	status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
	if (status) {
		netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n",
			   ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
			   pf->hw.adminq.sq_last_status);
		goto err_out;
	}

	vsi->info.sec_flags = ctxt->info.sec_flags;
	vsi->info.sw_flags2 = ctxt->info.sw_flags2;

	devm_kfree(dev, ctxt);
	return 0;

err_out:
	devm_kfree(dev, ctxt);
	return -EIO;
}

static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
{
	struct ice_dcbx_cfg *cfg = &vsi->port_info->local_dcbx_cfg;

	vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
	vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
}

/**
 * ice_vsi_set_q_vectors_reg_idx - set the HW register index for all q_vectors
 * @vsi: VSI to set the q_vectors register index on
 */
static int
ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi)
{
	u16 i;

	if (!vsi || !vsi->q_vectors)
		return -EINVAL;

	ice_for_each_q_vector(vsi, i) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];

		if (!q_vector) {
			dev_err(&vsi->back->pdev->dev,
				"Failed to set reg_idx on q_vector %d VSI %d\n",
				i, vsi->vsi_num);
			goto clear_reg_idx;
		}

		if (vsi->type == ICE_VSI_VF) {
			struct ice_vf *vf = &vsi->back->vf[vsi->vf_id];

			q_vector->reg_idx = ice_calc_vf_reg_idx(vf, q_vector);
		} else {
			q_vector->reg_idx =
				q_vector->v_idx + vsi->base_vector;
		}
	}

	return 0;

clear_reg_idx:
	ice_for_each_q_vector(vsi, i) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];

		if (q_vector)
			q_vector->reg_idx = 0;
	}

	return -EINVAL;
}

/**
 * ice_vsi_add_rem_eth_mac - Program VSI ethertype based filter with rule
 * @vsi: the VSI being configured
 * @add_rule: boolean value to add or remove ethertype filter rule
 */
static void
ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
{
	struct ice_fltr_list_entry *list;
	struct ice_pf *pf = vsi->back;
	LIST_HEAD(tmp_add_list);
	enum ice_status status;

	list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
	if (!list)
		return;

	list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
	list->fltr_info.fltr_act = ICE_DROP_PACKET;
	list->fltr_info.flag = ICE_FLTR_TX;
	list->fltr_info.src_id = ICE_SRC_ID_VSI;
	list->fltr_info.vsi_handle = vsi->idx;
	list->fltr_info.l_data.ethertype_mac.ethertype = vsi->ethtype;

	INIT_LIST_HEAD(&list->list_entry);
	list_add(&list->list_entry, &tmp_add_list);

	if (add_rule)
		status = ice_add_eth_mac(&pf->hw, &tmp_add_list);
	else
		status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);

	if (status)
		dev_err(&pf->pdev->dev,
			"Failure Adding or Removing Ethertype on VSI %i error: %d\n",
			vsi->vsi_num, status);

	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
}

/**
 * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
 * @vsi: the VSI being configured
 * @tx: bool to determine Tx or Rx rule
 * @create: bool to determine create or remove Rule
 */
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
{
	struct ice_fltr_list_entry *list;
	struct ice_pf *pf = vsi->back;
	LIST_HEAD(tmp_add_list);
	enum ice_status status;

	list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
	if (!list)
		return;

	list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
	list->fltr_info.vsi_handle = vsi->idx;
	list->fltr_info.l_data.ethertype_mac.ethertype = ETH_P_LLDP;

	if (tx) {
		list->fltr_info.fltr_act = ICE_DROP_PACKET;
		list->fltr_info.flag = ICE_FLTR_TX;
		list->fltr_info.src_id = ICE_SRC_ID_VSI;
	} else {
		list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
		list->fltr_info.flag = ICE_FLTR_RX;
		list->fltr_info.src_id = ICE_SRC_ID_LPORT;
	}

	INIT_LIST_HEAD(&list->list_entry);
	list_add(&list->list_entry, &tmp_add_list);

	if (create)
		status = ice_add_eth_mac(&pf->hw, &tmp_add_list);
	else
		status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);

	if (status)
		dev_err(&pf->pdev->dev,
			"Fail %s %s LLDP rule on VSI %i error: %d\n",
			create ? "adding" : "removing", tx ? "TX" : "RX",
			vsi->vsi_num, status);

	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
}

/**
 * ice_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 * @type: VSI type
 * @vf_id: defines VF ID to which this VSI connects. This field is meant to be
 *         used only for ICE_VSI_VF VSI type. For other VSI types, should
 *         fill-in ICE_INVAL_VFID as input.
 *
 * This allocates the sw VSI structure and its queue resources.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct on
 * success, NULL on failure.
 */
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
	      enum ice_vsi_type type, u16 vf_id)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	struct device *dev = &pf->pdev->dev;
	enum ice_status status;
	struct ice_vsi *vsi;
	int ret, i;

	if (type == ICE_VSI_VF)
		vsi = ice_vsi_alloc(pf, type, vf_id);
	else
		vsi = ice_vsi_alloc(pf, type, ICE_INVAL_VFID);

	if (!vsi) {
		dev_err(dev, "could not allocate VSI\n");
		return NULL;
	}

	vsi->port_info = pi;
	vsi->vsw = pf->first_sw;
	if (vsi->type == ICE_VSI_PF)
		vsi->ethtype = ETH_P_PAUSE;

	if (vsi->type == ICE_VSI_VF)
		vsi->vf_id = vf_id;

	if (ice_vsi_get_qs(vsi)) {
		dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
			vsi->idx);
		goto unroll_get_qs;
	}

	/* set RSS capabilities */
	ice_vsi_set_rss_params(vsi);

	/* set TC configuration */
	ice_vsi_set_tc_cfg(vsi);

	/* create the VSI */
	ret = ice_vsi_init(vsi);
	if (ret)
		goto unroll_get_qs;

	switch (vsi->type) {
	case ICE_VSI_PF:
		ret = ice_vsi_alloc_q_vectors(vsi);
		if (ret)
			goto unroll_vsi_init;

		ret = ice_vsi_setup_vector_base(vsi);
		if (ret)
			goto unroll_alloc_q_vector;

		ret = ice_vsi_set_q_vectors_reg_idx(vsi);
		if (ret)
			goto unroll_vector_base;

		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto unroll_vector_base;

		ice_vsi_map_rings_to_vectors(vsi);

		/* Do not exit if configuring RSS had an issue, at least
		 * receive traffic on first queue. Hence no need to capture
		 * return value
		 */
		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
			ice_vsi_cfg_rss_lut_key(vsi);
		break;
	case ICE_VSI_VF:
		/* VF driver will take care of creating netdev for this type and
		 * map queues to vectors through Virtchnl, PF driver only
		 * creates a VSI and corresponding structures for bookkeeping
		 * purpose
		 */
		ret = ice_vsi_alloc_q_vectors(vsi);
		if (ret)
			goto unroll_vsi_init;

		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto unroll_alloc_q_vector;

		ret = ice_vsi_set_q_vectors_reg_idx(vsi);
		if (ret)
			goto unroll_vector_base;

		/* Do not exit if configuring RSS had an issue, at least
		 * receive traffic on first queue. Hence no need to capture
		 * return value
		 */
		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
			ice_vsi_cfg_rss_lut_key(vsi);
		break;
	case ICE_VSI_LB:
		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto unroll_vsi_init;
		break;
	default:
		/* clean up the resources and exit */
		goto unroll_vsi_init;
	}

	/* configure VSI nodes based on number of queues and TC's */
	for (i = 0; i < vsi->tc_cfg.numtc; i++)
		max_txqs[i] = vsi->alloc_txq;

	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				 max_txqs);
	if (status) {
		dev_err(&pf->pdev->dev,
			"VSI %d failed lan queue config, error %d\n",
			vsi->vsi_num, status);
		goto unroll_vector_base;
	}

	/* Add switch rule to drop all Tx Flow Control Frames, of look up
	 * type ETHERTYPE from VSIs, and restrict malicious VF from sending
	 * out PAUSE or PFC frames. If enabled, FW can still send FC frames.
	 * The rule is added once for PF VSI in order to create appropriate
	 * recipe, since VSI/VSI list is ignored with drop action...
	 * Also add rules to handle LLDP Tx and Rx packets.  Tx LLDP packets
	 * need to be dropped so that VFs cannot send LLDP packets to reconfig
	 * DCB settings in the HW.  Also, if the FW DCBX engine is not running
	 * then Rx LLDP packets need to be redirected up the stack.
	 */
	if (!ice_is_safe_mode(pf)) {
		if (vsi->type == ICE_VSI_PF) {
			ice_vsi_add_rem_eth_mac(vsi, true);

			/* Tx LLDP packets */
			ice_cfg_sw_lldp(vsi, true, true);

			/* Rx LLDP packets */
			if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
				ice_cfg_sw_lldp(vsi, false, true);
		}
	}

	return vsi;

unroll_vector_base:
	/* reclaim SW interrupts back to the common pool */
	ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
	pf->num_avail_sw_msix += vsi->num_q_vectors;
unroll_alloc_q_vector:
	ice_vsi_free_q_vectors(vsi);
unroll_vsi_init:
	ice_vsi_delete(vsi);
unroll_get_qs:
	ice_vsi_put_qs(vsi);
	ice_vsi_clear(vsi);

	return NULL;
}

/**
 * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
 * @vsi: the VSI being cleaned up
 */
static void ice_vsi_release_msix(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 txq = 0;
	u32 rxq = 0;
	int i, q;

	for (i = 0; i < vsi->num_q_vectors; i++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];
		u16 reg_idx = q_vector->reg_idx;

		wr32(hw, GLINT_ITR(ICE_IDX_ITR0, reg_idx), 0);
		wr32(hw, GLINT_ITR(ICE_IDX_ITR1, reg_idx), 0);
		for (q = 0; q < q_vector->num_ring_tx; q++) {
			wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
			txq++;
		}

		for (q = 0; q < q_vector->num_ring_rx; q++) {
			wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
			rxq++;
		}
	}

	ice_flush(hw);
}

/**
 * ice_vsi_free_irq - Free the IRQ association with the OS
 * @vsi: the VSI being configured
 */
void ice_vsi_free_irq(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int base = vsi->base_vector;
	int i;

	if (!vsi->q_vectors || !vsi->irqs_ready)
		return;

	ice_vsi_release_msix(vsi);
	if (vsi->type == ICE_VSI_VF)
		return;

	vsi->irqs_ready = false;
	ice_for_each_q_vector(vsi, i) {
		u16 vector = i + base;
		int irq_num;

		irq_num = pf->msix_entries[vector].vector;

		/* free only the irqs that were actually requested */
		if (!vsi->q_vectors[i] ||
		    !(vsi->q_vectors[i]->num_ring_tx ||
		      vsi->q_vectors[i]->num_ring_rx))
			continue;

		/* clear the affinity notifier in the IRQ descriptor */
		irq_set_affinity_notifier(irq_num, NULL);

		/* clear the affinity_mask in the IRQ descriptor */
		irq_set_affinity_hint(irq_num, NULL);
		synchronize_irq(irq_num);
		devm_free_irq(&pf->pdev->dev, irq_num,
			      vsi->q_vectors[i]);
	}
}

/**
 * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
 * @vsi: the VSI having resources freed
 */
void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
{
	int i;

	if (!vsi->tx_rings)
		return;

	ice_for_each_txq(vsi, i)
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
			ice_free_tx_ring(vsi->tx_rings[i]);
}

/**
 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
 * @vsi: the VSI having resources freed
 */
void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
{
	int i;

	if (!vsi->rx_rings)
		return;

	ice_for_each_rxq(vsi, i)
		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
			ice_free_rx_ring(vsi->rx_rings[i]);
}

/**
 * ice_vsi_close - Shut down a VSI
 * @vsi: the VSI being shut down
 */
void ice_vsi_close(struct ice_vsi *vsi)
{
	if (!test_and_set_bit(__ICE_DOWN, vsi->state))
		ice_down(vsi);

	ice_vsi_free_irq(vsi);
	ice_vsi_free_tx_rings(vsi);
	ice_vsi_free_rx_rings(vsi);
}

/**
 * ice_free_res - free a block of resources
 * @res: pointer to the resource
 * @index: starting index previously returned by ice_get_res
 * @id: identifier to track owner
 *
 * Returns number of resources freed, or -EINVAL on an invalid tracker or index
 */
int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
{
	int count = 0;
	int i;

	if (!res || index >= res->end)
		return -EINVAL;

	id |= ICE_RES_VALID_BIT;
	for (i = index; i < res->end && res->list[i] == id; i++) {
		res->list[i] = 0;
		count++;
	}

	return count;
}

/**
 * ice_search_res - Search the tracker for a block of resources
 * @res: pointer to the resource
 * @needed: size of the block needed
 * @id: identifier to track owner
 *
 * Returns the base item index of the block, or -ENOMEM for error
 */
static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
{
	int start = 0, end = 0;

	if (needed > res->end)
		return -ENOMEM;

	id |= ICE_RES_VALID_BIT;

	do {
		/* skip already allocated entries */
		if (res->list[end++] & ICE_RES_VALID_BIT) {
			start = end;
			if ((start + needed) > res->end)
				break;
		}

		if (end == (start + needed)) {
			int i = start;

			/* there was enough, so assign it to the requestor */
			while (i != end)
				res->list[i++] = id;

			return start;
		}
	} while (end < res->end);

	return -ENOMEM;
}

/**
 * ice_get_res - get a block of resources
 * @pf: board private structure
 * @res: pointer to the resource
 * @needed: size of the block needed
 * @id: identifier to track owner
 *
 *
 * Returns the base item index of the block, or negative for error
 */
int
ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
{
	if (!res || !pf)
		return -EINVAL;

	if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
		dev_err(&pf->pdev->dev,
			"param err: needed=%d, num_entries = %d id=0x%04x\n",
			needed, res->num_entries, id);
		return -EINVAL;
	}

	return ice_search_res(res, needed, id);
}
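
/* Hedged usage sketch (hypothetical caller, not this driver's actual flow):
 * pair ice_get_res() with ice_free_res() on the PF interrupt tracker to
 * reserve, and later reclaim, a block of MSI-X vectors for a VSI.
 *
 *	int base = ice_get_res(pf, pf->irq_tracker, vsi->num_q_vectors,
 *			       vsi->idx);
 *	if (base < 0)
 *		return base;
 *	vsi->base_vector = (u16)base;
 *
 *	ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
 *	pf->num_avail_sw_msix += vsi->num_q_vectors;
 */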

/**
 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 */
void ice_vsi_dis_irq(struct ice_vsi *vsi)
{
	int base = vsi->base_vector;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;
	int i;

	/* disable interrupt causation from each queue */
	if (vsi->tx_rings) {
		ice_for_each_txq(vsi, i) {
			if (vsi->tx_rings[i]) {
				u16 reg;

				reg = vsi->tx_rings[i]->reg_idx;
				val = rd32(hw, QINT_TQCTL(reg));
				val &= ~QINT_TQCTL_CAUSE_ENA_M;
				wr32(hw, QINT_TQCTL(reg), val);
			}
		}
	}

	if (vsi->rx_rings) {
		ice_for_each_rxq(vsi, i) {
			if (vsi->rx_rings[i]) {
				u16 reg;

				reg = vsi->rx_rings[i]->reg_idx;
				val = rd32(hw, QINT_RQCTL(reg));
				val &= ~QINT_RQCTL_CAUSE_ENA_M;
				wr32(hw, QINT_RQCTL(reg), val);
			}
		}
	}

	/* disable each interrupt */
	ice_for_each_q_vector(vsi, i) {
		if (!vsi->q_vectors[i])
			continue;
		wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
	}

	ice_flush(hw);

	/* don't call synchronize_irq() for VFs from the host */
	if (vsi->type == ICE_VSI_VF)
		return;

	ice_for_each_q_vector(vsi, i)
		synchronize_irq(pf->msix_entries[i + base].vector);
}

/**
 * ice_napi_del - Remove NAPI handler for the VSI
 * @vsi: VSI for which NAPI handler is to be removed
 */
void ice_napi_del(struct ice_vsi *vsi)
{
	int v_idx;

	if (!vsi->netdev)
		return;

	ice_for_each_q_vector(vsi, v_idx)
		netif_napi_del(&vsi->q_vectors[v_idx]->napi);
}

/**
 * ice_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Returns 0 on success or < 0 on error
 */
int ice_vsi_release(struct ice_vsi *vsi)
{
	struct ice_pf *pf;

	if (!vsi->back)
		return -ENODEV;
	pf = vsi->back;

	/* do not unregister while driver is in the reset recovery pending
	 * state. Since reset/rebuild happens through PF service task workqueue,
	 * it's not a good idea to unregister a netdev that is associated with
	 * the PF that is running the work queue items currently. This is done
	 * to avoid a check_flush_dependency() warning on this wq
	 */
	if (vsi->netdev && !ice_is_reset_in_progress(pf->state))
		unregister_netdev(vsi->netdev);

	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
		ice_rss_clean(vsi);

	/* Disable VSI and free resources */
	if (vsi->type != ICE_VSI_LB)
		ice_vsi_dis_irq(vsi);
	ice_vsi_close(vsi);

	/* SR-IOV determines needed MSIX resources all at once instead of per
	 * VSI since when VFs are spawned we know how many VFs there are and how
	 * many interrupts each VF needs. SR-IOV MSIX resources are also
	 * cleared in the same manner.
	 */
	if (vsi->type != ICE_VSI_VF) {
		/* reclaim SW interrupts back to the common pool */
		ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
		pf->num_avail_sw_msix += vsi->num_q_vectors;
	}

	if (!ice_is_safe_mode(pf)) {
		if (vsi->type == ICE_VSI_PF) {
			ice_vsi_add_rem_eth_mac(vsi, false);
			ice_cfg_sw_lldp(vsi, true, false);
			/* The Rx rule will only exist to remove if the LLDP FW
			 * engine is currently stopped
			 */
			if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
				ice_cfg_sw_lldp(vsi, false, false);
		}
	}

	ice_remove_vsi_fltr(&pf->hw, vsi->idx);
	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
	ice_vsi_delete(vsi);
	ice_vsi_free_q_vectors(vsi);

	/* make sure unregister_netdev() was called by checking __ICE_DOWN */
	if (vsi->netdev && test_bit(__ICE_DOWN, vsi->state)) {
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}

	ice_vsi_clear_rings(vsi);

	ice_vsi_put_qs(vsi);

	/* retain SW VSI data structure since it is needed to unregister and
	 * free VSI netdev when PF is not in reset recovery pending state,
	 * for example during rmmod.
	 */
	if (!ice_is_reset_in_progress(pf->state))
		ice_vsi_clear(vsi);

	return 0;
}
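
/* Hedged caller sketch (assumes the ice_for_each_vsi() iterator from ice.h):
 * release every PF-owned VSI, e.g. on driver remove when no reset is in
 * progress.
 *
 *	int i;
 *
 *	ice_for_each_vsi(pf, i)
 *		if (pf->vsi[i])
 *			ice_vsi_release(pf->vsi[i]);
 */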

/**
 * ice_vsi_rebuild - Rebuild VSI after reset
 * @vsi: VSI to be rebuild
 *
 * Returns 0 on success and negative value on failure
 */
int ice_vsi_rebuild(struct ice_vsi *vsi)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	struct ice_vf *vf = NULL;
	enum ice_status status;
	struct ice_pf *pf;
	int ret, i;

	if (!vsi)
		return -EINVAL;

	pf = vsi->back;
	if (vsi->type == ICE_VSI_VF)
		vf = &pf->vf[vsi->vf_id];

	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
	ice_vsi_free_q_vectors(vsi);

	/* SR-IOV determines needed MSIX resources all at once instead of per
	 * VSI since when VFs are spawned we know how many VFs there are and how
	 * many interrupts each VF needs. SR-IOV MSIX resources are also
	 * cleared in the same manner.
	 */
	if (vsi->type != ICE_VSI_VF) {
		/* reclaim SW interrupts back to the common pool */
		ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
		pf->num_avail_sw_msix += vsi->num_q_vectors;
		vsi->base_vector = 0;
	}

	ice_vsi_put_qs(vsi);
	ice_vsi_clear_rings(vsi);
	ice_vsi_free_arrays(vsi);
	ice_dev_onetime_setup(&pf->hw);
	if (vsi->type == ICE_VSI_VF)
		ice_vsi_set_num_qs(vsi, vf->vf_id);
	else
		ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);

	ret = ice_vsi_alloc_arrays(vsi);
	if (ret < 0)
		goto err_vsi;

	ice_vsi_get_qs(vsi);
	ice_vsi_set_tc_cfg(vsi);

	/* Initialize VSI struct elements and create VSI in FW */
	ret = ice_vsi_init(vsi);
	if (ret < 0)
		goto err_vsi;

	switch (vsi->type) {
	case ICE_VSI_PF:
		ret = ice_vsi_alloc_q_vectors(vsi);
		if (ret)
			goto err_rings;

		ret = ice_vsi_setup_vector_base(vsi);
		if (ret)
			goto err_vectors;

		ret = ice_vsi_set_q_vectors_reg_idx(vsi);
		if (ret)
			goto err_vectors;

		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto err_vectors;

		ice_vsi_map_rings_to_vectors(vsi);
		/* Do not exit if configuring RSS had an issue, at least
		 * receive traffic on first queue. Hence no need to capture
		 * return value
		 */
		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
			ice_vsi_cfg_rss_lut_key(vsi);
		break;
	case ICE_VSI_VF:
		ret = ice_vsi_alloc_q_vectors(vsi);
		if (ret)
			goto err_rings;

		ret = ice_vsi_set_q_vectors_reg_idx(vsi);
		if (ret)
			goto err_vectors;

		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto err_vectors;

		break;
	default:
		break;
	}

	/* configure VSI nodes based on number of queues and TCs */
	for (i = 0; i < vsi->tc_cfg.numtc; i++)
		max_txqs[i] = vsi->alloc_txq;

	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				 max_txqs);
	if (status) {
		dev_err(&pf->pdev->dev,
			"VSI %d failed lan queue config, error %d\n",
			vsi->vsi_num, status);
		goto err_vectors;
	}
	return 0;

err_vectors:
	ice_vsi_free_q_vectors(vsi);
err_rings:
	if (vsi->netdev) {
		vsi->current_netdev_flags = 0;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
err_vsi:
	ice_vsi_clear(vsi);
	set_bit(__ICE_RESET_FAILED, pf->state);
	return ret;
}
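
/* Hedged rebuild-path sketch (hypothetical caller): after a reset, each
 * surviving VSI is rebuilt and the error is propagated so the caller can
 * treat the reset as failed.
 *
 *	err = ice_vsi_rebuild(vsi);
 *	if (err) {
 *		dev_err(&pf->pdev->dev,
 *			"VSI %d rebuild failed\n", vsi->vsi_num);
 *		return err;
 *	}
 */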

/**
 * ice_is_reset_in_progress - check for a reset in progress
 * @state: PF state field
 */
bool ice_is_reset_in_progress(unsigned long *state)
{
	return test_bit(__ICE_RESET_OICR_RECV, state) ||
	       test_bit(__ICE_PFR_REQ, state) ||
	       test_bit(__ICE_CORER_REQ, state) ||
	       test_bit(__ICE_GLOBR_REQ, state);
}

#ifdef CONFIG_DCB
/**
 * ice_vsi_update_q_map - update our copy of the VSI info with new queue map
 * @vsi: VSI being configured
 * @ctx: the context buffer returned from AQ VSI update command
 */
static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
{
	vsi->info.mapping_flags = ctx->info.mapping_flags;
	memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping,
	       sizeof(vsi->info.q_mapping));
	memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));
}

/**
 * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
 * @vsi: VSI to be configured
 * @ena_tc: TC bitmap
 *
 * VSI queues expected to be quiesced before calling this function
 */
int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	struct ice_vsi_ctx *ctx;
	struct ice_pf *pf = vsi->back;
	enum ice_status status;
	int i, ret = 0;
	u8 num_tc = 0;

	ice_for_each_traffic_class(i) {
		/* build bitmap of enabled TCs */
		if (ena_tc & BIT(i))
			num_tc++;
		/* populate max_txqs per TC */
		max_txqs[i] = vsi->alloc_txq;
	}

	vsi->tc_cfg.ena_tc = ena_tc;
	vsi->tc_cfg.numtc = num_tc;

	ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->vf_num = 0;
	ctx->info = vsi->info;

	ice_vsi_setup_q_map(vsi, ctx);

	/* must indicate which sections of the VSI context are being modified */
	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
	if (status) {
		dev_info(&pf->pdev->dev, "Failed VSI Update\n");
		ret = -EIO;
		goto out;
	}

	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				 max_txqs);

	if (status) {
		dev_err(&pf->pdev->dev,
			"VSI %d failed TC config, error %d\n",
			vsi->vsi_num, status);
		ret = -EIO;
		goto out;
	}
	ice_vsi_update_q_map(vsi, ctx);
	vsi->info.valid_sections = 0;

	ice_vsi_cfg_netdev_tc(vsi, ena_tc);
out:
	devm_kfree(&pf->pdev->dev, ctx);
	return ret;
}
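
/* Hedged DCB sketch (hypothetical caller): the VSI is expected to be
 * quiesced before its TC map is reconfigured and restarted afterwards;
 * ice_dis_vsi()/ice_ena_vsi() are assumed to be the quiesce helpers.
 *
 *	ice_dis_vsi(vsi, true);
 *	if (ice_vsi_cfg_tc(vsi, new_ena_tc))
 *		dev_err(&pf->pdev->dev, "TC config failed\n");
 *	ice_ena_vsi(vsi, true);
 */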
#endif /* CONFIG_DCB */

/**
 * ice_nvm_version_str - format the NVM version strings
 * @hw: ptr to the hardware info
 */
char *ice_nvm_version_str(struct ice_hw *hw)
{
	u8 oem_ver, oem_patch, ver_hi, ver_lo;
	static char buf[ICE_NVM_VER_LEN];
	u16 oem_build;

	ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch, &ver_hi,
			    &ver_lo);

	snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d", ver_hi, ver_lo,
		 hw->nvm.eetrack, oem_ver, oem_build, oem_patch);

	return buf;
}
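
/* Hedged usage sketch (hypothetical ethtool drvinfo path): the returned
 * static buffer is only valid until the next call, so copy it out.
 *
 *	strlcpy(drvinfo->fw_version, ice_nvm_version_str(&pf->hw),
 *		sizeof(drvinfo->fw_version));
 */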

/**
 * ice_vsi_cfg_mac_fltr - Add or remove a MAC address filter for a VSI
 * @vsi: the VSI being configured
 * @macaddr: the MAC address to be added or removed
 * @set: Add or delete a MAC filter
 *
 * Adds or removes MAC address filter entry for VF VSI
 */
enum ice_status
ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set)
{
	LIST_HEAD(tmp_add_list);
	enum ice_status status;

	/* Update MAC filter list to be added or removed for a VSI */
	if (ice_add_mac_to_list(vsi, &tmp_add_list, macaddr)) {
		status = ICE_ERR_NO_MEMORY;
		goto cfg_mac_fltr_exit;
	}

	if (set)
		status = ice_add_mac(&vsi->back->hw, &tmp_add_list);
	else
		status = ice_remove_mac(&vsi->back->hw, &tmp_add_list);

cfg_mac_fltr_exit:
	ice_free_fltr_list(&vsi->back->pdev->dev, &tmp_add_list);
	return status;
}
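
/* Hedged usage sketch (hypothetical caller): add a unicast MAC filter and
 * remove it again on teardown; a non-zero ice_status indicates failure.
 *
 *	if (ice_vsi_cfg_mac_fltr(vsi, mac_addr, true))
 *		dev_err(&vsi->back->pdev->dev, "could not add MAC filter\n");
 *	...
 *	ice_vsi_cfg_mac_fltr(vsi, mac_addr, false);
 */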