// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"

/**
 * ice_vsi_type_str - maps VSI type enum to string equivalents
 * @type: VSI type enum
 */
const char *ice_vsi_type_str(enum ice_vsi_type type)
{
	switch (type) {
	case ICE_VSI_PF:
		return "ICE_VSI_PF";
	case ICE_VSI_VF:
		return "ICE_VSI_VF";
	case ICE_VSI_LB:
		return "ICE_VSI_LB";
	default:
		return "unknown";
	}
}

/**
 * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings
 * @vsi: the VSI being configured
 * @ena: start or stop the Rx rings
 */
static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
{
	int i, ret = 0;

	for (i = 0; i < vsi->num_rxq; i++) {
		ret = ice_vsi_ctrl_rx_ring(vsi, ena, i);
		if (ret)
			break;
	}

	return ret;
}

/**
 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
 * @vsi: VSI pointer
 *
 * On error: returns error code (negative)
 * On success: returns 0
 */
static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	/* allocate memory for both Tx and Rx ring pointers */
	vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,
				     sizeof(*vsi->tx_rings), GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;

	vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq,
				     sizeof(*vsi->rx_rings), GFP_KERNEL);
	if (!vsi->rx_rings)
		goto err_rings;

	/* XDP will have vsi->alloc_txq Tx queues as well, so double the size */
	vsi->txq_map = devm_kcalloc(dev, (2 * vsi->alloc_txq),
				    sizeof(*vsi->txq_map), GFP_KERNEL);

	if (!vsi->txq_map)
		goto err_txq_map;

	vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq,
				    sizeof(*vsi->rxq_map), GFP_KERNEL);
	if (!vsi->rxq_map)
		goto err_rxq_map;

	/* There is no need to allocate q_vectors for a loopback VSI. */
	if (vsi->type == ICE_VSI_LB)
		return 0;

	/* allocate memory for q_vector pointers */
	vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors,
				      sizeof(*vsi->q_vectors), GFP_KERNEL);
	if (!vsi->q_vectors)
		goto err_vectors;

	return 0;

err_vectors:
	devm_kfree(dev, vsi->rxq_map);
err_rxq_map:
	devm_kfree(dev, vsi->txq_map);
err_txq_map:
	devm_kfree(dev, vsi->rx_rings);
err_rings:
	devm_kfree(dev, vsi->tx_rings);
	return -ENOMEM;
}

/**
 * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
	switch (vsi->type) {
	case ICE_VSI_PF:
		/* fall through */
	case ICE_VSI_LB:
		vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
		vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
		break;
	default:
		dev_dbg(&vsi->back->pdev->dev,
			"Not setting number of Tx/Rx descriptors for VSI type %d\n",
			vsi->type);
		break;
	}
}

/**
 * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
 * @vsi: the VSI being configured
 * @vf_id: ID of the VF being configured
 */
static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
{
	struct ice_pf *pf = vsi->back;
	struct ice_vf *vf = NULL;

	if (vsi->type == ICE_VSI_VF)
		vsi->vf_id = vf_id;

	switch (vsi->type) {
	case ICE_VSI_PF:
		vsi->alloc_txq = min_t(int, ice_get_avail_txq_count(pf),
				       num_online_cpus());
		if (vsi->req_txq) {
			vsi->alloc_txq = vsi->req_txq;
			vsi->num_txq = vsi->req_txq;
		}

		pf->num_lan_tx = vsi->alloc_txq;

		/* only 1 Rx queue unless RSS is enabled */
		if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
			vsi->alloc_rxq = 1;
		} else {
			vsi->alloc_rxq = min_t(int, ice_get_avail_rxq_count(pf),
					       num_online_cpus());
			if (vsi->req_rxq) {
				vsi->alloc_rxq = vsi->req_rxq;
				vsi->num_rxq = vsi->req_rxq;
			}
		}

		pf->num_lan_rx = vsi->alloc_rxq;

		vsi->num_q_vectors = max_t(int, vsi->alloc_rxq, vsi->alloc_txq);
		break;
	case ICE_VSI_VF:
		vf = &pf->vf[vsi->vf_id];
		vsi->alloc_txq = vf->num_vf_qs;
		vsi->alloc_rxq = vf->num_vf_qs;
		/* pf->num_vf_msix includes (VF miscellaneous vector +
		 * data queue interrupts). Since vsi->num_q_vectors is number
		 * of queues vectors, subtract 1 (ICE_NONQ_VECS_VF) from the
		 * original vector count
		 */
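		/* Illustrative example (not from the original source): if a
		 * VF were assigned 5 MSI-X vectors, one would be reserved for
		 * the non-queue (OICR) interrupt, leaving 4 queue vectors.
		 */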
		vsi->num_q_vectors = pf->num_vf_msix - ICE_NONQ_VECS_VF;
		break;
	case ICE_VSI_LB:
		vsi->alloc_txq = 1;
		vsi->alloc_rxq = 1;
		break;
	default:
		dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi->type);
		break;
	}

	ice_vsi_set_num_desc(vsi);
}

/**
 * ice_get_free_slot - get the next free (NULL) location index in array
 * @array: array to search
 * @size: size of the array
 * @curr: last known occupied index to be used as a search hint
 *
 * void * is being used to keep the functionality generic. This lets us use this
 * function on any array of pointers.
 */
static int ice_get_free_slot(void *array, int size, int curr)
{
	int **tmp_array = (int **)array;
	int next;

	if (curr < (size - 1) && !tmp_array[curr + 1]) {
		next = curr + 1;
	} else {
		int i = 0;

		while ((i < size) && (tmp_array[i]))
			i++;
		if (i == size)
			next = ICE_NO_VSI;
		else
			next = i;
	}
	return next;
}
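
/* Illustrative example (not from the original source): with size = 4 and
 * array = { vsi0, NULL, vsi2, NULL }, a call with curr = 0 returns 1 (the
 * slot right after the hint is free), curr = 2 returns 3, and if no slot
 * were NULL the function would return ICE_NO_VSI.
 */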

/**
 * ice_vsi_delete - delete a VSI from the switch
 * @vsi: pointer to VSI being removed
 */
void ice_vsi_delete(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return;

	if (vsi->type == ICE_VSI_VF)
		ctxt->vf_num = vsi->vf_id;
	ctxt->vsi_num = vsi->vsi_num;

	memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));

	status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
	if (status)
		dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n",
			vsi->vsi_num, status);

	kfree(ctxt);
}

/**
 * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI
 * @vsi: pointer to VSI being cleared
 */
static void ice_vsi_free_arrays(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	/* free the ring and vector containers */
	if (vsi->q_vectors) {
		devm_kfree(dev, vsi->q_vectors);
		vsi->q_vectors = NULL;
	}
	if (vsi->tx_rings) {
		devm_kfree(dev, vsi->tx_rings);
		vsi->tx_rings = NULL;
	}
	if (vsi->rx_rings) {
		devm_kfree(dev, vsi->rx_rings);
		vsi->rx_rings = NULL;
	}
	if (vsi->txq_map) {
		devm_kfree(dev, vsi->txq_map);
		vsi->txq_map = NULL;
	}
	if (vsi->rxq_map) {
		devm_kfree(dev, vsi->rxq_map);
		vsi->rxq_map = NULL;
	}
}

/**
 * ice_vsi_clear - clean up and deallocate the provided VSI
 * @vsi: pointer to VSI being cleared
 *
 * This deallocates the VSI's queue resources, removes it from the PF's
 * VSI array if necessary, and deallocates the VSI
 *
 * Returns 0 on success, negative on failure
 */
int ice_vsi_clear(struct ice_vsi *vsi)
{
	struct ice_pf *pf = NULL;
	struct device *dev;

	if (!vsi)
		return 0;

	if (!vsi->back)
		return -EINVAL;

	pf = vsi->back;
	dev = ice_pf_to_dev(pf);

	if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
		dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx);
		return -EINVAL;
	}

	mutex_lock(&pf->sw_mutex);
	/* updates the PF for this cleared VSI */

	pf->vsi[vsi->idx] = NULL;
	if (vsi->idx < pf->next_vsi)
		pf->next_vsi = vsi->idx;

	ice_vsi_free_arrays(vsi);
	mutex_unlock(&pf->sw_mutex);
	devm_kfree(dev, vsi);

	return 0;
}

/**
 * ice_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
{
	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * ice_vsi_alloc - Allocates the next available struct VSI in the PF
 * @pf: board private structure
 * @type: type of VSI
 * @vf_id: ID of the VF being configured
 *
 * returns a pointer to a VSI on success, NULL on failure.
 */
static struct ice_vsi *
ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *vsi = NULL;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->sw_mutex);

	/* If we have already allocated our maximum number of VSIs,
	 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
	 * is available to be populated
	 */
	if (pf->next_vsi == ICE_NO_VSI) {
		dev_dbg(dev, "out of VSI slots!\n");
		goto unlock_pf;
	}

	vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL);
	if (!vsi)
		goto unlock_pf;

	vsi->type = type;
	vsi->back = pf;
	set_bit(__ICE_DOWN, vsi->state);

	vsi->idx = pf->next_vsi;

	if (type == ICE_VSI_VF)
		ice_vsi_set_num_qs(vsi, vf_id);
	else
		ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);

	switch (vsi->type) {
	case ICE_VSI_PF:
		if (ice_vsi_alloc_arrays(vsi))
			goto err_rings;

		/* Setup default MSIX irq handler for VSI */
		vsi->irq_handler = ice_msix_clean_rings;
		break;
	case ICE_VSI_VF:
		if (ice_vsi_alloc_arrays(vsi))
			goto err_rings;
		break;
	case ICE_VSI_LB:
		if (ice_vsi_alloc_arrays(vsi))
			goto err_rings;
		break;
	default:
		dev_warn(dev, "Unknown VSI type %d\n", vsi->type);
		goto unlock_pf;
	}

	/* fill VSI slot in the PF struct */
	pf->vsi[pf->next_vsi] = vsi;

	/* prepare pf->next_vsi for next use */
	pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
					 pf->next_vsi);
	goto unlock_pf;

err_rings:
	devm_kfree(dev, vsi);
	vsi = NULL;
unlock_pf:
	mutex_unlock(&pf->sw_mutex);
	return vsi;
}

/**
 * ice_vsi_get_qs - Assign queues from PF to VSI
 * @vsi: the VSI to assign queues to
 *
 * Returns 0 on success and a negative value on error
 */
static int ice_vsi_get_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_qs_cfg tx_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_txqs,
		.pf_map_size = pf->max_pf_txqs,
		.q_count = vsi->alloc_txq,
		.scatter_count = ICE_MAX_SCATTER_TXQS,
		.vsi_map = vsi->txq_map,
		.vsi_map_offset = 0,
		.mapping_mode = vsi->tx_mapping_mode
	};
	struct ice_qs_cfg rx_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_rxqs,
		.pf_map_size = pf->max_pf_rxqs,
		.q_count = vsi->alloc_rxq,
		.scatter_count = ICE_MAX_SCATTER_RXQS,
		.vsi_map = vsi->rxq_map,
		.vsi_map_offset = 0,
		.mapping_mode = vsi->rx_mapping_mode
	};
	int ret = 0;

	vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
	vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;

	ret = __ice_vsi_get_qs(&tx_qs_cfg);
	if (!ret)
		ret = __ice_vsi_get_qs(&rx_qs_cfg);

	return ret;
}

/**
 * ice_vsi_put_qs - Release queues from VSI to PF
 * @vsi: the VSI that is going to release queues
 */
void ice_vsi_put_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int i;

	mutex_lock(&pf->avail_q_mutex);

	for (i = 0; i < vsi->alloc_txq; i++) {
		clear_bit(vsi->txq_map[i], pf->avail_txqs);
		vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
	}

	for (i = 0; i < vsi->alloc_rxq; i++) {
		clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
		vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
	}

	mutex_unlock(&pf->avail_q_mutex);
}

/**
 * ice_is_safe_mode
 * @pf: pointer to the PF struct
 *
 * returns true if driver is in safe mode, false otherwise
 */
bool ice_is_safe_mode(struct ice_pf *pf)
{
	return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
}

/**
 * ice_rss_clean - Delete RSS related VSI structures that hold user inputs
 * @vsi: the VSI being removed
 */
static void ice_rss_clean(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	if (vsi->rss_hkey_user)
		devm_kfree(dev, vsi->rss_hkey_user);
	if (vsi->rss_lut_user)
		devm_kfree(dev, vsi->rss_lut_user);
}

/**
 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
{
	struct ice_hw_common_caps *cap;
	struct ice_pf *pf = vsi->back;

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		vsi->rss_size = 1;
		return;
	}

	cap = &pf->hw.func_caps.common_cap;
	switch (vsi->type) {
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		vsi->rss_table_size = cap->rss_table_size;
		vsi->rss_size = min_t(int, num_online_cpus(),
				      BIT(cap->rss_table_entry_width));
		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
		break;
	case ICE_VSI_VF:
		/* VF VSI gets a small RSS table.
		 * For VSI_LUT, LUT size should be set to 64 bytes
		 */
		vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
		vsi->rss_size = min_t(int, num_online_cpus(),
				      BIT(cap->rss_table_entry_width));
		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
		break;
	case ICE_VSI_LB:
		break;
	default:
		dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n",
			 vsi->type);
		break;
	}
}

/**
 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
 * @ctxt: the VSI context being set
 *
 * This initializes a default VSI context for all sections except the Queues.
 */
static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
{
	u32 table = 0;

	memset(&ctxt->info, 0, sizeof(ctxt->info));
	/* VSI's should be allocated from shared pool */
	ctxt->alloc_from_pool = true;
	/* Src pruning enabled by default */
	ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
	/* Traffic from VSI can be sent to LAN */
	ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
	/* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
	 * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
	 * packets untagged/tagged.
	 */
	ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
				  ICE_AQ_VSI_VLAN_MODE_M) >>
				 ICE_AQ_VSI_VLAN_MODE_S);
	/* Have 1:1 UP mapping for both ingress/egress tables */
	table |= ICE_UP_TABLE_TRANSLATE(0, 0);
	table |= ICE_UP_TABLE_TRANSLATE(1, 1);
	table |= ICE_UP_TABLE_TRANSLATE(2, 2);
	table |= ICE_UP_TABLE_TRANSLATE(3, 3);
	table |= ICE_UP_TABLE_TRANSLATE(4, 4);
	table |= ICE_UP_TABLE_TRANSLATE(5, 5);
	table |= ICE_UP_TABLE_TRANSLATE(6, 6);
	table |= ICE_UP_TABLE_TRANSLATE(7, 7);
	ctxt->info.ingress_table = cpu_to_le32(table);
	ctxt->info.egress_table = cpu_to_le32(table);
	/* Have 1:1 UP mapping for outer to inner UP table */
	ctxt->info.outer_up_table = cpu_to_le32(table);
	/* No Outer tag support outer_tag_flags remains to zero */
}

/**
 * ice_vsi_setup_q_map - Setup a VSI queue map
 * @vsi: the VSI being configured
 * @ctxt: VSI context structure
 */
static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
	u16 offset = 0, qmap = 0, tx_count = 0;
	u16 qcount_tx = vsi->alloc_txq;
	u16 qcount_rx = vsi->alloc_rxq;
	u16 tx_numq_tc, rx_numq_tc;
	u16 pow = 0, max_rss = 0;
	bool ena_tc0 = false;
	u8 netdev_tc = 0;
	int i;

	/* at least TC0 should be enabled by default */
	if (vsi->tc_cfg.numtc) {
		if (!(vsi->tc_cfg.ena_tc & BIT(0)))
			ena_tc0 = true;
	} else {
		ena_tc0 = true;
	}

	if (ena_tc0) {
		vsi->tc_cfg.numtc++;
		vsi->tc_cfg.ena_tc |= 1;
	}

	rx_numq_tc = qcount_rx / vsi->tc_cfg.numtc;
	if (!rx_numq_tc)
		rx_numq_tc = 1;
	tx_numq_tc = qcount_tx / vsi->tc_cfg.numtc;
	if (!tx_numq_tc)
		tx_numq_tc = 1;

	/* TC mapping is a function of the number of Rx queues assigned to the
	 * VSI for each traffic class and the offset of these queues.
	 * The first 10 bits are for the queue offset for TC0, and the next 4
	 * bits for the number of queues allocated to TC0. The number of
	 * queues is a power-of-2.
	 *
	 * If TC is not enabled, the queue offset is set to 0, and allocate one
	 * queue, this way, traffic for the given TC will be sent to the default
	 * queue.
	 *
	 * Setup number and offset of Rx queues for all TCs for the VSI
	 */
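	/* Worked example (illustrative, not from the original source): with a
	 * single enabled TC and 16 Rx queues, pow = order_base_2(16) = 4 and
	 * offset = 0, so the TC0 entry below carries 0 in the
	 * ICE_AQ_VSI_TC_Q_OFFSET field and 4 (i.e. 2^4 = 16 queues) in the
	 * ICE_AQ_VSI_TC_Q_NUM field.
	 */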

	qcount_rx = rx_numq_tc;

	/* qcount will change if RSS is enabled */
	if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
		if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
			if (vsi->type == ICE_VSI_PF)
				max_rss = ICE_MAX_LG_RSS_QS;
			else
				max_rss = ICE_MAX_SMALL_RSS_QS;
			qcount_rx = min_t(int, rx_numq_tc, max_rss);
			if (!vsi->req_rxq)
				qcount_rx = min_t(int, qcount_rx,
						  vsi->rss_size);
		}
	}

	/* find the (rounded up) power-of-2 of qcount */
	pow = order_base_2(qcount_rx);

	ice_for_each_traffic_class(i) {
		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
			/* TC is not enabled */
			vsi->tc_cfg.tc_info[i].qoffset = 0;
			vsi->tc_cfg.tc_info[i].qcount_rx = 1;
			vsi->tc_cfg.tc_info[i].qcount_tx = 1;
			vsi->tc_cfg.tc_info[i].netdev_tc = 0;
			ctxt->info.tc_mapping[i] = 0;
			continue;
		}

		/* TC is enabled */
		vsi->tc_cfg.tc_info[i].qoffset = offset;
		vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
		vsi->tc_cfg.tc_info[i].qcount_tx = tx_numq_tc;
		vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;

		qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
			ICE_AQ_VSI_TC_Q_OFFSET_M) |
			((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
			 ICE_AQ_VSI_TC_Q_NUM_M);
		offset += qcount_rx;
		tx_count += tx_numq_tc;
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* If offset is non-zero, it was accumulated from the enabled TCs above
	 * and is the correct Rx queue count for this VSI. Otherwise fall back
	 * to qcount_rx, which is always non-zero because it is derived from
	 * the VSI's allocated Rx queues (at least 1, so qcount_tx is also at
	 * least 1).
	 */
	if (offset)
		vsi->num_rxq = offset;
	else
		vsi->num_rxq = qcount_rx;

	vsi->num_txq = tx_count;

	if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
		dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
		/* since there is a chance that num_rxq could have been changed
		 * in the above for loop, make num_txq equal to num_rxq.
		 */
		vsi->num_txq = vsi->num_rxq;
	}

	/* Rx queue mapping */
	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
	/* q_mapping buffer holds the info for the first queue allocated for
	 * this VSI in the PF space and also the number of queues associated
	 * with this VSI.
	 */
	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
	ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
}

/**
 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
 * @ctxt: the VSI context being set
 * @vsi: the VSI being configured
 */
static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
	u8 lut_type, hash_type;
	struct device *dev;
	struct ice_pf *pf;

	pf = vsi->back;
	dev = ice_pf_to_dev(pf);

	switch (vsi->type) {
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		break;
	case ICE_VSI_VF:
		/* VF VSI gets a small RSS table which is a VSI LUT type */
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		break;
	case ICE_VSI_LB:
		dev_dbg(dev, "Unsupported VSI type %s\n",
			ice_vsi_type_str(vsi->type));
		return;
	default:
		dev_warn(dev, "Unknown VSI type %d\n", vsi->type);
		return;
	}

	ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
				ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
				((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
				 ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
}

/**
 * ice_vsi_init - Create and initialize a VSI
 * @vsi: the VSI being configured
 * @init_vsi: is this call creating a VSI
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command to create a new VSI.
 */
static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi_ctx *ctxt;
	struct device *dev;
	int ret = 0;

	dev = ice_pf_to_dev(pf);
	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	ctxt->info = vsi->info;
	switch (vsi->type) {
	case ICE_VSI_LB:
		/* fall through */
	case ICE_VSI_PF:
		ctxt->flags = ICE_AQ_VSI_TYPE_PF;
		break;
	case ICE_VSI_VF:
		ctxt->flags = ICE_AQ_VSI_TYPE_VF;
		/* VF number here is the absolute VF number (0-255) */
		ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
		break;
	default:
		ret = -ENODEV;
		goto out;
	}

	ice_set_dflt_vsi_ctx(ctxt);
	/* if the switch is in VEB mode, allow VSI loopback */
	if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;

	/* Set LUT type and HASH type if RSS is enabled */
	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		ice_set_rss_vsi_ctx(ctxt, vsi);
		/* if updating VSI context, make sure to set valid_section:
		 * to indicate which section of VSI context being updated
		 */
		if (!init_vsi)
			ctxt->info.valid_sections |=
				cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
	}

	ctxt->info.sw_id = vsi->port_info->sw_id;
	ice_vsi_setup_q_map(vsi, ctxt);
	if (!init_vsi) /* means VSI being updated */
		/* must indicate which sections of the VSI context are
		 * being modified
		 */
		ctxt->info.valid_sections |=
			cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);

	/* enable/disable MAC and VLAN anti-spoof when spoofchk is on/off
	 * respectively
	 */
	if (vsi->type == ICE_VSI_VF) {
		ctxt->info.valid_sections |=
			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
		if (pf->vf[vsi->vf_id].spoofchk) {
			ctxt->info.sec_flags |=
				ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
				(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
				 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
		} else {
			ctxt->info.sec_flags &=
				~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
				  (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
				   ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
		}
	}

	/* Allow control frames out of main VSI */
	if (vsi->type == ICE_VSI_PF) {
		ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
		ctxt->info.valid_sections |=
			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
	}

	if (init_vsi) {
		ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
		if (ret) {
			dev_err(dev, "Add VSI failed, err %d\n", ret);
			ret = -EIO;
			goto out;
		}
	} else {
		ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
		if (ret) {
			dev_err(dev, "Update VSI failed, err %d\n", ret);
			ret = -EIO;
			goto out;
		}
860 861 862
	}

	/* keep context for update VSI operations */
	vsi->info = ctxt->info;

	/* record VSI number returned */
	vsi->vsi_num = ctxt->vsi_num;

out:
	kfree(ctxt);
	return ret;
}

/**
 * ice_vsi_setup_vector_base - Set up the base vector for the given VSI
 * @vsi: ptr to the VSI
 *
 * This should only be called after ice_vsi_alloc() which allocates the
 * corresponding SW VSI structure and initializes num_queue_pairs for the
 * newly allocated VSI.
 *
 * Returns 0 on success or negative on failure
 */
static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	u16 num_q_vectors;

	dev = ice_pf_to_dev(pf);
	/* SRIOV doesn't grab irq_tracker entries for each VSI */
	if (vsi->type == ICE_VSI_VF)
		return 0;

	if (vsi->base_vector) {
		dev_dbg(dev, "VSI %d has non-zero base vector %d\n",
			vsi->vsi_num, vsi->base_vector);
		return -EEXIST;
	}

	num_q_vectors = vsi->num_q_vectors;
	/* reserve slots from OS requested IRQs */
	vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
				       vsi->idx);
	if (vsi->base_vector < 0) {
		dev_err(dev,
			"Failed to get tracking for %d vectors for VSI %d, err=%d\n",
			num_q_vectors, vsi->vsi_num, vsi->base_vector);
		return -ENOENT;
	}
	pf->num_avail_sw_msix -= num_q_vectors;

	return 0;
}

/**
 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
 * @vsi: the VSI having rings deallocated
 */
static void ice_vsi_clear_rings(struct ice_vsi *vsi)
{
	int i;

	if (vsi->tx_rings) {
		for (i = 0; i < vsi->alloc_txq; i++) {
			if (vsi->tx_rings[i]) {
				kfree_rcu(vsi->tx_rings[i], rcu);
				vsi->tx_rings[i] = NULL;
			}
		}
	}
	if (vsi->rx_rings) {
		for (i = 0; i < vsi->alloc_rxq; i++) {
			if (vsi->rx_rings[i]) {
				kfree_rcu(vsi->rx_rings[i], rcu);
				vsi->rx_rings[i] = NULL;
			}
		}
	}
}

/**
 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
 * @vsi: VSI which is having rings allocated
 */
static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int i;

	dev = ice_pf_to_dev(pf);
	/* Allocate Tx rings */
	for (i = 0; i < vsi->alloc_txq; i++) {
		struct ice_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);

		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->txq_map[i];
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->dev = dev;
		ring->count = vsi->num_tx_desc;
		vsi->tx_rings[i] = ring;
	}

	/* Allocate Rx rings */
	for (i = 0; i < vsi->alloc_rxq; i++) {
		struct ice_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->rxq_map[i];
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = dev;
		ring->count = vsi->num_rx_desc;
		vsi->rx_rings[i] = ring;
	}

	return 0;

err_out:
	ice_vsi_clear_rings(vsi);
	return -ENOMEM;
}

/**
 * ice_vsi_manage_rss_lut - disable/enable RSS
 * @vsi: the VSI being changed
 * @ena: boolean value indicating if this is an enable or disable request
 *
 * In the event of disable request for RSS, this function will zero out RSS
 * LUT, while in the event of enable request for RSS, it will reconfigure RSS
 * LUT.
 */
int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
{
	int err = 0;
	u8 *lut;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	if (ena) {
		if (vsi->rss_lut_user)
			memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
		else
			ice_fill_rss_lut(lut, vsi->rss_table_size,
					 vsi->rss_size);
	}

	err = ice_set_rss(vsi, NULL, lut, vsi->rss_table_size);
	kfree(lut);
	return err;
}

/**
 * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
 * @vsi: VSI to be configured
 */
static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
{
	struct ice_aqc_get_set_rss_keys *key;
	struct ice_pf *pf = vsi->back;
	enum ice_status status;
	struct device *dev;
	int err = 0;
	u8 *lut;

	dev = ice_pf_to_dev(pf);
	vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq);

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);

	status = ice_aq_set_rss_lut(&pf->hw, vsi->idx, vsi->rss_lut_type, lut,
				    vsi->rss_table_size);

	if (status) {
		dev_err(dev, "set_rss_lut failed, error %d\n", status);
		err = -EIO;
		goto ice_vsi_cfg_rss_exit;
	}

	key = kzalloc(sizeof(*key), GFP_KERNEL);
	if (!key) {
		err = -ENOMEM;
		goto ice_vsi_cfg_rss_exit;
	}

	if (vsi->rss_hkey_user)
		memcpy(key,
		       (struct ice_aqc_get_set_rss_keys *)vsi->rss_hkey_user,
		       ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
	else
		netdev_rss_key_fill((void *)key,
				    ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);

	status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key);

	if (status) {
		dev_err(dev, "set_rss_key failed, error %d\n", status);
		err = -EIO;
	}

	kfree(key);
ice_vsi_cfg_rss_exit:
	kfree(lut);
	return err;
}

/**
 * ice_add_mac_to_list - Add a MAC address filter entry to the list
 * @vsi: the VSI to be forwarded to
 * @add_list: pointer to the list which contains MAC filter entries
 * @macaddr: the MAC address to be added.
 *
 * Adds MAC address filter entry to the temp list
 *
 * Returns 0 on success or ENOMEM on failure.
 */
int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
			const u8 *macaddr)
{
	struct ice_fltr_list_entry *tmp;
	struct ice_pf *pf = vsi->back;

B
1106 1107 1108 1109
	if (!tmp)
		return -ENOMEM;

	tmp->fltr_info.flag = ICE_FLTR_TX;
	tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
	tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	tmp->fltr_info.vsi_handle = vsi->idx;
	ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr);

	INIT_LIST_HEAD(&tmp->list_entry);
	list_add(&tmp->list_entry, add_list);

	return 0;
}
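
/* Typical caller pattern (sketch, not taken verbatim from this driver):
 * build a temporary filter list, hand it to the switch layer, then free it.
 *
 *	LIST_HEAD(tmp_add_list);
 *
 *	if (!ice_add_mac_to_list(vsi, &tmp_add_list, mac))
 *		status = ice_add_mac(&pf->hw, &tmp_add_list);
 *	ice_free_fltr_list(ice_pf_to_dev(pf), &tmp_add_list);
 */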

/**
 * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
 * @vsi: the VSI to be updated
 */
void ice_update_eth_stats(struct ice_vsi *vsi)
{
	struct ice_eth_stats *prev_es, *cur_es;
	struct ice_hw *hw = &vsi->back->hw;
	u16 vsi_num = vsi->vsi_num;    /* HW absolute index of a VSI */

	prev_es = &vsi->eth_stats_prev;
	cur_es = &vsi->eth_stats;

	ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_bytes, &cur_es->rx_bytes);

	ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_unicast, &cur_es->rx_unicast);

	ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_multicast, &cur_es->rx_multicast);

	ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_broadcast, &cur_es->rx_broadcast);

	ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_discards, &cur_es->rx_discards);

	ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_bytes, &cur_es->tx_bytes);

	ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_unicast, &cur_es->tx_unicast);

	ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_multicast, &cur_es->tx_multicast);

	ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_broadcast, &cur_es->tx_broadcast);

	ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_errors, &cur_es->tx_errors);

	vsi->stat_offsets_loaded = true;
}

/**
 * ice_free_fltr_list - free filter lists helper
 * @dev: pointer to the device struct
 * @h: pointer to the list head to be freed
 *
 * Helper function to free filter lists previously created using
 * ice_add_mac_to_list
 */
void ice_free_fltr_list(struct device *dev, struct list_head *h)
{
	struct ice_fltr_list_entry *e, *tmp;

	list_for_each_entry_safe(e, tmp, h, list_entry) {
		list_del(&e->list_entry);
		devm_kfree(dev, e);
	}
}

/**
 * ice_vsi_add_vlan - Add VSI membership for given VLAN
 * @vsi: the VSI being configured
1189
 * @vid: VLAN ID to be added
1190 1191 1192 1193 1194 1195 1196
 */
int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
{
	struct ice_fltr_list_entry *tmp;
	struct ice_pf *pf = vsi->back;
	LIST_HEAD(tmp_add_list);
	enum ice_status status;
B
1198 1199
	int err = 0;

B
	tmp = devm_kzalloc(dev, sizeof(*tmp), GFP_KERNEL);
1202 1203 1204 1205 1206 1207
	if (!tmp)
		return -ENOMEM;

	tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	tmp->fltr_info.flag = ICE_FLTR_TX;
1208 1209
	tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
	tmp->fltr_info.vsi_handle = vsi->idx;
1210 1211 1212 1213 1214 1215 1216 1217
	tmp->fltr_info.l_data.vlan.vlan_id = vid;

	INIT_LIST_HEAD(&tmp->list_entry);
	list_add(&tmp->list_entry, &tmp_add_list);

	status = ice_add_vlan(&pf->hw, &tmp_add_list);
	if (status) {
		err = -ENODEV;
B
			vsi->vsi_num);
1220 1221
	}

B
1223 1224 1225 1226 1227 1228
	return err;
}

/**
 * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
 * @vsi: the VSI being configured
1229
 * @vid: VLAN ID to be removed
1230 1231 1232 1233 1234 1235 1236 1237
 *
 * Returns 0 on success and negative on failure
 */
int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
{
	struct ice_fltr_list_entry *list;
	struct ice_pf *pf = vsi->back;
	LIST_HEAD(tmp_add_list);
1238
	enum ice_status status;
B
1240
	int err = 0;
1241

B
	list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
1244 1245 1246 1247
	if (!list)
		return -ENOMEM;

	list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
1248
	list->fltr_info.vsi_handle = vsi->idx;
1249 1250 1251
	list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	list->fltr_info.l_data.vlan.vlan_id = vid;
	list->fltr_info.flag = ICE_FLTR_TX;
1252
	list->fltr_info.src_id = ICE_SRC_ID_VSI;
1253 1254 1255 1256

	INIT_LIST_HEAD(&list->list_entry);
	list_add(&list->list_entry, &tmp_add_list);

	status = ice_remove_vlan(&pf->hw, &tmp_add_list);
	if (status == ICE_ERR_DOES_NOT_EXIST) {
		dev_dbg(dev,
			"Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
			vid, vsi->vsi_num, status);
	} else if (status) {
		dev_err(dev,
			"Error removing VLAN %d on vsi %i error: %d\n",
			vid, vsi->vsi_num, status);
		err = -EIO;
1267 1268
	}

B
1270
	return err;
1271 1272
}

M
 * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
 * @vsi: VSI
 */
void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
{
	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
		vsi->rx_buf_len = ICE_RXBUF_2048;
#if (PAGE_SIZE < 8192)
	} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
		vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
		vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
#endif
	} else {
		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
#if (PAGE_SIZE < 8192)
		vsi->rx_buf_len = ICE_RXBUF_3072;
#else
		vsi->rx_buf_len = ICE_RXBUF_2048;
#endif
	}
}

/**
 * ice_vsi_cfg_rxqs - Configure the VSI for Rx
 * @vsi: the VSI being configured
 *
 * Return 0 on success and a negative value on error
 * Configure the Rx VSI for operation.
 */
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
{
	u16 i;

	if (vsi->type == ICE_VSI_VF)
		goto setup_rings;

	ice_vsi_cfg_frame_size(vsi);
setup_rings:
	/* set up individual rings */
	for (i = 0; i < vsi->num_rxq; i++) {
		int err;

		err = ice_setup_rx_ctx(vsi->rx_rings[i]);
		if (err) {
			dev_err(&vsi->back->pdev->dev,
				"ice_setup_rx_ctx failed for RxQ %d, err %d\n",
				i, err);
			return err;
		}
	}

	return 0;
}

/**
 * ice_vsi_cfg_txqs - Configure the VSI for Tx
 * @vsi: the VSI being configured
1333
 * @rings: Tx ring array to be configured
1334 1335 1336 1337
 *
 * Return 0 on success and a negative value on error
 * Configure the Tx VSI for operation.
 */
static int
ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
{
	struct ice_aqc_add_tx_qgrp *qg_buf;
	u16 q_idx = 0;
	int err = 0;

	qg_buf = kzalloc(sizeof(*qg_buf), GFP_KERNEL);
	if (!qg_buf)
		return -ENOMEM;

	qg_buf->num_txqs = 1;

	for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
		err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
		if (err)
			goto err_cfg_txqs;
	}

err_cfg_txqs:
	kfree(qg_buf);
	return err;
}

/**
 * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
 * @vsi: the VSI being configured
 *
 * Return 0 on success and a negative value on error
 * Configure the Tx VSI for operation.
 */
int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
{
	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings);
}

/**
 * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
 * @vsi: the VSI being configured
 *
 * Return 0 on success and a negative value on error
 * Configure the Tx queues dedicated for XDP in given VSI for operation.
 */
int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
{
	int ret;
	int i;

	ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings);
	if (ret)
		return ret;

	for (i = 0; i < vsi->num_xdp_txq; i++)
		vsi->xdp_rings[i]->xsk_umem = ice_xsk_umem(vsi->xdp_rings[i]);

	return ret;
}

/**
 * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
 * @intrl: interrupt rate limit in usecs
 * @gran: interrupt rate limit granularity in usecs
 *
 * This function converts a decimal interrupt rate limit in usecs to the format
 * expected by firmware.
 */
u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
{
	u32 val = intrl / gran;

	if (val)
		return val | GLINT_RATE_INTRL_ENA_M;
	return 0;
}
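
/* Illustrative example (not from the original source): with gran = 4 usecs,
 * intrl = 10 gives val = 10 / 4 = 2, so the returned register value is
 * 2 | GLINT_RATE_INTRL_ENA_M; any intrl smaller than one granule yields 0,
 * which leaves the rate limit disabled.
 */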

/**
 * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 *
 * This configures MSIX mode interrupts for the PF VSI, and should not be used
 * for the VF VSI.
 */
void ice_vsi_cfg_msix(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 txq = 0, rxq = 0;
	int i, q;

	for (i = 0; i < vsi->num_q_vectors; i++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];
		u16 reg_idx = q_vector->reg_idx;

		ice_cfg_itr(hw, q_vector);

		wr32(hw, GLINT_RATE(reg_idx),
		     ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));

		/* Both Transmit Queue Interrupt Cause Control register
		 * and Receive Queue Interrupt Cause control register
		 * expects MSIX_INDX field to be the vector index
		 * within the function space and not the absolute
		 * vector index across PF or across device.
		 * For SR-IOV VF VSIs queue vector index always starts
		 * with 1 since first vector index(0) is used for OICR
		 * in VF space. Since VMDq and other PF VSIs are within
		 * the PF function space, use the vector index that is
		 * tracked for this PF.
		 */
		for (q = 0; q < q_vector->num_ring_tx; q++) {
			ice_cfg_txq_interrupt(vsi, txq, reg_idx,
					      q_vector->tx.itr_idx);
			txq++;
		}

		for (q = 0; q < q_vector->num_ring_rx; q++) {
			ice_cfg_rxq_interrupt(vsi, rxq, reg_idx,
					      q_vector->rx.itr_idx);
			rxq++;
		}
	}
}

/**
 * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
 * @vsi: the VSI being changed
 */
int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
{
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;
	int ret = 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	/* Here we are configuring the VSI to let the driver add VLAN tags by
	 * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
	 * insertion happens in the Tx hot path, in ice_tx_map.
	 */
	ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;

	/* Preserve existing VLAN strip setting */
	ctxt->info.vlan_flags |= (vsi->info.vlan_flags &
				  ICE_AQ_VSI_VLAN_EMOD_M);

	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);

	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (status) {
		dev_err(&vsi->back->pdev->dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
			status, hw->adminq.sq_last_status);
		ret = -EIO;
		goto out;
	}

	vsi->info.vlan_flags = ctxt->info.vlan_flags;
out:
	kfree(ctxt);
	return ret;
}

/**
 * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
 * @vsi: the VSI being changed
 * @ena: boolean value indicating if this is an enable or disable request
 */
int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
{
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;
	int ret = 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	/* Here we are configuring what the VSI should do with the VLAN tag in
	 * the Rx packet. We can either leave the tag in the packet or put it in
	 * the Rx descriptor.
	 */
	if (ena)
		/* Strip VLAN tag from Rx packet and put it in the desc */
		ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
	else
		/* Disable stripping. Leave tag in packet */
		ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;

	/* Allow all packets untagged/tagged */
	ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;

	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);

	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (status) {
		dev_err(&vsi->back->pdev->dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
			ena, status, hw->adminq.sq_last_status);
		ret = -EIO;
		goto out;
	}

	vsi->info.vlan_flags = ctxt->info.vlan_flags;
out:
	kfree(ctxt);
	return ret;
}

/**
 * ice_vsi_start_rx_rings - start VSI's Rx rings
 * @vsi: the VSI whose rings are to be started
 *
 * Returns 0 on success and a negative value on error
 */
int ice_vsi_start_rx_rings(struct ice_vsi *vsi)
{
	return ice_vsi_ctrl_rx_rings(vsi, true);
}

/**
 * ice_vsi_stop_rx_rings - stop VSI's Rx rings
 * @vsi: the VSI
 *
 * Returns 0 on success and a negative value on error
 */
int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
{
	return ice_vsi_ctrl_rx_rings(vsi, false);
}

/**
 * ice_vsi_stop_tx_rings - Disable Tx rings
 * @vsi: the VSI being configured
 * @rst_src: reset source
 * @rel_vmvf_num: Relative ID of VF/VM
 * @rings: Tx ring array to be stopped
 */
static int
ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
		      u16 rel_vmvf_num, struct ice_ring **rings)
{
	u16 q_idx;

	if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
		return -EINVAL;

	for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
		struct ice_txq_meta txq_meta = { };
		int status;

		if (!rings || !rings[q_idx])
			return -EINVAL;

		ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
		status = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num,
					      rings[q_idx], &txq_meta);

		if (status)
			return status;
	}

	return 0;
}

/**
 * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
 * @vsi: the VSI being configured
 * @rst_src: reset source
1608
 * @rel_vmvf_num: Relative ID of VF/VM
1609
 */
1610 1611 1612
int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
			  u16 rel_vmvf_num)
1613
{
1614
	return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings);
1615 1616
}

M
 * ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings
 * @vsi: the VSI being configured
 */
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
{
	return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings);
}

/**
 * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
 * @vsi: VSI to enable or disable VLAN pruning on
 * @ena: set to true to enable VLAN pruning and false to disable it
1630
 * @vlan_promisc: enable valid security flags if not in VLAN promiscuous mode
1631 1632 1633
 *
 * returns 0 if VSI is updated, negative otherwise
 */
1634
int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
1635 1636
{
	struct ice_vsi_ctx *ctxt;
1637
	struct ice_pf *pf;
1638 1639 1640 1641 1642
	int status;

	if (!vsi)
		return -EINVAL;

1643
	pf = vsi->back;
1644
	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
1645 1646 1647 1648 1649
	if (!ctxt)
		return -ENOMEM;

	ctxt->info = vsi->info;

B
1651
		ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
B
1653 1654
		ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;

1655 1656
	if (!vlan_promisc)
		ctxt->info.valid_sections =
B
1658

1659
	status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
1660
	if (status) {
1661
		netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n",
1662
			   ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
1663
			   pf->hw.adminq.sq_last_status);
1664 1665 1666 1667 1668
		goto err_out;
	}

	vsi->info.sw_flags2 = ctxt->info.sw_flags2;

1669
	kfree(ctxt);
1670 1671 1672
	return 0;

err_out:
1673
	kfree(ctxt);
1674 1675 1676
	return -EIO;
}

1677 1678 1679 1680 1681 1682 1683 1684
static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
{
	struct ice_dcbx_cfg *cfg = &vsi->port_info->local_dcbx_cfg;

	vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
	vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
}

1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706
/**
 * ice_vsi_set_q_vectors_reg_idx - set the HW register index for all q_vectors
 * @vsi: VSI to set the q_vectors register index on
 */
static int
ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi)
{
	u16 i;

	if (!vsi || !vsi->q_vectors)
		return -EINVAL;

	ice_for_each_q_vector(vsi, i) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];

		if (!q_vector) {
			dev_err(&vsi->back->pdev->dev,
				"Failed to set reg_idx on q_vector %d VSI %d\n",
				i, vsi->vsi_num);
			goto clear_reg_idx;
		}

B
			struct ice_vf *vf = &vsi->back->vf[vsi->vf_id];

			q_vector->reg_idx = ice_calc_vf_reg_idx(vf, q_vector);
		} else {
			q_vector->reg_idx =
				q_vector->v_idx + vsi->base_vector;
		}
1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729
	}

	return 0;

clear_reg_idx:
	ice_for_each_q_vector(vsi, i) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];

		if (q_vector)
			q_vector->reg_idx = 0;
	}

	return -EINVAL;
}

1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741
/**
 * ice_vsi_add_rem_eth_mac - Program VSI ethertype based filter with rule
 * @vsi: the VSI being configured
 * @add_rule: boolean value to add or remove ethertype filter rule
 */
static void
ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
{
	struct ice_fltr_list_entry *list;
	struct ice_pf *pf = vsi->back;
	LIST_HEAD(tmp_add_list);
	enum ice_status status;
B
1743

B
	list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764
	if (!list)
		return;

	list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
	list->fltr_info.fltr_act = ICE_DROP_PACKET;
	list->fltr_info.flag = ICE_FLTR_TX;
	list->fltr_info.src_id = ICE_SRC_ID_VSI;
	list->fltr_info.vsi_handle = vsi->idx;
	list->fltr_info.l_data.ethertype_mac.ethertype = vsi->ethtype;

	INIT_LIST_HEAD(&list->list_entry);
	list_add(&list->list_entry, &tmp_add_list);

	if (add_rule)
		status = ice_add_eth_mac(&pf->hw, &tmp_add_list);
	else
		status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);

	if (status)
B
1766 1767 1768
			"Failure Adding or Removing Ethertype on VSI %i error: %d\n",
			vsi->vsi_num, status);

B
1770 1771
}

1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783
/**
 * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
 * @vsi: the VSI being configured
 * @tx: bool to determine Tx or Rx rule
 * @create: bool to determine create or remove Rule
 */
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
{
	struct ice_fltr_list_entry *list;
	struct ice_pf *pf = vsi->back;
	LIST_HEAD(tmp_add_list);
	enum ice_status status;
B
1785

B
	list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
1788 1789 1790 1791 1792
	if (!list)
		return;

	list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
	list->fltr_info.vsi_handle = vsi->idx;
1793
	list->fltr_info.l_data.ethertype_mac.ethertype = ETH_P_LLDP;
1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813

	if (tx) {
		list->fltr_info.fltr_act = ICE_DROP_PACKET;
		list->fltr_info.flag = ICE_FLTR_TX;
		list->fltr_info.src_id = ICE_SRC_ID_VSI;
	} else {
		list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
		list->fltr_info.flag = ICE_FLTR_RX;
		list->fltr_info.src_id = ICE_SRC_ID_LPORT;
	}

	INIT_LIST_HEAD(&list->list_entry);
	list_add(&list->list_entry, &tmp_add_list);

	if (create)
		status = ice_add_eth_mac(&pf->hw, &tmp_add_list);
	else
		status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);

	if (status)
		dev_err(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n",
			create ? "adding" : "removing", tx ? "TX" : "RX",
			vsi->vsi_num, status);

B
1819 1820
}

1821 1822 1823 1824 1825
/**
 * ice_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 * @type: VSI type
1826
 * @vf_id: defines VF ID to which this VSI connects. This field is meant to be
1827 1828 1829 1830 1831 1832 1833 1834 1835 1836
 *         used only for ICE_VSI_VF VSI type. For other VSI types, should
 *         fill-in ICE_INVAL_VFID as input.
 *
 * This allocates the sw VSI structure and its queue resources.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct on
 * success, NULL on failure.
 */
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
1837
	      enum ice_vsi_type type, u16 vf_id)
1838 1839
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
B
1841
	enum ice_status status;
1842 1843 1844
	struct ice_vsi *vsi;
	int ret, i;

1845 1846 1847 1848 1849
	if (type == ICE_VSI_VF)
		vsi = ice_vsi_alloc(pf, type, vf_id);
	else
		vsi = ice_vsi_alloc(pf, type, ICE_INVAL_VFID);

	if (!vsi) {
		dev_err(dev, "could not allocate VSI\n");
		return NULL;
	}

	vsi->port_info = pi;
	vsi->vsw = pf->first_sw;
	if (vsi->type == ICE_VSI_PF)
		vsi->ethtype = ETH_P_PAUSE;

	if (vsi->type == ICE_VSI_VF)
		vsi->vf_id = vf_id;

	if (ice_vsi_get_qs(vsi)) {
		dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
			vsi->idx);
		goto unroll_get_qs;
	}

	/* set RSS capabilities */
	ice_vsi_set_rss_params(vsi);

	/* set TC configuration */
	ice_vsi_set_tc_cfg(vsi);

	/* create the VSI */
	ret = ice_vsi_init(vsi, true);
	if (ret)
		goto unroll_get_qs;

	switch (vsi->type) {
	case ICE_VSI_PF:
		ret = ice_vsi_alloc_q_vectors(vsi);
		if (ret)
			goto unroll_vsi_init;

		ret = ice_vsi_setup_vector_base(vsi);
		if (ret)
			goto unroll_alloc_q_vector;

		ret = ice_vsi_set_q_vectors_reg_idx(vsi);
		if (ret)
			goto unroll_vector_base;

		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto unroll_vector_base;

		ice_vsi_map_rings_to_vectors(vsi);

		/* Do not exit if configuring RSS had an issue, at least
		 * receive traffic on first queue. Hence no need to capture
		 * return value
		 */
		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
			ice_vsi_cfg_rss_lut_key(vsi);
		break;
	case ICE_VSI_VF:
		/* VF driver will take care of creating the netdev for this VSI
		 * type and of mapping queues to vectors through Virtchnl. The
		 * PF driver only creates a VSI and corresponding structures
		 * for bookkeeping purposes
		 */
		ret = ice_vsi_alloc_q_vectors(vsi);
		if (ret)
			goto unroll_vsi_init;

		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto unroll_alloc_q_vector;

		ret = ice_vsi_set_q_vectors_reg_idx(vsi);
		if (ret)
			goto unroll_vector_base;

		/* Do not exit if configuring RSS had an issue, at least
		 * receive traffic on first queue. Hence no need to capture
		 * return value
		 */
		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
			ice_vsi_cfg_rss_lut_key(vsi);
		break;
	case ICE_VSI_LB:
		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto unroll_vsi_init;
		break;
	default:
		/* clean up the resources and exit */
		goto unroll_vsi_init;
	}

	/* configure VSI nodes based on number of queues and TC's */
	for (i = 0; i < vsi->tc_cfg.numtc; i++)
		max_txqs[i] = vsi->alloc_txq;

	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				 max_txqs);
	if (status) {
		dev_err(dev, "VSI %d failed lan queue config, error %d\n",
			vsi->vsi_num, status);
		goto unroll_vector_base;
	}

	/* Add a switch rule to drop all Tx Flow Control Frames, of lookup
	 * type ETHERTYPE, from VSIs, and restrict malicious VFs from sending
	 * out PAUSE or PFC frames. If enabled, FW can still send FC frames.
	 * The rule is added once for the PF VSI in order to create the
	 * appropriate recipe, since VSI/VSI list is ignored with drop action...
	 * Also add rules to handle LLDP Tx packets. Tx LLDP packets need to
	 * be dropped so that VFs cannot send LLDP packets to reconfigure DCB
	 * settings in the HW.
	 */
	if (!ice_is_safe_mode(pf))
		if (vsi->type == ICE_VSI_PF) {
			ice_vsi_add_rem_eth_mac(vsi, true);

			/* Tx LLDP packets */
			ice_cfg_sw_lldp(vsi, true, true);
		}

	return vsi;

unroll_vector_base:
	/* reclaim SW interrupts back to the common pool */
	ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
	pf->num_avail_sw_msix += vsi->num_q_vectors;
unroll_alloc_q_vector:
	ice_vsi_free_q_vectors(vsi);
unroll_vsi_init:
	ice_vsi_delete(vsi);
unroll_get_qs:
	ice_vsi_put_qs(vsi);
	ice_vsi_clear(vsi);

	return NULL;
}

/**
 * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
 * @vsi: the VSI being cleaned up
 */
static void ice_vsi_release_msix(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 txq = 0;
	u32 rxq = 0;
	int i, q;

	for (i = 0; i < vsi->num_q_vectors; i++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];
		u16 reg_idx = q_vector->reg_idx;

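		/* zero both ITR registers for this vector, then clear the
		 * Tx/Rx queue to interrupt cause mappings
		 */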
		wr32(hw, GLINT_ITR(ICE_IDX_ITR0, reg_idx), 0);
		wr32(hw, GLINT_ITR(ICE_IDX_ITR1, reg_idx), 0);
		for (q = 0; q < q_vector->num_ring_tx; q++) {
			wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
			if (ice_is_xdp_ena_vsi(vsi)) {
				u32 xdp_txq = txq + vsi->num_xdp_txq;

				wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0);
			}
			txq++;
		}

		for (q = 0; q < q_vector->num_ring_rx; q++) {
			wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
			rxq++;
		}
	}

	ice_flush(hw);
}

/**
 * ice_vsi_free_irq - Free the IRQ association with the OS
 * @vsi: the VSI being configured
 */
void ice_vsi_free_irq(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int base = vsi->base_vector;
	int i;

	if (!vsi->q_vectors || !vsi->irqs_ready)
		return;

	ice_vsi_release_msix(vsi);
	if (vsi->type == ICE_VSI_VF)
		return;

	vsi->irqs_ready = false;
	ice_for_each_q_vector(vsi, i) {
		u16 vector = i + base;
		int irq_num;

		irq_num = pf->msix_entries[vector].vector;

		/* free only the irqs that were actually requested */
		if (!vsi->q_vectors[i] ||
		    !(vsi->q_vectors[i]->num_ring_tx ||
		      vsi->q_vectors[i]->num_ring_rx))
			continue;

		/* clear the affinity notifier in the IRQ descriptor */
		irq_set_affinity_notifier(irq_num, NULL);

		/* clear the affinity_mask in the IRQ descriptor */
		irq_set_affinity_hint(irq_num, NULL);
		synchronize_irq(irq_num);
		devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]);
	}
}

/**
 * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
 * @vsi: the VSI having resources freed
 */
void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
{
	int i;

	if (!vsi->tx_rings)
		return;

	ice_for_each_txq(vsi, i)
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
			ice_free_tx_ring(vsi->tx_rings[i]);
}

/**
 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
 * @vsi: the VSI having resources freed
 */
void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
{
	int i;

	if (!vsi->rx_rings)
		return;

	ice_for_each_rxq(vsi, i)
		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
			ice_free_rx_ring(vsi->rx_rings[i]);
}

/**
 * ice_vsi_close - Shut down a VSI
 * @vsi: the VSI being shut down
 */
void ice_vsi_close(struct ice_vsi *vsi)
{
	if (!test_and_set_bit(__ICE_DOWN, vsi->state))
		ice_down(vsi);

	ice_vsi_free_irq(vsi);
	ice_vsi_free_tx_rings(vsi);
	ice_vsi_free_rx_rings(vsi);
}

/**
 * ice_ena_vsi - resume a VSI
 * @vsi: the VSI being resumed
 * @locked: is the rtnl_lock already held
 */
int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
{
	int err = 0;

	if (!test_bit(__ICE_NEEDS_RESTART, vsi->state))
		return 0;

	clear_bit(__ICE_NEEDS_RESTART, vsi->state);

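	/* ice_open() is a netdev op, so the RTNL lock must be held; take it
	 * here only when the caller does not already hold it
	 */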
	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
		if (netif_running(vsi->netdev)) {
			if (!locked)
				rtnl_lock();

			err = ice_open(vsi->netdev);

			if (!locked)
				rtnl_unlock();
		}
	}

	return err;
}

/**
 * ice_dis_vsi - pause a VSI
 * @vsi: the VSI being paused
 * @locked: is the rtnl_lock already held
 */
void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
{
	if (test_bit(__ICE_DOWN, vsi->state))
		return;

	set_bit(__ICE_NEEDS_RESTART, vsi->state);

	if (vsi->type == ICE_VSI_PF && vsi->netdev) {
		if (netif_running(vsi->netdev)) {
			if (!locked)
				rtnl_lock();

			ice_stop(vsi->netdev);

			if (!locked)
				rtnl_unlock();
		} else {
			ice_vsi_close(vsi);
		}
	}
}

/**
 * ice_free_res - free a block of resources
 * @res: pointer to the resource
 * @index: starting index previously returned by ice_get_res
 * @id: identifier to track owner
 *
 * Returns number of resources freed
 */
int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
{
	int count = 0;
	int i;

	if (!res || index >= res->end)
		return -EINVAL;

	id |= ICE_RES_VALID_BIT;
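	/* walk forward from the start index, releasing consecutive entries
	 * still owned by this ID
	 */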
	for (i = index; i < res->end && res->list[i] == id; i++) {
		res->list[i] = 0;
		count++;
	}

	return count;
}

/**
 * ice_search_res - Search the tracker for a block of resources
 * @res: pointer to the resource
 * @needed: size of the block needed
 * @id: identifier to track owner
 *
 * Returns the base item index of the block, or -ENOMEM for error
 */
static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
{
	int start = 0, end = 0;

	if (needed > res->end)
		return -ENOMEM;

	id |= ICE_RES_VALID_BIT;

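	/* first-fit linear scan: 'end' walks the tracker list while 'start'
	 * marks the beginning of the current run of free entries
	 */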
	do {
		/* skip already allocated entries */
		if (res->list[end++] & ICE_RES_VALID_BIT) {
			start = end;
			if ((start + needed) > res->end)
				break;
		}

		if (end == (start + needed)) {
			int i = start;

			/* there was enough, so assign it to the requestor */
			while (i != end)
				res->list[i++] = id;

			return start;
		}
	} while (end < res->end);

	return -ENOMEM;
}

/**
 * ice_get_res - get a block of resources
 * @pf: board private structure
 * @res: pointer to the resource
 * @needed: size of the block needed
 * @id: identifier to track owner
 *
 * Returns the base item index of the block, or negative for error
 */
int
ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
{
	if (!res || !pf)
		return -EINVAL;

	if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
		dev_err(ice_pf_to_dev(pf),
			"param err: needed=%d, num_entries = %d id=0x%04x\n",
			needed, res->num_entries, id);
		return -EINVAL;
	}

	return ice_search_res(res, needed, id);
}

/**
 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 */
void ice_vsi_dis_irq(struct ice_vsi *vsi)
{
	int base = vsi->base_vector;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;
	int i;

	/* disable interrupt causation from each queue */
	if (vsi->tx_rings) {
		ice_for_each_txq(vsi, i) {
			if (vsi->tx_rings[i]) {
				u16 reg;

				reg = vsi->tx_rings[i]->reg_idx;
				val = rd32(hw, QINT_TQCTL(reg));
				val &= ~QINT_TQCTL_CAUSE_ENA_M;
				wr32(hw, QINT_TQCTL(reg), val);
			}
		}
	}

	if (vsi->rx_rings) {
		ice_for_each_rxq(vsi, i) {
			if (vsi->rx_rings[i]) {
				u16 reg;

				reg = vsi->rx_rings[i]->reg_idx;
				val = rd32(hw, QINT_RQCTL(reg));
				val &= ~QINT_RQCTL_CAUSE_ENA_M;
				wr32(hw, QINT_RQCTL(reg), val);
			}
		}
	}

	/* disable each interrupt */
	ice_for_each_q_vector(vsi, i) {
		if (!vsi->q_vectors[i])
			continue;
		wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
	}

	ice_flush(hw);

	/* don't call synchronize_irq() for VF's from the host */
	if (vsi->type == ICE_VSI_VF)
		return;

	ice_for_each_q_vector(vsi, i)
		synchronize_irq(pf->msix_entries[i + base].vector);
}

/**
 * ice_napi_del - Remove NAPI handler for the VSI
 * @vsi: VSI for which NAPI handler is to be removed
 */
void ice_napi_del(struct ice_vsi *vsi)
{
	int v_idx;

	if (!vsi->netdev)
		return;

	ice_for_each_q_vector(vsi, v_idx)
		netif_napi_del(&vsi->q_vectors[v_idx]->napi);
}

/**
 * ice_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Returns 0 on success or < 0 on error
 */
int ice_vsi_release(struct ice_vsi *vsi)
{
	struct ice_pf *pf;

	if (!vsi->back)
		return -ENODEV;
	pf = vsi->back;

	/* Do not unregister the netdev while the driver has a reset recovery
	 * pending. Reset/rebuild happens through the PF service task
	 * workqueue, so unregistering a netdev associated with the PF that is
	 * currently running those work items would trigger a
	 * check_flush_dependency() warning on this workqueue.
	 */
	if (vsi->netdev && !ice_is_reset_in_progress(pf->state))
		unregister_netdev(vsi->netdev);

	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
		ice_rss_clean(vsi);

	/* Disable VSI and free resources */
	if (vsi->type != ICE_VSI_LB)
		ice_vsi_dis_irq(vsi);
	ice_vsi_close(vsi);

	/* SR-IOV determines needed MSIX resources all at once instead of per
	 * VSI since when VFs are spawned we know how many VFs there are and how
	 * many interrupts each VF needs. SR-IOV MSIX resources are also
	 * cleared in the same manner.
	 */
	if (vsi->type != ICE_VSI_VF) {
		/* reclaim SW interrupts back to the common pool */
		ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
		pf->num_avail_sw_msix += vsi->num_q_vectors;
	}

	if (!ice_is_safe_mode(pf)) {
		if (vsi->type == ICE_VSI_PF) {
			ice_vsi_add_rem_eth_mac(vsi, false);
			ice_cfg_sw_lldp(vsi, true, false);
			/* The Rx rule will only exist to remove if the LLDP FW
			 * engine is currently stopped
			 */
			if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
				ice_cfg_sw_lldp(vsi, false, false);
		}
	}

	ice_remove_vsi_fltr(&pf->hw, vsi->idx);
	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
	ice_vsi_delete(vsi);
	ice_vsi_free_q_vectors(vsi);

	/* make sure unregister_netdev() was called by checking __ICE_DOWN */
	if (vsi->netdev && test_bit(__ICE_DOWN, vsi->state)) {
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}

	ice_vsi_clear_rings(vsi);

	ice_vsi_put_qs(vsi);

	/* retain SW VSI data structure since it is needed to unregister and
	 * free the VSI netdev when the PF is not in a reset recovery pending
	 * state, for example during rmmod.
	 */
	if (!ice_is_reset_in_progress(pf->state))
		ice_vsi_clear(vsi);

	return 0;
}

/**
 * ice_vsi_rebuild - Rebuild VSI after reset
 * @vsi: VSI to be rebuilt
 * @init_vsi: is this an initialization or a reconfigure of the VSI
 *
 * Returns 0 on success and negative value on failure
 */
int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	struct ice_vf *vf = NULL;
	enum ice_status status;
	struct ice_pf *pf;
	int ret, i;

	if (!vsi)
		return -EINVAL;

	pf = vsi->back;
	if (vsi->type == ICE_VSI_VF)
		vf = &pf->vf[vsi->vf_id];

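	/* tear down the scheduler config, interrupt vectors, rings, and queue
	 * mappings for this VSI; they are all re-created below
	 */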
	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
	ice_vsi_free_q_vectors(vsi);

	/* SR-IOV determines needed MSIX resources all at once instead of per
	 * VSI since when VFs are spawned we know how many VFs there are and how
	 * many interrupts each VF needs. SR-IOV MSIX resources are also
	 * cleared in the same manner.
	 */
	if (vsi->type != ICE_VSI_VF) {
		/* reclaim SW interrupts back to the common pool */
		ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
		pf->num_avail_sw_msix += vsi->num_q_vectors;
		vsi->base_vector = 0;
	}

	if (ice_is_xdp_ena_vsi(vsi))
		/* return value check can be skipped here, it always returns
		 * 0 if reset is in progress
		 */
		ice_destroy_xdp_rings(vsi);
	ice_vsi_put_qs(vsi);
	ice_vsi_clear_rings(vsi);
	ice_vsi_free_arrays(vsi);
	ice_dev_onetime_setup(&pf->hw);
	if (vsi->type == ICE_VSI_VF)
		ice_vsi_set_num_qs(vsi, vf->vf_id);
	else
		ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);

	ret = ice_vsi_alloc_arrays(vsi);
	if (ret < 0)
		goto err_vsi;

	ice_vsi_get_qs(vsi);
	ice_vsi_set_tc_cfg(vsi);

	/* Initialize VSI struct elements and create VSI in FW */
	ret = ice_vsi_init(vsi, init_vsi);
	if (ret < 0)
		goto err_vsi;

	switch (vsi->type) {
	case ICE_VSI_PF:
		ret = ice_vsi_alloc_q_vectors(vsi);
		if (ret)
			goto err_rings;

		ret = ice_vsi_setup_vector_base(vsi);
		if (ret)
			goto err_vectors;

		ret = ice_vsi_set_q_vectors_reg_idx(vsi);
		if (ret)
			goto err_vectors;

		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto err_vectors;

		ice_vsi_map_rings_to_vectors(vsi);
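		/* re-create the XDP Tx rings that were torn down as part of
		 * the rebuild
		 */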
		if (ice_is_xdp_ena_vsi(vsi)) {
			vsi->num_xdp_txq = vsi->alloc_txq;
			ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
			if (ret)
				goto err_vectors;
		}
		/* Do not exit if configuring RSS had an issue, at least
		 * receive traffic on first queue. Hence no need to capture
		 * return value
		 */
		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
			ice_vsi_cfg_rss_lut_key(vsi);
		break;
	case ICE_VSI_VF:
		ret = ice_vsi_alloc_q_vectors(vsi);
		if (ret)
			goto err_rings;

		ret = ice_vsi_set_q_vectors_reg_idx(vsi);
		if (ret)
			goto err_vectors;

		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto err_vectors;

		break;
	default:
		break;
	}

	/* configure VSI nodes based on number of queues and TC's */
	for (i = 0; i < vsi->tc_cfg.numtc; i++) {
		max_txqs[i] = vsi->alloc_txq;

		if (ice_is_xdp_ena_vsi(vsi))
			max_txqs[i] += vsi->num_xdp_txq;
	}

	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				 max_txqs);
	if (status) {
		dev_err(ice_pf_to_dev(pf),
			"VSI %d failed lan queue config, error %d\n",
			vsi->vsi_num, status);
		if (init_vsi) {
			ret = -EIO;
			goto err_vectors;
		} else {
			return ice_schedule_reset(pf, ICE_RESET_PFR);
		}
	}
	return 0;

err_vectors:
	ice_vsi_free_q_vectors(vsi);
err_rings:
	if (vsi->netdev) {
		vsi->current_netdev_flags = 0;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
err_vsi:
	ice_vsi_clear(vsi);
	set_bit(__ICE_RESET_FAILED, pf->state);
	return ret;
}

/**
 * ice_is_reset_in_progress - check for a reset in progress
 * @state: PF state field
 */
bool ice_is_reset_in_progress(unsigned long *state)
{
	return test_bit(__ICE_RESET_OICR_RECV, state) ||
	       test_bit(__ICE_DCBNL_DEVRESET, state) ||
	       test_bit(__ICE_PFR_REQ, state) ||
	       test_bit(__ICE_CORER_REQ, state) ||
	       test_bit(__ICE_GLOBR_REQ, state);
}

#ifdef CONFIG_DCB
/**
 * ice_vsi_update_q_map - update our copy of the VSI info with new queue map
 * @vsi: VSI being configured
 * @ctx: the context buffer returned from AQ VSI update command
 */
static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
{
	vsi->info.mapping_flags = ctx->info.mapping_flags;
	memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping,
	       sizeof(vsi->info.q_mapping));
	memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));
}

/**
 * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
 * @vsi: VSI to be configured
 * @ena_tc: TC bitmap
 *
 * VSI queues expected to be quiesced before calling this function
 */
int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	struct ice_vsi_ctx *ctx;
	struct ice_pf *pf = vsi->back;
	enum ice_status status;
	struct device *dev;
	int i, ret = 0;
	u8 num_tc = 0;

	dev = ice_pf_to_dev(pf);

	ice_for_each_traffic_class(i) {
		/* build bitmap of enabled TCs */
		if (ena_tc & BIT(i))
			num_tc++;
		/* populate max_txqs per TC */
		max_txqs[i] = vsi->alloc_txq;
	}

	vsi->tc_cfg.ena_tc = ena_tc;
	vsi->tc_cfg.numtc = num_tc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->vf_num = 0;
	ctx->info = vsi->info;

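	/* regenerate the VSI queue to TC mapping in the context based on the
	 * new TC configuration
	 */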
	ice_vsi_setup_q_map(vsi, ctx);

	/* must indicate which sections of the VSI context are being modified */
	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
	if (status) {
		dev_info(dev, "Failed VSI Update\n");
		ret = -EIO;
		goto out;
	}

	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				 max_txqs);

	if (status) {
		dev_err(dev, "VSI %d failed TC config, error %d\n",
			vsi->vsi_num, status);
		ret = -EIO;
		goto out;
	}
	ice_vsi_update_q_map(vsi, ctx);
	vsi->info.valid_sections = 0;

	ice_vsi_cfg_netdev_tc(vsi, ena_tc);
out:
	kfree(ctx);
	return ret;
}
#endif /* CONFIG_DCB */

/**
 * ice_nvm_version_str - format the NVM version strings
 * @hw: ptr to the hardware info
 */
char *ice_nvm_version_str(struct ice_hw *hw)
{
	u8 oem_ver, oem_patch, ver_hi, ver_lo;
	static char buf[ICE_NVM_VER_LEN];
	u16 oem_build;

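	/* note: the version string is formatted into static storage, so the
	 * returned pointer must not be freed and the call is not reentrant
	 */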
	ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch, &ver_hi,
			    &ver_lo);

	snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d", ver_hi, ver_lo,
		 hw->nvm.eetrack, oem_ver, oem_build, oem_patch);

	return buf;
}

/**
 * ice_update_ring_stats - Update ring statistics
 * @ring: ring to update
 * @cont: used to increment per-vector counters
 * @pkts: number of processed packets
 * @bytes: number of processed bytes
 *
 * This function assumes that caller has acquired a u64_stats_sync lock.
 */
static void
ice_update_ring_stats(struct ice_ring *ring, struct ice_ring_container *cont,
		      u64 pkts, u64 bytes)
{
	ring->stats.bytes += bytes;
	ring->stats.pkts += pkts;
	cont->total_bytes += bytes;
	cont->total_pkts += pkts;
}

/**
 * ice_update_tx_ring_stats - Update Tx ring specific counters
 * @tx_ring: ring to update
 * @pkts: number of processed packets
 * @bytes: number of processed bytes
 */
void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes)
{
	u64_stats_update_begin(&tx_ring->syncp);
	ice_update_ring_stats(tx_ring, &tx_ring->q_vector->tx, pkts, bytes);
	u64_stats_update_end(&tx_ring->syncp);
}

/**
 * ice_update_rx_ring_stats - Update Rx ring specific counters
 * @rx_ring: ring to update
 * @pkts: number of processed packets
 * @bytes: number of processed bytes
 */
void ice_update_rx_ring_stats(struct ice_ring *rx_ring, u64 pkts, u64 bytes)
{
	u64_stats_update_begin(&rx_ring->syncp);
	ice_update_ring_stats(rx_ring, &rx_ring->q_vector->rx, pkts, bytes);
	u64_stats_update_end(&rx_ring->syncp);
}

/**
 * ice_vsi_cfg_mac_fltr - Add or remove a MAC address filter for a VSI
 * @vsi: the VSI being configured MAC filter
 * @macaddr: the MAC address to be added.
 * @set: Add or delete a MAC filter
 *
 * Adds or removes MAC address filter entry for VF VSI
 */
enum ice_status
ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set)
{
	LIST_HEAD(tmp_add_list);
	enum ice_status status;

	 /* Update MAC filter list to be added or removed for a VSI */
	if (ice_add_mac_to_list(vsi, &tmp_add_list, macaddr)) {
		status = ICE_ERR_NO_MEMORY;
		goto cfg_mac_fltr_exit;
	}

	if (set)
		status = ice_add_mac(&vsi->back->hw, &tmp_add_list);
	else
		status = ice_remove_mac(&vsi->back->hw, &tmp_add_list);

cfg_mac_fltr_exit:
	ice_free_fltr_list(&vsi->back->pdev->dev, &tmp_add_list);
	return status;
}

/**
 * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
 * @sw: switch to check if its default forwarding VSI is free
 *
 * Returns true if the default forwarding VSI is already being used, else
 * returns false, signalling that it's available to use.
 */
bool ice_is_dflt_vsi_in_use(struct ice_sw *sw)
{
	return (sw->dflt_vsi && sw->dflt_vsi_ena);
}

/**
 * ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI
 * @sw: switch for the default forwarding VSI to compare against
 * @vsi: VSI to compare against default forwarding VSI
 *
 * If this VSI passed in is the default forwarding VSI then return true, else
 * return false
 */
bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
{
	return (sw->dflt_vsi == vsi && sw->dflt_vsi_ena);
}

/**
 * ice_set_dflt_vsi - set the default forwarding VSI
 * @sw: switch used to assign the default forwarding VSI
 * @vsi: VSI getting set as the default forwarding VSI on the switch
 *
 * If the VSI passed in is already the default VSI and it's enabled just return
 * success.
 *
 * If there is already a default VSI on the switch and it's enabled then return
 * -EEXIST since there can only be one default VSI per switch.
 *
 *  Otherwise try to set the VSI passed in as the switch's default VSI and
 *  return the result.
 */
int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
{
	enum ice_status status;
	struct device *dev;

	if (!sw || !vsi)
		return -EINVAL;

	dev = ice_pf_to_dev(vsi->back);

	/* the VSI passed in is already the default VSI */
	if (ice_is_vsi_dflt_vsi(sw, vsi)) {
		dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n",
			vsi->vsi_num);
		return 0;
	}

	/* another VSI is already the default VSI for this switch */
	if (ice_is_dflt_vsi_in_use(sw)) {
		dev_err(dev,
			"Default forwarding VSI %d already in use, disable it and try again\n",
			sw->dflt_vsi->vsi_num);
		return -EEXIST;
	}

	status = ice_cfg_dflt_vsi(&vsi->back->hw, vsi->idx, true, ICE_FLTR_RX);
	if (status) {
		dev_err(dev,
			"Failed to set VSI %d as the default forwarding VSI, error %d\n",
			vsi->vsi_num, status);
		return -EIO;
	}

	sw->dflt_vsi = vsi;
	sw->dflt_vsi_ena = true;

	return 0;
}

/**
 * ice_clear_dflt_vsi - clear the default forwarding VSI
 * @sw: switch used to clear the default VSI
 *
 * If the switch has no default VSI or it's not enabled then return error.
 *
 * Otherwise try to clear the default VSI and return the result.
 */
int ice_clear_dflt_vsi(struct ice_sw *sw)
{
	struct ice_vsi *dflt_vsi;
	enum ice_status status;
	struct device *dev;

	if (!sw)
		return -EINVAL;

	dev = ice_pf_to_dev(sw->pf);

	dflt_vsi = sw->dflt_vsi;

	/* there is no default VSI configured */
	if (!ice_is_dflt_vsi_in_use(sw))
		return -ENODEV;

	status = ice_cfg_dflt_vsi(&dflt_vsi->back->hw, dflt_vsi->idx, false,
				  ICE_FLTR_RX);
	if (status) {
		dev_err(dev,
			"Failed to clear the default forwarding VSI %d, error %d\n",
			dflt_vsi->vsi_num, status);
		return -EIO;
	}

	sw->dflt_vsi = NULL;
	sw->dflt_vsi_ena = false;

	return 0;
}