// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_flow.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_devlink.h"

/**
 * ice_vsi_type_str - maps VSI type enum to string equivalents
 * @vsi_type: VSI type enum
 */
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
{
	switch (vsi_type) {
	case ICE_VSI_PF:
		return "ICE_VSI_PF";
	case ICE_VSI_VF:
		return "ICE_VSI_VF";
	case ICE_VSI_CTRL:
		return "ICE_VSI_CTRL";
	case ICE_VSI_LB:
		return "ICE_VSI_LB";
	default:
		return "unknown";
	}
}

/**
 * ice_vsi_ctrl_all_rx_rings - Start or stop a VSI's Rx rings
 * @vsi: the VSI being configured
 * @ena: start or stop the Rx rings
 *
 * First enable/disable all of the Rx rings, flush any remaining writes, and
 * then verify that they have all been enabled/disabled successfully. This will
 * let all of the register writes complete when enabling/disabling the Rx rings
 * before waiting for the change in hardware to complete.
 */
static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
{
	int ret = 0;
	u16 i;

	for (i = 0; i < vsi->num_rxq; i++)
		ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);

	ice_flush(&vsi->back->hw);

	for (i = 0; i < vsi->num_rxq; i++) {
		ret = ice_vsi_wait_one_rx_ring(vsi, ena, i);
		if (ret)
			break;
	}

	return ret;
}

/**
 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
 * @vsi: VSI pointer
 *
 * On error: returns error code (negative)
 * On success: returns 0
 */
static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	/* allocate memory for both Tx and Rx ring pointers */
	vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,
				     sizeof(*vsi->tx_rings), GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;

	vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq,
				     sizeof(*vsi->rx_rings), GFP_KERNEL);
	if (!vsi->rx_rings)
		goto err_rings;

	/* XDP will have vsi->alloc_txq Tx queues as well, so double the size */
	vsi->txq_map = devm_kcalloc(dev, (2 * vsi->alloc_txq),
				    sizeof(*vsi->txq_map), GFP_KERNEL);

	if (!vsi->txq_map)
		goto err_txq_map;

	vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq,
				    sizeof(*vsi->rxq_map), GFP_KERNEL);
	if (!vsi->rxq_map)
		goto err_rxq_map;

	/* There is no need to allocate q_vectors for a loopback VSI. */
	if (vsi->type == ICE_VSI_LB)
		return 0;

	/* allocate memory for q_vector pointers */
	vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors,
				      sizeof(*vsi->q_vectors), GFP_KERNEL);
	if (!vsi->q_vectors)
		goto err_vectors;

	return 0;

err_vectors:
	devm_kfree(dev, vsi->rxq_map);
err_rxq_map:
	devm_kfree(dev, vsi->txq_map);
err_txq_map:
	devm_kfree(dev, vsi->rx_rings);
err_rings:
	devm_kfree(dev, vsi->tx_rings);
	return -ENOMEM;
}

/**
 * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
	switch (vsi->type) {
	case ICE_VSI_PF:
	case ICE_VSI_CTRL:
	case ICE_VSI_LB:
		/* a user could change the values of num_[tr]x_desc using
		 * ethtool -G so we should keep those values instead of
		 * overwriting them with the defaults.
		 */
		if (!vsi->num_rx_desc)
			vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
		if (!vsi->num_tx_desc)
			vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
		break;
	default:
		dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n",
			vsi->type);
		break;
	}
}

/**
 * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
 * @vsi: the VSI being configured
 * @vf_id: ID of the VF being configured
 */
static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
{
	struct ice_pf *pf = vsi->back;
	struct ice_vf *vf = NULL;

	if (vsi->type == ICE_VSI_VF)
		vsi->vf_id = vf_id;

	switch (vsi->type) {
	case ICE_VSI_PF:
		vsi->alloc_txq = min_t(int, ice_get_avail_txq_count(pf),
				       num_online_cpus());
		if (vsi->req_txq) {
			vsi->alloc_txq = vsi->req_txq;
			vsi->num_txq = vsi->req_txq;
		}

		pf->num_lan_tx = vsi->alloc_txq;

		/* only 1 Rx queue unless RSS is enabled */
		if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
			vsi->alloc_rxq = 1;
		} else {
			vsi->alloc_rxq = min_t(int, ice_get_avail_rxq_count(pf),
					       num_online_cpus());
			if (vsi->req_rxq) {
				vsi->alloc_rxq = vsi->req_rxq;
				vsi->num_rxq = vsi->req_rxq;
			}
		}

		pf->num_lan_rx = vsi->alloc_rxq;

		vsi->num_q_vectors = max_t(int, vsi->alloc_rxq, vsi->alloc_txq);
		break;
	case ICE_VSI_VF:
		vf = &pf->vf[vsi->vf_id];
		vsi->alloc_txq = vf->num_vf_qs;
		vsi->alloc_rxq = vf->num_vf_qs;
		/* pf->num_msix_per_vf includes (VF miscellaneous vector +
		 * data queue interrupts). Since vsi->num_q_vectors is number
		 * of queues vectors, subtract 1 (ICE_NONQ_VECS_VF) from the
		 * original vector count
		 */
		vsi->num_q_vectors = pf->num_msix_per_vf - ICE_NONQ_VECS_VF;
		break;
	case ICE_VSI_CTRL:
		vsi->alloc_txq = 1;
		vsi->alloc_rxq = 1;
		vsi->num_q_vectors = 1;
		break;
	case ICE_VSI_LB:
		vsi->alloc_txq = 1;
		vsi->alloc_rxq = 1;
		break;
	default:
		dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi->type);
		break;
	}

	ice_vsi_set_num_desc(vsi);
}

/**
 * ice_get_free_slot - get the next free (NULL) location index in array
 * @array: array to search
 * @size: size of the array
 * @curr: last known occupied index to be used as a search hint
 *
 * void * is being used to keep the functionality generic. This lets us use this
 * function on any array of pointers.
 */
static int ice_get_free_slot(void *array, int size, int curr)
{
	int **tmp_array = (int **)array;
	int next;

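	/* first try the slot just past the hint, then fall back to a scan */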
	if (curr < (size - 1) && !tmp_array[curr + 1]) {
		next = curr + 1;
	} else {
		int i = 0;

		while ((i < size) && (tmp_array[i]))
			i++;
		if (i == size)
			next = ICE_NO_VSI;
		else
			next = i;
	}
	return next;
}

/**
 * ice_vsi_delete - delete a VSI from the switch
 * @vsi: pointer to VSI being removed
 */
static void ice_vsi_delete(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return;

	if (vsi->type == ICE_VSI_VF)
		ctxt->vf_num = vsi->vf_id;
	ctxt->vsi_num = vsi->vsi_num;

	memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));

	status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
	if (status)
		dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %s\n",
			vsi->vsi_num, ice_stat_str(status));

	kfree(ctxt);
}

/**
 * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI
 * @vsi: pointer to VSI being cleared
 */
static void ice_vsi_free_arrays(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	/* free the ring and vector containers */
	if (vsi->q_vectors) {
		devm_kfree(dev, vsi->q_vectors);
		vsi->q_vectors = NULL;
	}
	if (vsi->tx_rings) {
		devm_kfree(dev, vsi->tx_rings);
		vsi->tx_rings = NULL;
	}
	if (vsi->rx_rings) {
		devm_kfree(dev, vsi->rx_rings);
		vsi->rx_rings = NULL;
	}
	if (vsi->txq_map) {
		devm_kfree(dev, vsi->txq_map);
		vsi->txq_map = NULL;
	}
	if (vsi->rxq_map) {
		devm_kfree(dev, vsi->rxq_map);
		vsi->rxq_map = NULL;
	}
}

/**
 * ice_vsi_clear - clean up and deallocate the provided VSI
 * @vsi: pointer to VSI being cleared
 *
 * This deallocates the VSI's queue resources, removes it from the PF's
 * VSI array if necessary, and deallocates the VSI
 *
 * Returns 0 on success, negative on failure
 */
static int ice_vsi_clear(struct ice_vsi *vsi)
{
	struct ice_pf *pf = NULL;
	struct device *dev;

	if (!vsi)
		return 0;

	if (!vsi->back)
		return -EINVAL;

	pf = vsi->back;
	dev = ice_pf_to_dev(pf);

	if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
		dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx);
		return -EINVAL;
	}

	mutex_lock(&pf->sw_mutex);
	/* updates the PF for this cleared VSI */

	pf->vsi[vsi->idx] = NULL;
	if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL)
		pf->next_vsi = vsi->idx;

	ice_vsi_free_arrays(vsi);
	mutex_unlock(&pf->sw_mutex);
	devm_kfree(dev, vsi);

	return 0;
}

/**
 * ice_msix_clean_ctrl_vsi - MSIX mode interrupt handler for ctrl VSI
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data)
{
	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;

	if (!q_vector->tx.ring)
		return IRQ_HANDLED;

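/* budget of Rx descriptors to clean per interrupt on the FD queue */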
#define FDIR_RX_DESC_CLEAN_BUDGET 64
	ice_clean_rx_irq(q_vector->rx.ring, FDIR_RX_DESC_CLEAN_BUDGET);
	ice_clean_ctrl_tx_irq(q_vector->tx.ring);

	return IRQ_HANDLED;
}

/**
 * ice_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
{
	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * ice_vsi_alloc - Allocates the next available struct VSI in the PF
 * @pf: board private structure
 * @vsi_type: type of VSI
 * @vf_id: ID of the VF being configured
 *
 * returns a pointer to a VSI on success, NULL on failure.
 */
static struct ice_vsi *
ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *vsi = NULL;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->sw_mutex);

	/* If we have already allocated our maximum number of VSIs,
	 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
	 * is available to be populated
	 */
	if (pf->next_vsi == ICE_NO_VSI) {
		dev_dbg(dev, "out of VSI slots!\n");
		goto unlock_pf;
	}

	vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL);
	if (!vsi)
		goto unlock_pf;

	vsi->type = vsi_type;
	vsi->back = pf;
	set_bit(__ICE_DOWN, vsi->state);

	if (vsi_type == ICE_VSI_VF)
		ice_vsi_set_num_qs(vsi, vf_id);
	else
		ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);

	switch (vsi->type) {
	case ICE_VSI_PF:
		if (ice_vsi_alloc_arrays(vsi))
			goto err_rings;

		/* Setup default MSIX irq handler for VSI */
		vsi->irq_handler = ice_msix_clean_rings;
		break;
	case ICE_VSI_CTRL:
		if (ice_vsi_alloc_arrays(vsi))
			goto err_rings;

		/* Setup ctrl VSI MSIX irq handler */
		vsi->irq_handler = ice_msix_clean_ctrl_vsi;
		break;
	case ICE_VSI_VF:
		if (ice_vsi_alloc_arrays(vsi))
			goto err_rings;
		break;
	case ICE_VSI_LB:
		if (ice_vsi_alloc_arrays(vsi))
			goto err_rings;
		break;
	default:
		dev_warn(dev, "Unknown VSI type %d\n", vsi->type);
		goto unlock_pf;
	}

	if (vsi->type == ICE_VSI_CTRL) {
		/* Use the last VSI slot as the index for the control VSI */
		vsi->idx = pf->num_alloc_vsi - 1;
		pf->ctrl_vsi_idx = vsi->idx;
		pf->vsi[vsi->idx] = vsi;
	} else {
		/* fill slot and make note of the index */
		vsi->idx = pf->next_vsi;
		pf->vsi[pf->next_vsi] = vsi;

		/* prepare pf->next_vsi for next use */
		pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
						 pf->next_vsi);
	}
	goto unlock_pf;

err_rings:
	devm_kfree(dev, vsi);
	vsi = NULL;
unlock_pf:
	mutex_unlock(&pf->sw_mutex);
	return vsi;
}

/**
 * ice_alloc_fd_res - Allocate FD resource for a VSI
 * @vsi: pointer to the ice_vsi
 *
 * This allocates the FD resources
 *
 * Returns 0 on success, -EPERM on no-op or -EIO on failure
 */
static int ice_alloc_fd_res(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	u32 g_val, b_val;

	/* Flow Director filters are only allocated/assigned to the PF VSI which
	 * passes the traffic. The CTRL VSI is only used to add/delete filters
	 * so we don't allocate resources to it
	 */

	/* FD filters from guaranteed pool per VSI */
	g_val = pf->hw.func_caps.fd_fltr_guar;
	if (!g_val)
		return -EPERM;

	/* FD filters from best effort pool */
	b_val = pf->hw.func_caps.fd_fltr_best_effort;
	if (!b_val)
		return -EPERM;

	if (vsi->type != ICE_VSI_PF)
		return -EPERM;

	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EPERM;

	vsi->num_gfltr = g_val / pf->num_alloc_vsi;

	/* each VSI gets same "best_effort" quota */
	vsi->num_bfltr = b_val;

	return 0;
}

/**
 * ice_vsi_get_qs - Assign queues from PF to VSI
 * @vsi: the VSI to assign queues to
 *
 * Returns 0 on success and a negative value on error
 */
static int ice_vsi_get_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_qs_cfg tx_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_txqs,
		.pf_map_size = pf->max_pf_txqs,
		.q_count = vsi->alloc_txq,
		.scatter_count = ICE_MAX_SCATTER_TXQS,
		.vsi_map = vsi->txq_map,
		.vsi_map_offset = 0,
		.mapping_mode = ICE_VSI_MAP_CONTIG
	};
	struct ice_qs_cfg rx_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_rxqs,
		.pf_map_size = pf->max_pf_rxqs,
		.q_count = vsi->alloc_rxq,
		.scatter_count = ICE_MAX_SCATTER_RXQS,
		.vsi_map = vsi->rxq_map,
		.vsi_map_offset = 0,
		.mapping_mode = ICE_VSI_MAP_CONTIG
	};
	int ret;

	ret = __ice_vsi_get_qs(&tx_qs_cfg);
	if (ret)
		return ret;
	vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode;

	ret = __ice_vsi_get_qs(&rx_qs_cfg);
	if (ret)
		return ret;
	vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode;

	return 0;
}

/**
 * ice_vsi_put_qs - Release queues from VSI to PF
 * @vsi: the VSI that is going to release queues
 */
static void ice_vsi_put_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int i;

	mutex_lock(&pf->avail_q_mutex);

	for (i = 0; i < vsi->alloc_txq; i++) {
		clear_bit(vsi->txq_map[i], pf->avail_txqs);
		vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
	}

	for (i = 0; i < vsi->alloc_rxq; i++) {
		clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
		vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
	}

	mutex_unlock(&pf->avail_q_mutex);
}

/**
 * ice_is_safe_mode
 * @pf: pointer to the PF struct
 *
 * returns true if driver is in safe mode, false otherwise
 */
bool ice_is_safe_mode(struct ice_pf *pf)
{
	return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
}

/**
 * ice_vsi_clean_rss_flow_fld - Delete RSS configuration
 * @vsi: the VSI being cleaned up
 *
 * This function deletes RSS input set for all flows that were configured
 * for this VSI
 */
static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	enum ice_status status;

	if (ice_is_safe_mode(pf))
		return;

	status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %s\n",
			vsi->vsi_num, ice_stat_str(status));
}

/**
 * ice_rss_clean - Delete RSS related VSI structures and configuration
621 622 623 624
 * @vsi: the VSI being removed
 */
static void ice_rss_clean(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	if (vsi->rss_hkey_user)
		devm_kfree(dev, vsi->rss_hkey_user);
	if (vsi->rss_lut_user)
		devm_kfree(dev, vsi->rss_lut_user);

	ice_vsi_clean_rss_flow_fld(vsi);
	/* remove RSS replay list */
	if (!ice_is_safe_mode(pf))
		ice_rem_vsi_rss_list(&pf->hw, vsi->idx);
}

/**
 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
{
	struct ice_hw_common_caps *cap;
	struct ice_pf *pf = vsi->back;

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		vsi->rss_size = 1;
		return;
	}

	cap = &pf->hw.func_caps.common_cap;
	switch (vsi->type) {
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		vsi->rss_table_size = (u16)cap->rss_table_size;
		vsi->rss_size = min_t(u16, num_online_cpus(),
				      BIT(cap->rss_table_entry_width));
		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
		break;
	case ICE_VSI_VF:
		/* VF VSI will get a small RSS table.
		 * For VSI_LUT, LUT size should be set to 64 bytes.
		 */
		vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
		vsi->rss_size = ICE_MAX_RSS_QS_PER_VF;
		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
		break;
	case ICE_VSI_LB:
		break;
	default:
		dev_dbg(ice_pf_to_dev(pf), "Unsupported VSI type %s\n",
			ice_vsi_type_str(vsi->type));
		break;
	}
}

/**
 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
 * @ctxt: the VSI context being set
 *
 * This initializes a default VSI context for all sections except the Queues.
 */
static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
{
	u32 table = 0;

	memset(&ctxt->info, 0, sizeof(ctxt->info));
	/* VSIs should be allocated from the shared pool */
	ctxt->alloc_from_pool = true;
	/* Src pruning enabled by default */
	ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
	/* Traffic from VSI can be sent to LAN */
	ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
	/* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
	 * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
	 * packets untagged/tagged.
	 */
	ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
				  ICE_AQ_VSI_VLAN_MODE_M) >>
				 ICE_AQ_VSI_VLAN_MODE_S);
	/* Have 1:1 UP mapping for both ingress/egress tables */
	table |= ICE_UP_TABLE_TRANSLATE(0, 0);
	table |= ICE_UP_TABLE_TRANSLATE(1, 1);
	table |= ICE_UP_TABLE_TRANSLATE(2, 2);
	table |= ICE_UP_TABLE_TRANSLATE(3, 3);
	table |= ICE_UP_TABLE_TRANSLATE(4, 4);
	table |= ICE_UP_TABLE_TRANSLATE(5, 5);
	table |= ICE_UP_TABLE_TRANSLATE(6, 6);
	table |= ICE_UP_TABLE_TRANSLATE(7, 7);
	ctxt->info.ingress_table = cpu_to_le32(table);
	ctxt->info.egress_table = cpu_to_le32(table);
	/* Have 1:1 UP mapping for outer to inner UP table */
	ctxt->info.outer_up_table = cpu_to_le32(table);
	/* No outer tag support, so outer_tag_flags remains zero */
}

/**
 * ice_vsi_setup_q_map - Setup a VSI queue map
 * @vsi: the VSI being configured
 * @ctxt: VSI context structure
 */
static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
	u16 offset = 0, qmap = 0, tx_count = 0;
	u16 qcount_tx = vsi->alloc_txq;
	u16 qcount_rx = vsi->alloc_rxq;
	u16 tx_numq_tc, rx_numq_tc;
	u16 pow = 0, max_rss = 0;
	bool ena_tc0 = false;
	u8 netdev_tc = 0;
	int i;

	/* at least TC0 should be enabled by default */
	if (vsi->tc_cfg.numtc) {
		if (!(vsi->tc_cfg.ena_tc & BIT(0)))
			ena_tc0 = true;
	} else {
		ena_tc0 = true;
	}

	if (ena_tc0) {
		vsi->tc_cfg.numtc++;
		vsi->tc_cfg.ena_tc |= 1;
	}

	rx_numq_tc = qcount_rx / vsi->tc_cfg.numtc;
	if (!rx_numq_tc)
		rx_numq_tc = 1;
	tx_numq_tc = qcount_tx / vsi->tc_cfg.numtc;
	if (!tx_numq_tc)
		tx_numq_tc = 1;

	/* TC mapping is a function of the number of Rx queues assigned to the
	 * VSI for each traffic class and the offset of these queues.
	 * The first 10 bits are the queue offset for TC0; the next 4 bits are
	 * the number of queues allocated to TC0, which must be a power of 2.
	 *
	 * If a TC is not enabled, its queue offset is set to 0 and it is given
	 * one queue, so traffic for that TC is sent to the default queue.
	 *
	 * Setup number and offset of Rx queues for all TCs for the VSI
	 */

	qcount_rx = rx_numq_tc;

	/* qcount will change if RSS is enabled */
	if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
		if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
			if (vsi->type == ICE_VSI_PF)
				max_rss = ICE_MAX_LG_RSS_QS;
			else
				max_rss = ICE_MAX_RSS_QS_PER_VF;
			qcount_rx = min_t(u16, rx_numq_tc, max_rss);
			if (!vsi->req_rxq)
				qcount_rx = min_t(u16, qcount_rx,
						  vsi->rss_size);
		}
	}

	/* find the (rounded up) power-of-2 of qcount */
	pow = (u16)order_base_2(qcount_rx);

	ice_for_each_traffic_class(i) {
		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
			/* TC is not enabled */
			vsi->tc_cfg.tc_info[i].qoffset = 0;
			vsi->tc_cfg.tc_info[i].qcount_rx = 1;
			vsi->tc_cfg.tc_info[i].qcount_tx = 1;
			vsi->tc_cfg.tc_info[i].netdev_tc = 0;
			ctxt->info.tc_mapping[i] = 0;
			continue;
		}

		/* TC is enabled */
		vsi->tc_cfg.tc_info[i].qoffset = offset;
		vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
		vsi->tc_cfg.tc_info[i].qcount_tx = tx_numq_tc;
		vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;

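		/* encode this TC's queue offset and power-of-2 queue count */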
		qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
			ICE_AQ_VSI_TC_Q_OFFSET_M) |
			((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
			 ICE_AQ_VSI_TC_Q_NUM_M);
		offset += qcount_rx;
		tx_count += tx_numq_tc;
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* If offset is non-zero, it was accumulated across the enabled TCs for
	 * this VSI; otherwise fall back to qcount_rx, which is always correct
	 * and non-zero because it is derived from the VSI's allocated Rx
	 * queues, which is at least 1 (hence qcount_tx is at least 1 too).
	 */
	if (offset)
		vsi->num_rxq = offset;
	else
		vsi->num_rxq = qcount_rx;

	vsi->num_txq = tx_count;

	if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
		dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
		/* since there is a chance that num_rxq could have been changed
		 * in the above for loop, make num_txq equal to num_rxq.
		 */
		vsi->num_txq = vsi->num_rxq;
	}

	/* Rx queue mapping */
	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
	/* q_mapping buffer holds the info for the first queue allocated for
	 * this VSI in the PF space and also the number of queues associated
	 * with this VSI.
	 */
	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
	ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
}

/**
 * ice_set_fd_vsi_ctx - Set FD VSI context before adding a VSI
 * @ctxt: the VSI context being set
 * @vsi: the VSI being configured
 */
static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
	u8 dflt_q_group, dflt_q_prio;
	u16 dflt_q, report_q, val;

	if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL)
		return;

	val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
	ctxt->info.valid_sections |= cpu_to_le16(val);
	dflt_q = 0;
	dflt_q_group = 0;
	report_q = 0;
	dflt_q_prio = 0;

	/* enable flow director filtering/programming */
	val = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;
	ctxt->info.fd_options = cpu_to_le16(val);
	/* max of allocated flow director filters */
	ctxt->info.max_fd_fltr_dedicated =
			cpu_to_le16(vsi->num_gfltr);
	/* max of shared flow director filters any VSI may program */
	ctxt->info.max_fd_fltr_shared =
			cpu_to_le16(vsi->num_bfltr);
	/* default queue index within the VSI of the default FD */
	val = ((dflt_q << ICE_AQ_VSI_FD_DEF_Q_S) &
	       ICE_AQ_VSI_FD_DEF_Q_M);
	/* target queue or queue group to the FD filter */
	val |= ((dflt_q_group << ICE_AQ_VSI_FD_DEF_GRP_S) &
		ICE_AQ_VSI_FD_DEF_GRP_M);
	ctxt->info.fd_def_q = cpu_to_le16(val);
	/* queue index on which FD filter completion is reported */
	val = ((report_q << ICE_AQ_VSI_FD_REPORT_Q_S) &
	       ICE_AQ_VSI_FD_REPORT_Q_M);
	/* priority of the default qindex action */
	val |= ((dflt_q_prio << ICE_AQ_VSI_FD_DEF_PRIORITY_S) &
		ICE_AQ_VSI_FD_DEF_PRIORITY_M);
	ctxt->info.fd_report_opt = cpu_to_le16(val);
}

/**
 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
 * @ctxt: the VSI context being set
 * @vsi: the VSI being configured
 */
static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
	u8 lut_type, hash_type;
	struct device *dev;
	struct ice_pf *pf;

	pf = vsi->back;
	dev = ice_pf_to_dev(pf);

	switch (vsi->type) {
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		break;
	case ICE_VSI_VF:
		/* VF VSI gets a small RSS table, which is a VSI LUT type */
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		break;
	default:
		dev_dbg(dev, "Unsupported VSI type %s\n",
			ice_vsi_type_str(vsi->type));
		return;
	}

	ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
				ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
				((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
				 ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
}

/**
 * ice_vsi_init - Create and initialize a VSI
 * @vsi: the VSI being configured
 * @init_vsi: is this call creating a VSI
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command to create a new VSI.
 */
static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi_ctx *ctxt;
	struct device *dev;
	int ret = 0;

	dev = ice_pf_to_dev(pf);
	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	switch (vsi->type) {
	case ICE_VSI_CTRL:
	case ICE_VSI_LB:
	case ICE_VSI_PF:
		ctxt->flags = ICE_AQ_VSI_TYPE_PF;
		break;
	case ICE_VSI_VF:
		ctxt->flags = ICE_AQ_VSI_TYPE_VF;
		/* VF number here is the absolute VF number (0-255) */
		ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
		break;
	default:
		ret = -ENODEV;
		goto out;
	}

	ice_set_dflt_vsi_ctx(ctxt);
	if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
		ice_set_fd_vsi_ctx(ctxt, vsi);
	/* if the switch is in VEB mode, allow VSI loopback */
	if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;

	/* Set LUT type and HASH type if RSS is enabled */
	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags) &&
	    vsi->type != ICE_VSI_CTRL) {
		ice_set_rss_vsi_ctx(ctxt, vsi);
		/* if updating VSI context, make sure to set valid_section:
		 * to indicate which section of VSI context being updated
		 */
		if (!init_vsi)
			ctxt->info.valid_sections |=
				cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
	}

	ctxt->info.sw_id = vsi->port_info->sw_id;
	ice_vsi_setup_q_map(vsi, ctxt);
	if (!init_vsi) /* means VSI being updated */
		/* need to indicate which sections of the VSI context are
		 * being modified
		 */
		ctxt->info.valid_sections |=
			cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);

	/* enable/disable MAC and VLAN anti-spoof when spoofchk is on/off
	 * respectively
	 */
	if (vsi->type == ICE_VSI_VF) {
		ctxt->info.valid_sections |=
			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
		if (pf->vf[vsi->vf_id].spoofchk) {
			ctxt->info.sec_flags |=
				ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
				(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
				 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
		} else {
			ctxt->info.sec_flags &=
				~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
				  (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
				   ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
		}
	}

	/* Allow control frames out of main VSI */
	if (vsi->type == ICE_VSI_PF) {
		ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
		ctxt->info.valid_sections |=
			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
	}

	if (init_vsi) {
		ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
		if (ret) {
			dev_err(dev, "Add VSI failed, err %d\n", ret);
			ret = -EIO;
			goto out;
		}
	} else {
		ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
		if (ret) {
			dev_err(dev, "Update VSI failed, err %d\n", ret);
			ret = -EIO;
			goto out;
		}
	}

	/* keep context for update VSI operations */
	vsi->info = ctxt->info;

	/* record VSI number returned */
	vsi->vsi_num = ctxt->vsi_num;

out:
	kfree(ctxt);
	return ret;
}

/**
 * ice_free_res - free a block of resources
 * @res: pointer to the resource
 * @index: starting index previously returned by ice_get_res
 * @id: identifier to track owner
 *
 * Returns number of resources freed
 */
int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
{
	int count = 0;
	int i;

	if (!res || index >= res->end)
		return -EINVAL;

	id |= ICE_RES_VALID_BIT;
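	/* clear only the contiguous run of entries owned by this ID */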
	for (i = index; i < res->end && res->list[i] == id; i++) {
		res->list[i] = 0;
		count++;
	}

	return count;
}

/**
 * ice_search_res - Search the tracker for a block of resources
 * @res: pointer to the resource
 * @needed: size of the block needed
 * @id: identifier to track owner
 *
 * Returns the base item index of the block, or -ENOMEM for error
 */
static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
{
	u16 start = 0, end = 0;

	if (needed > res->end)
		return -ENOMEM;

	id |= ICE_RES_VALID_BIT;

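	/* scan for a run of 'needed' consecutive free entries to claim */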
	do {
		/* skip already allocated entries */
		if (res->list[end++] & ICE_RES_VALID_BIT) {
			start = end;
			if ((start + needed) > res->end)
				break;
		}

		if (end == (start + needed)) {
			int i = start;

			/* there was enough, so assign it to the requestor */
			while (i != end)
				res->list[i++] = id;

			return start;
		}
	} while (end < res->end);

	return -ENOMEM;
}

/**
 * ice_get_free_res_count - Get free count from a resource tracker
 * @res: Resource tracker instance
 */
static u16 ice_get_free_res_count(struct ice_res_tracker *res)
{
	u16 i, count = 0;

	for (i = 0; i < res->end; i++)
		if (!(res->list[i] & ICE_RES_VALID_BIT))
			count++;

	return count;
}

/**
 * ice_get_res - get a block of resources
 * @pf: board private structure
 * @res: pointer to the resource
 * @needed: size of the block needed
 * @id: identifier to track owner
 *
 * Returns the base item index of the block, or negative for error
 */
int
ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
{
	if (!res || !pf)
		return -EINVAL;

	if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
		dev_err(ice_pf_to_dev(pf), "param err: needed=%d, num_entries = %d id=0x%04x\n",
			needed, res->num_entries, id);
		return -EINVAL;
	}

	return ice_search_res(res, needed, id);
}

/**
 * ice_vsi_setup_vector_base - Set up the base vector for the given VSI
 * @vsi: ptr to the VSI
 *
 * This should only be called after ice_vsi_alloc() which allocates the
 * corresponding SW VSI structure and initializes num_queue_pairs for the
 * newly allocated VSI.
 *
 * Returns 0 on success or negative on failure
 */
static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	u16 num_q_vectors;
	int base;

	dev = ice_pf_to_dev(pf);
	/* SRIOV doesn't grab irq_tracker entries for each VSI */
	if (vsi->type == ICE_VSI_VF)
		return 0;

	if (vsi->base_vector) {
		dev_dbg(dev, "VSI %d has non-zero base vector %d\n",
			vsi->vsi_num, vsi->base_vector);
		return -EEXIST;
	}

	num_q_vectors = vsi->num_q_vectors;
	/* reserve slots from OS requested IRQs */
	base = ice_get_res(pf, pf->irq_tracker, num_q_vectors, vsi->idx);

	if (base < 0) {
		dev_err(dev, "%d MSI-X interrupts available. %s %d failed to get %d MSI-X vectors\n",
			ice_get_free_res_count(pf->irq_tracker),
			ice_vsi_type_str(vsi->type), vsi->idx, num_q_vectors);
		return -ENOENT;
	}
	vsi->base_vector = (u16)base;
	pf->num_avail_sw_msix -= num_q_vectors;

	return 0;
}

/**
 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
 * @vsi: the VSI having rings deallocated
 */
static void ice_vsi_clear_rings(struct ice_vsi *vsi)
{
	int i;

1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211
	/* Avoid stale references by clearing map from vector to ring */
	if (vsi->q_vectors) {
		ice_for_each_q_vector(vsi, i) {
			struct ice_q_vector *q_vector = vsi->q_vectors[i];

			if (q_vector) {
				q_vector->tx.ring = NULL;
				q_vector->rx.ring = NULL;
			}
		}
	}

	if (vsi->tx_rings) {
		for (i = 0; i < vsi->alloc_txq; i++) {
			if (vsi->tx_rings[i]) {
				kfree_rcu(vsi->tx_rings[i], rcu);
				WRITE_ONCE(vsi->tx_rings[i], NULL);
			}
		}
	}
	if (vsi->rx_rings) {
		for (i = 0; i < vsi->alloc_rxq; i++) {
			if (vsi->rx_rings[i]) {
				kfree_rcu(vsi->rx_rings[i], rcu);
				WRITE_ONCE(vsi->rx_rings[i], NULL);
			}
		}
	}
}

/**
 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
 * @vsi: VSI which is having rings allocated
 */
static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	u16 i;

	dev = ice_pf_to_dev(pf);
	/* Allocate Tx rings */
	for (i = 0; i < vsi->alloc_txq; i++) {
		struct ice_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);

		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->txq_map[i];
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->dev = dev;
		ring->count = vsi->num_tx_desc;
		WRITE_ONCE(vsi->tx_rings[i], ring);
	}

	/* Allocate Rx rings */
	for (i = 0; i < vsi->alloc_rxq; i++) {
		struct ice_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->rxq_map[i];
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = dev;
		ring->count = vsi->num_rx_desc;
		WRITE_ONCE(vsi->rx_rings[i], ring);
	}

	return 0;

err_out:
	ice_vsi_clear_rings(vsi);
	return -ENOMEM;
}

/**
 * ice_vsi_manage_rss_lut - disable/enable RSS
 * @vsi: the VSI being changed
 * @ena: boolean value indicating if this is an enable or disable request
 *
 * In the event of disable request for RSS, this function will zero out RSS
 * LUT, while in the event of enable request for RSS, it will reconfigure RSS
 * LUT.
 */
int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
{
	int err = 0;
	u8 *lut;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	if (ena) {
		if (vsi->rss_lut_user)
			memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
		else
			ice_fill_rss_lut(lut, vsi->rss_table_size,
					 vsi->rss_size);
	}

	err = ice_set_rss(vsi, NULL, lut, vsi->rss_table_size);
	kfree(lut);
	return err;
}

/**
 * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
 * @vsi: VSI to be configured
 */
static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
{
	struct ice_aqc_get_set_rss_keys *key;
	struct ice_pf *pf = vsi->back;
	enum ice_status status;
	struct device *dev;
	int err = 0;
	u8 *lut;

	dev = ice_pf_to_dev(pf);
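	/* never spread RSS across more queues than the VSI actually has */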
	vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);

	status = ice_aq_set_rss_lut(&pf->hw, vsi->idx, vsi->rss_lut_type, lut,
				    vsi->rss_table_size);

	if (status) {
		dev_err(dev, "set_rss_lut failed, error %s\n",
			ice_stat_str(status));
		err = -EIO;
		goto ice_vsi_cfg_rss_exit;
	}

	key = kzalloc(sizeof(*key), GFP_KERNEL);
	if (!key) {
		err = -ENOMEM;
		goto ice_vsi_cfg_rss_exit;
	}

	if (vsi->rss_hkey_user)
		memcpy(key,
		       (struct ice_aqc_get_set_rss_keys *)vsi->rss_hkey_user,
		       ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
	else
		netdev_rss_key_fill((void *)key,
				    ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);

	status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key);

	if (status) {
		dev_err(dev, "set_rss_key failed, error %s\n",
			ice_stat_str(status));
		err = -EIO;
	}

	kfree(key);
ice_vsi_cfg_rss_exit:
	kfree(lut);
	return err;
}

/**
 * ice_vsi_set_vf_rss_flow_fld - Sets VF VSI RSS input set for different flows
 * @vsi: VSI to be configured
 *
 * This function will only be called during the VF VSI setup. Upon successful
 * completion of package download, this function will configure default RSS
 * input sets for VF VSI.
 */
static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	enum ice_status status;
	struct device *dev;

	dev = ice_pf_to_dev(pf);
	if (ice_is_safe_mode(pf)) {
		dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
			vsi->vsi_num);
		return;
	}

	status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA);
	if (status)
		dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %s\n",
			vsi->vsi_num, ice_stat_str(status));
}

/**
 * ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows
 * @vsi: VSI to be configured
 *
 * This function will only be called after successful download package call
 * during initialization of PF. Since the downloaded package will erase the
 * RSS section, this function will configure RSS input sets for different
 * flow types. The last profile added has the highest priority, therefore 2
 * tuple profiles (i.e. IPv4 src/dst) are added before 4 tuple profiles
 * (i.e. IPv4 src/dst TCP src/dst port).
 */
static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
{
	u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;
	struct device *dev;

	dev = ice_pf_to_dev(pf);
	if (ice_is_safe_mode(pf)) {
		dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
			vsi_num);
		return;
	}
	/* configure RSS for IPv4 with input set IP src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
				 ICE_FLOW_SEG_HDR_IPV4);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %s\n",
			vsi_num, ice_stat_str(status));

	/* configure RSS for IPv6 with input set IPv6 src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
				 ICE_FLOW_SEG_HDR_IPV6);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %s\n",
			vsi_num, ice_stat_str(status));

	/* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
				 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %s\n",
			vsi_num, ice_stat_str(status));

	/* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4,
				 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %s\n",
			vsi_num, ice_stat_str(status));

	/* configure RSS for sctp4 with input set IP src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
				 ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %s\n",
			vsi_num, ice_stat_str(status));

	/* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6,
				 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %s\n",
			vsi_num, ice_stat_str(status));

	/* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6,
				 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %s\n",
			vsi_num, ice_stat_str(status));

	/* configure RSS for sctp6 with input set IPv6 src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
				 ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %s\n",
			vsi_num, ice_stat_str(status));
}

/**
 * ice_pf_state_is_nominal - checks the PF for nominal state
 * @pf: pointer to PF to check
 *
 * Check the PF's state for a collection of bits that would indicate
 * the PF is in a state that would inhibit normal operation for
 * driver functionality.
 *
 * Returns true if PF is in a nominal state, false otherwise
 */
bool ice_pf_state_is_nominal(struct ice_pf *pf)
{
	DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };

	if (!pf)
		return false;

	bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
	if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
		return false;

	return true;
}

/**
 * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
 * @vsi: the VSI to be updated
 */
void ice_update_eth_stats(struct ice_vsi *vsi)
{
	struct ice_eth_stats *prev_es, *cur_es;
	struct ice_hw *hw = &vsi->back->hw;
	u16 vsi_num = vsi->vsi_num;    /* HW absolute index of a VSI */

	prev_es = &vsi->eth_stats_prev;
	cur_es = &vsi->eth_stats;

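	/* each update helper accumulates the delta from the previous snapshot */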
	ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_bytes, &cur_es->rx_bytes);

	ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_unicast, &cur_es->rx_unicast);

	ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_multicast, &cur_es->rx_multicast);

	ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_broadcast, &cur_es->rx_broadcast);

	ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_discards, &cur_es->rx_discards);

	ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_bytes, &cur_es->tx_bytes);

	ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_unicast, &cur_es->tx_unicast);

	ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_multicast, &cur_es->tx_multicast);

	ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_broadcast, &cur_es->tx_broadcast);

	ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_errors, &cur_es->tx_errors);

	vsi->stat_offsets_loaded = true;
}

/**
 * ice_vsi_add_vlan - Add VSI membership for given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN ID to be added
 * @action: filter action to be performed on match
 */
int
ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int err = 0;

	dev = ice_pf_to_dev(pf);

	if (!ice_fltr_add_vlan(vsi, vid, action)) {
		vsi->num_vlan++;
	} else {
		err = -ENODEV;
		dev_err(dev, "Failure Adding VLAN %d on VSI %i\n", vid,
			vsi->vsi_num);
	}

	return err;
}

/**
 * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN ID to be removed
 *
 * Returns 0 on success and negative on failure
 */
int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
{
	struct ice_pf *pf = vsi->back;
	enum ice_status status;
	struct device *dev;
	int err = 0;

	dev = ice_pf_to_dev(pf);

	status = ice_fltr_remove_vlan(vsi, vid, ICE_FWD_TO_VSI);
	if (!status) {
		vsi->num_vlan--;
	} else if (status == ICE_ERR_DOES_NOT_EXIST) {
		dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %s\n",
			vid, vsi->vsi_num, ice_stat_str(status));
	} else {
		dev_err(dev, "Error removing VLAN %d on vsi %i error: %s\n",
			vid, vsi->vsi_num, ice_stat_str(status));
		err = -EIO;
	}

	return err;
}

/**
 * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
 * @vsi: VSI
 */
void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
{
	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
		vsi->rx_buf_len = ICE_RXBUF_2048;
#if (PAGE_SIZE < 8192)
	} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
		vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
		vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
#endif
	} else {
		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
#if (PAGE_SIZE < 8192)
		vsi->rx_buf_len = ICE_RXBUF_3072;
#else
		vsi->rx_buf_len = ICE_RXBUF_2048;
#endif
	}
}

/**
 * ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register
 * @hw: HW pointer
 * @pf_q: index of the Rx queue in the PF's queue space
 * @rxdid: flexible descriptor RXDID
 * @prio: priority for the RXDID for this queue
 */
void
ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio)
{
	int regval = rd32(hw, QRXFLXP_CNTXT(pf_q));

	/* clear any previous values */
	regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M |
		    QRXFLXP_CNTXT_RXDID_PRIO_M |
		    QRXFLXP_CNTXT_TS_M);

	regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
		QRXFLXP_CNTXT_RXDID_IDX_M;

	regval |= (prio << QRXFLXP_CNTXT_RXDID_PRIO_S) &
		QRXFLXP_CNTXT_RXDID_PRIO_M;

	wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
}

/**
 * ice_vsi_cfg_rxqs - Configure the VSI for Rx
 * @vsi: the VSI being configured
 *
 * Return 0 on success and a negative value on error
 * Configure the Rx VSI for operation.
 */
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
{
	u16 i;

	if (vsi->type == ICE_VSI_VF)
		goto setup_rings;

	ice_vsi_cfg_frame_size(vsi);
setup_rings:
	/* set up individual rings */
	for (i = 0; i < vsi->num_rxq; i++) {
		int err;

		err = ice_setup_rx_ctx(vsi->rx_rings[i]);
		if (err) {
			dev_err(ice_pf_to_dev(vsi->back), "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
				i, err);
			return err;
		}
	}

	return 0;
}

/**
 * ice_vsi_cfg_txqs - Configure the VSI for Tx
 * @vsi: the VSI being configured
 * @rings: Tx ring array to be configured
 *
 * Return 0 on success and a negative value on error
 * Configure the Tx VSI for operation.
 */
static int
ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
{
	struct ice_aqc_add_tx_qgrp *qg_buf;
	u16 q_idx = 0;
	int err = 0;

	qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL);
	if (!qg_buf)
		return -ENOMEM;

	qg_buf->num_txqs = 1;

	for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
		err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
		if (err)
			goto err_cfg_txqs;
	}

err_cfg_txqs:
	kfree(qg_buf);
	return err;
}

/**
 * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
 * @vsi: the VSI being configured
 *
 * Return 0 on success and a negative value on error
 * Configure the Tx VSI for operation.
 */
int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
{
	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings);
}

/**
 * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
 * @vsi: the VSI being configured
 *
 * Return 0 on success and a negative value on error
 * Configure the Tx queues dedicated for XDP in given VSI for operation.
 */
int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
{
	int ret;
	int i;

	ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings);
	if (ret)
		return ret;

	for (i = 0; i < vsi->num_xdp_txq; i++)
		vsi->xdp_rings[i]->xsk_pool = ice_xsk_pool(vsi->xdp_rings[i]);

	return ret;
}

/**
 * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
 * @intrl: interrupt rate limit in usecs
 * @gran: interrupt rate limit granularity in usecs
 *
 * This function converts a decimal interrupt rate limit in usecs to the format
 * expected by firmware.
 */
u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
{
	u32 val = intrl / gran;

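	/* a non-zero value sets the enable bit; zero disables rate limiting */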
	if (val)
		return val | GLINT_RATE_INTRL_ENA_M;
	return 0;
}

/**
 * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
1784 1785 1786
 *
 * This configures MSIX mode interrupts for the PF VSI, and should not be used
 * for the VF VSI.
1787 1788 1789 1790 1791
 */
void ice_vsi_cfg_msix(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
K
1793
	int i, q;
1794

1795
	for (i = 0; i < vsi->num_q_vectors; i++) {
1796
		struct ice_q_vector *q_vector = vsi->q_vectors[i];
1797
		u16 reg_idx = q_vector->reg_idx;
1798

1799
		ice_cfg_itr(hw, q_vector);
1800

1801
		wr32(hw, GLINT_RATE(reg_idx),
1802
		     ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815

		/* Both Transmit Queue Interrupt Cause Control register
		 * and Receive Queue Interrupt Cause control register
		 * expects MSIX_INDX field to be the vector index
		 * within the function space and not the absolute
		 * vector index across PF or across device.
		 * For SR-IOV VF VSIs queue vector index always starts
		 * with 1 since first vector index(0) is used for OICR
		 * in VF space. Since VMDq and other PF VSIs are within
		 * the PF function space, use the vector index that is
		 * tracked for this PF.
		 */
		for (q = 0; q < q_vector->num_ring_tx; q++) {
1816 1817
			ice_cfg_txq_interrupt(vsi, txq, reg_idx,
					      q_vector->tx.itr_idx);
1818 1819 1820 1821
			txq++;
		}

		for (q = 0; q < q_vector->num_ring_rx; q++) {
1822 1823
			ice_cfg_rxq_interrupt(vsi, rxq, reg_idx,
					      q_vector->rx.itr_idx);
1824 1825 1826 1827 1828
			rxq++;
		}
	}
}
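
/* Illustrative mapping: a PF VSI with two q_vectors, each owning one Tx and
 * one Rx ring, ends up programming
 *
 *	QINT_TQCTL(txq_map[0]).MSIX_INDX = q_vectors[0]->reg_idx
 *	QINT_RQCTL(rxq_map[0]).MSIX_INDX = q_vectors[0]->reg_idx
 *	QINT_TQCTL(txq_map[1]).MSIX_INDX = q_vectors[1]->reg_idx
 *	QINT_RQCTL(rxq_map[1]).MSIX_INDX = q_vectors[1]->reg_idx
 *
 * txq/rxq advance monotonically across vectors, so every queue fires the
 * vector it was assigned by ice_vsi_map_rings_to_vectors().
 */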

/**
 * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
 * @vsi: the VSI being changed
 */
int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
{
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;
	int ret = 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	/* Here we are configuring the VSI to let the driver add VLAN tags by
	 * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
	 * insertion happens in the Tx hot path, in ice_tx_map.
	 */
	ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;

	/* Preserve existing VLAN strip setting */
	ctxt->info.vlan_flags |= (vsi->info.vlan_flags &
				  ICE_AQ_VSI_VLAN_EMOD_M);

	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);

	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (status) {
		dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %s aq_err %s\n",
			ice_stat_str(status),
			ice_aq_str(hw->adminq.sq_last_status));
		ret = -EIO;
		goto out;
	}

	vsi->info.vlan_flags = ctxt->info.vlan_flags;
out:
	kfree(ctxt);
	return ret;
}

/**
 * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
 * @vsi: the VSI being changed
 * @ena: boolean value indicating if this is an enable or disable request
 */
int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
{
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;
	int ret = 0;

	/* do not allow modifying VLAN stripping when a port VLAN is configured
	 * on this VSI
	 */
	if (vsi->info.pvid)
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	/* Here we are configuring what the VSI should do with the VLAN tag in
	 * the Rx packet. We can either leave the tag in the packet or put it in
	 * the Rx descriptor.
	 */
	if (ena)
		/* Strip VLAN tag from Rx packet and put it in the desc */
		ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
	else
		/* Disable stripping. Leave tag in packet */
		ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;

	/* Allow all packets untagged/tagged */
	ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;

	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);

	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (status) {
		dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %s aq_err %s\n",
			ena, ice_stat_str(status),
			ice_aq_str(hw->adminq.sq_last_status));
		ret = -EIO;
		goto out;
	}

	vsi->info.vlan_flags = ctxt->info.vlan_flags;
out:
	kfree(ctxt);
	return ret;
}
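
/* Usage sketch, illustrative only: the Rx VLAN offload toggle is the typical
 * caller, e.g. a features callback reacting to "ethtool -K <iface> rxvlan off"
 * might do
 *
 *	ret = ice_vsi_manage_vlan_stripping(vsi, false);
 *
 * Note the early return above: VSIs with a port VLAN keep their stripping
 * configuration unchanged.
 */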

/**
 * ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings
 * @vsi: the VSI whose rings are to be enabled
 *
 * Returns 0 on success and a negative value on error
 */
int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi)
{
	return ice_vsi_ctrl_all_rx_rings(vsi, true);
}

/**
 * ice_vsi_stop_all_rx_rings - stop/disable all of a VSI's Rx rings
 * @vsi: the VSI whose rings are to be disabled
 *
 * Returns 0 on success and a negative value on error
 */
int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
{
	return ice_vsi_ctrl_all_rx_rings(vsi, false);
}

/**
 * ice_vsi_stop_tx_rings - Disable Tx rings
 * @vsi: the VSI being configured
 * @rst_src: reset source
 * @rel_vmvf_num: Relative ID of VF/VM
 * @rings: Tx ring array to be stopped
 */
static int
ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
		      u16 rel_vmvf_num, struct ice_ring **rings)
{
	u16 q_idx;

	if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
		return -EINVAL;

	for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
		struct ice_txq_meta txq_meta = { };
		int status;

		if (!rings || !rings[q_idx])
			return -EINVAL;

		ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
		status = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num,
					      rings[q_idx], &txq_meta);

		if (status)
			return status;
	}

	return 0;
}

/**
 * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
 * @vsi: the VSI being configured
 * @rst_src: reset source
 * @rel_vmvf_num: Relative ID of VF/VM
 */
int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
			  u16 rel_vmvf_num)
{
	return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings);
}

/**
 * ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings
 * @vsi: the VSI being configured
 */
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
{
	return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings);
}

/**
 * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not
 * @vsi: VSI to check whether or not VLAN pruning is enabled.
 *
 * returns true if Rx VLAN pruning is enabled and false otherwise.
 */
bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
{
	if (!vsi)
		return false;

	return (vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA);
}

/**
 * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
 * @vsi: VSI to enable or disable VLAN pruning on
 * @ena: set to true to enable VLAN pruning and false to disable it
 * @vlan_promisc: enable valid security flags if not in VLAN promiscuous mode
 *
 * returns 0 if VSI is updated, negative otherwise
 */
int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
{
	struct ice_vsi_ctx *ctxt;
	struct ice_pf *pf;
	int status;

	if (!vsi)
		return -EINVAL;

	/* Don't enable VLAN pruning if the netdev is currently in promiscuous
	 * mode. VLAN pruning will be enabled when the interface exits
	 * promiscuous mode if any VLAN filters are active.
	 */
	if (vsi->netdev && vsi->netdev->flags & IFF_PROMISC && ena)
		return 0;

	pf = vsi->back;
	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	ctxt->info = vsi->info;

	if (ena)
		ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	else
		ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;

	if (!vlan_promisc)
		ctxt->info.valid_sections =
			cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);

	status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
	if (status) {
		netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %s, aq_err = %s\n",
			   ena ? "En" : "Dis", vsi->idx, vsi->vsi_num,
			   ice_stat_str(status),
			   ice_aq_str(pf->hw.adminq.sq_last_status));
		goto err_out;
	}

	vsi->info.sw_flags2 = ctxt->info.sw_flags2;

	kfree(ctxt);
	return 0;

err_out:
	kfree(ctxt);
	return -EIO;
}
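
/* Usage sketch, illustrative only: callers typically enable pruning when the
 * first VLAN filter is added and disable it when the last one goes away:
 *
 *	if (!ice_vsi_is_vlan_pruning_ena(vsi))
 *		err = ice_cfg_vlan_pruning(vsi, true, false);
 *
 * The IFF_PROMISC check above makes this a no-op while the netdev is in
 * promiscuous mode.
 */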

static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
{
	struct ice_dcbx_cfg *cfg = &vsi->port_info->local_dcbx_cfg;

	vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
	vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
}

/**
 * ice_vsi_set_q_vectors_reg_idx - set the HW register index for all q_vectors
 * @vsi: VSI to set the q_vectors register index on
 */
static int
ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi)
{
	u16 i;

	if (!vsi || !vsi->q_vectors)
		return -EINVAL;

	ice_for_each_q_vector(vsi, i) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];

		if (!q_vector) {
			dev_err(ice_pf_to_dev(vsi->back), "Failed to set reg_idx on q_vector %d VSI %d\n",
				i, vsi->vsi_num);
			goto clear_reg_idx;
		}

		if (vsi->type == ICE_VSI_VF) {
			struct ice_vf *vf = &vsi->back->vf[vsi->vf_id];

			q_vector->reg_idx = ice_calc_vf_reg_idx(vf, q_vector);
		} else {
			q_vector->reg_idx =
				q_vector->v_idx + vsi->base_vector;
		}
	}

	return 0;

clear_reg_idx:
	ice_for_each_q_vector(vsi, i) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];

		if (q_vector)
			q_vector->reg_idx = 0;
	}

	return -EINVAL;
}

/**
 * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
 * @vsi: the VSI being configured
 * @tx: bool to determine Tx or Rx rule
 * @create: bool to determine create or remove Rule
 */
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
{
	enum ice_status (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag,
				    enum ice_sw_fwd_act_type act);
	struct ice_pf *pf = vsi->back;
	enum ice_status status;
	struct device *dev;

	dev = ice_pf_to_dev(pf);
	eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth;

	if (tx)
		status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX,
				  ICE_DROP_PACKET);
	else
		status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX, ICE_FWD_TO_VSI);

	if (status)
		dev_err(dev, "Failed to %s %s LLDP rule on VSI %i, error: %s\n",
			create ? "add" : "remove", tx ? "Tx" : "Rx",
			vsi->vsi_num, ice_stat_str(status));
}
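
/* As used elsewhere in this file: ice_vsi_setup() installs the Tx drop rule
 * for the PF VSI via
 *
 *	ice_cfg_sw_lldp(vsi, true, true);
 *
 * and ice_vsi_release() removes it again, plus the Rx forward rule when the
 * FW LLDP agent is stopped.
 */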

/**
 * ice_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 * @vsi_type: VSI type
 * @vf_id: defines VF ID to which this VSI connects. This field is meant to be
 *         used only for the ICE_VSI_VF VSI type. For other VSI types, callers
 *         should pass ICE_INVAL_VFID.
 *
 * This allocates the sw VSI structure and its queue resources.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct on
 * success, NULL on failure.
 */
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
	      enum ice_vsi_type vsi_type, u16 vf_id)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	struct device *dev = ice_pf_to_dev(pf);
	enum ice_status status;
	struct ice_vsi *vsi;
	int ret, i;

	if (vsi_type == ICE_VSI_VF)
		vsi = ice_vsi_alloc(pf, vsi_type, vf_id);
	else
		vsi = ice_vsi_alloc(pf, vsi_type, ICE_INVAL_VFID);

	if (!vsi) {
		dev_err(dev, "could not allocate VSI\n");
		return NULL;
	}

	vsi->port_info = pi;
	vsi->vsw = pf->first_sw;
	if (vsi->type == ICE_VSI_PF)
		vsi->ethtype = ETH_P_PAUSE;

	if (vsi->type == ICE_VSI_VF)
		vsi->vf_id = vf_id;

	ice_alloc_fd_res(vsi);

	if (ice_vsi_get_qs(vsi)) {
		dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
			vsi->idx);
		goto unroll_vsi_alloc;
	}

	/* set RSS capabilities */
	ice_vsi_set_rss_params(vsi);

	/* set TC configuration */
	ice_vsi_set_tc_cfg(vsi);

	/* create the VSI */
	ret = ice_vsi_init(vsi, true);
	if (ret)
		goto unroll_get_qs;

	switch (vsi->type) {
	case ICE_VSI_CTRL:
	case ICE_VSI_PF:
		ret = ice_vsi_alloc_q_vectors(vsi);
		if (ret)
			goto unroll_vsi_init;

		ret = ice_vsi_setup_vector_base(vsi);
		if (ret)
			goto unroll_alloc_q_vector;

		ret = ice_vsi_set_q_vectors_reg_idx(vsi);
		if (ret)
			goto unroll_vector_base;

		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto unroll_vector_base;

		/* Always add VLAN ID 0 switch rule by default. This is needed
		 * in order to allow all untagged and 0 tagged priority traffic
		 * if Rx VLAN pruning is enabled. Also there are cases where we
		 * don't get the call to add VLAN 0 via ice_vlan_rx_add_vid()
		 * so this handles those cases (i.e. adding the PF to a bridge
		 * without the 8021q module loaded).
		 */
		ret = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
		if (ret)
			goto unroll_clear_rings;

		ice_vsi_map_rings_to_vectors(vsi);

		/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
		if (vsi->type != ICE_VSI_CTRL)
			/* Do not exit if configuring RSS had an issue, at
			 * least receive traffic on first queue. Hence no
			 * need to capture return value
			 */
			if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
				ice_vsi_cfg_rss_lut_key(vsi);
				ice_vsi_set_rss_flow_fld(vsi);
			}
		ice_init_arfs(vsi);
		break;
	case ICE_VSI_VF:
		/* VF driver will take care of creating netdev for this type and
		 * map queues to vectors through Virtchnl, PF driver only
		 * creates a VSI and corresponding structures for bookkeeping
		 * purpose
		 */
		ret = ice_vsi_alloc_q_vectors(vsi);
		if (ret)
			goto unroll_vsi_init;

		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto unroll_alloc_q_vector;

		ret = ice_vsi_set_q_vectors_reg_idx(vsi);
		if (ret)
			goto unroll_vector_base;

		/* Do not exit if configuring RSS had an issue, at least
		 * receive traffic on first queue. Hence no need to capture
		 * return value
		 */
		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
			ice_vsi_cfg_rss_lut_key(vsi);
			ice_vsi_set_vf_rss_flow_fld(vsi);
		}
		break;
	case ICE_VSI_LB:
		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto unroll_vsi_init;
		break;
	default:
		/* clean up the resources and exit */
		goto unroll_vsi_init;
	}

	/* configure VSI nodes based on number of queues and TC's */
	for (i = 0; i < vsi->tc_cfg.numtc; i++)
		max_txqs[i] = vsi->alloc_txq;

	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				 max_txqs);
	if (status) {
		dev_err(dev, "VSI %d failed lan queue config, error %s\n",
			vsi->vsi_num, ice_stat_str(status));
		goto unroll_clear_rings;
	}

	/* Add a switch rule to drop all Tx Flow Control Frames, with lookup
	 * type ETHERTYPE, coming from VSIs, restricting malicious VFs from
	 * sending out PAUSE or PFC frames. If enabled, FW can still send FC
	 * frames. The rule is added once for the PF VSI in order to create
	 * the appropriate recipe, since the VSI/VSI list is ignored with the
	 * drop action.
	 * Also add rules to handle LLDP Tx packets. Tx LLDP packets need to
	 * be dropped so that VFs cannot send LLDP packets to reconfigure DCB
	 * settings in the HW.
	 */
	if (!ice_is_safe_mode(pf))
		if (vsi->type == ICE_VSI_PF) {
			ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
					 ICE_DROP_PACKET);
			ice_cfg_sw_lldp(vsi, true, true);
		}

	return vsi;

unroll_clear_rings:
	ice_vsi_clear_rings(vsi);
unroll_vector_base:
	/* reclaim SW interrupts back to the common pool */
	ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
	pf->num_avail_sw_msix += vsi->num_q_vectors;
unroll_alloc_q_vector:
	ice_vsi_free_q_vectors(vsi);
unroll_vsi_init:
	ice_vsi_delete(vsi);
unroll_get_qs:
	ice_vsi_put_qs(vsi);
unroll_vsi_alloc:
	ice_vsi_clear(vsi);

	return NULL;
}
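
/* Illustrative caller, a sketch of how the PF VSI is typically created (the
 * actual wrapper lives outside this file excerpt):
 *
 *	vsi = ice_vsi_setup(pf, pf->hw.port_info, ICE_VSI_PF, ICE_INVAL_VFID);
 *	if (!vsi)
 *		return -ENOMEM;
 *
 * VF VSIs instead pass ICE_VSI_VF and a real vf_id in place of
 * ICE_INVAL_VFID.
 */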

/**
 * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
 * @vsi: the VSI being cleaned up
 */
static void ice_vsi_release_msix(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 txq = 0;
	u32 rxq = 0;
	int i, q;

	for (i = 0; i < vsi->num_q_vectors; i++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];
		u16 reg_idx = q_vector->reg_idx;

		wr32(hw, GLINT_ITR(ICE_IDX_ITR0, reg_idx), 0);
		wr32(hw, GLINT_ITR(ICE_IDX_ITR1, reg_idx), 0);
		for (q = 0; q < q_vector->num_ring_tx; q++) {
			wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
			if (ice_is_xdp_ena_vsi(vsi)) {
				u32 xdp_txq = txq + vsi->num_xdp_txq;

				wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0);
			}
			txq++;
		}

		for (q = 0; q < q_vector->num_ring_rx; q++) {
			wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
			rxq++;
		}
	}

	ice_flush(hw);
}

/**
 * ice_vsi_free_irq - Free the IRQ association with the OS
 * @vsi: the VSI being configured
 */
void ice_vsi_free_irq(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int base = vsi->base_vector;
	int i;

	if (!vsi->q_vectors || !vsi->irqs_ready)
		return;

	ice_vsi_release_msix(vsi);
	if (vsi->type == ICE_VSI_VF)
		return;

	vsi->irqs_ready = false;
	ice_for_each_q_vector(vsi, i) {
		u16 vector = i + base;
		int irq_num;

		irq_num = pf->msix_entries[vector].vector;

		/* free only the irqs that were actually requested */
		if (!vsi->q_vectors[i] ||
		    !(vsi->q_vectors[i]->num_ring_tx ||
		      vsi->q_vectors[i]->num_ring_rx))
			continue;

		/* clear the affinity notifier in the IRQ descriptor */
		irq_set_affinity_notifier(irq_num, NULL);

		/* clear the affinity_mask in the IRQ descriptor */
		irq_set_affinity_hint(irq_num, NULL);
		synchronize_irq(irq_num);
		devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]);
	}
}

/**
 * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
 * @vsi: the VSI having resources freed
 */
void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
{
	int i;

	if (!vsi->tx_rings)
		return;

	ice_for_each_txq(vsi, i)
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
			ice_free_tx_ring(vsi->tx_rings[i]);
}

/**
 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
 * @vsi: the VSI having resources freed
 */
void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
{
	int i;

	if (!vsi->rx_rings)
		return;

	ice_for_each_rxq(vsi, i)
		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
			ice_free_rx_ring(vsi->rx_rings[i]);
}

/**
 * ice_vsi_close - Shut down a VSI
 * @vsi: the VSI being shut down
 */
void ice_vsi_close(struct ice_vsi *vsi)
{
	if (!test_and_set_bit(__ICE_DOWN, vsi->state))
		ice_down(vsi);

	ice_vsi_free_irq(vsi);
	ice_vsi_free_tx_rings(vsi);
	ice_vsi_free_rx_rings(vsi);
}

/**
 * ice_ena_vsi - resume a VSI
 * @vsi: the VSI being resume
 * @locked: is the rtnl_lock already held
 */
int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
{
	int err = 0;

	if (!test_bit(__ICE_NEEDS_RESTART, vsi->state))
		return 0;

	clear_bit(__ICE_NEEDS_RESTART, vsi->state);

	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
		if (netif_running(vsi->netdev)) {
			if (!locked)
				rtnl_lock();

			err = ice_open(vsi->netdev);

			if (!locked)
				rtnl_unlock();
		}
	} else if (vsi->type == ICE_VSI_CTRL) {
		err = ice_vsi_open_ctrl(vsi);
	}

	return err;
}

/**
 * ice_dis_vsi - pause a VSI
 * @vsi: the VSI being paused
 * @locked: is the rtnl_lock already held
 */
void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
{
	if (test_bit(__ICE_DOWN, vsi->state))
		return;

	set_bit(__ICE_NEEDS_RESTART, vsi->state);

	if (vsi->type == ICE_VSI_PF && vsi->netdev) {
		if (netif_running(vsi->netdev)) {
			if (!locked)
				rtnl_lock();

			ice_stop(vsi->netdev);

			if (!locked)
				rtnl_unlock();
		} else {
			ice_vsi_close(vsi);
		}
	} else if (vsi->type == ICE_VSI_CTRL) {
		ice_vsi_close(vsi);
	}
}

/**
 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 */
void ice_vsi_dis_irq(struct ice_vsi *vsi)
{
	int base = vsi->base_vector;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;
	int i;

	/* disable interrupt causation from each queue */
	if (vsi->tx_rings) {
		ice_for_each_txq(vsi, i) {
			if (vsi->tx_rings[i]) {
				u16 reg;

				reg = vsi->tx_rings[i]->reg_idx;
				val = rd32(hw, QINT_TQCTL(reg));
				val &= ~QINT_TQCTL_CAUSE_ENA_M;
				wr32(hw, QINT_TQCTL(reg), val);
			}
		}
	}

	if (vsi->rx_rings) {
		ice_for_each_rxq(vsi, i) {
			if (vsi->rx_rings[i]) {
				u16 reg;

				reg = vsi->rx_rings[i]->reg_idx;
				val = rd32(hw, QINT_RQCTL(reg));
				val &= ~QINT_RQCTL_CAUSE_ENA_M;
				wr32(hw, QINT_RQCTL(reg), val);
			}
		}
	}

	/* disable each interrupt */
	ice_for_each_q_vector(vsi, i) {
		if (!vsi->q_vectors[i])
			continue;
		wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
	}

	ice_flush(hw);

	/* don't call synchronize_irq() for VF's from the host */
	if (vsi->type == ICE_VSI_VF)
		return;

	ice_for_each_q_vector(vsi, i)
		synchronize_irq(pf->msix_entries[i + base].vector);
}

/**
 * ice_napi_del - Remove NAPI handler for the VSI
 * @vsi: VSI for which NAPI handler is to be removed
 */
void ice_napi_del(struct ice_vsi *vsi)
{
	int v_idx;

	if (!vsi->netdev)
		return;

	ice_for_each_q_vector(vsi, v_idx)
		netif_napi_del(&vsi->q_vectors[v_idx]->napi);
}

/**
 * ice_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Returns 0 on success or < 0 on error
 */
int ice_vsi_release(struct ice_vsi *vsi)
{
	struct ice_pf *pf;

	if (!vsi->back)
		return -ENODEV;
	pf = vsi->back;

	/* do not unregister while driver is in the reset recovery pending
	 * state. Since reset/rebuild happens through PF service task workqueue,
	 * it's not a good idea to unregister a netdev that is associated with
	 * the PF that is running the work queue items currently. This is done
	 * to avoid check_flush_dependency() warning on this wq
	 */
	if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) {
		unregister_netdev(vsi->netdev);
		ice_devlink_destroy_port(vsi);
	}

	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
		ice_rss_clean(vsi);

	/* Disable VSI and free resources */
	if (vsi->type != ICE_VSI_LB)
		ice_vsi_dis_irq(vsi);
	ice_vsi_close(vsi);

	/* SR-IOV determines needed MSIX resources all at once instead of per
	 * VSI since when VFs are spawned we know how many VFs there are and
	 * how many interrupts each VF needs. SR-IOV MSIX resources are also
	 * cleared in the same manner.
	 */
	if (vsi->type != ICE_VSI_VF) {
		/* reclaim SW interrupts back to the common pool */
		ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
		pf->num_avail_sw_msix += vsi->num_q_vectors;
	}

	if (!ice_is_safe_mode(pf)) {
		if (vsi->type == ICE_VSI_PF) {
			ice_fltr_remove_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
					    ICE_DROP_PACKET);
			ice_cfg_sw_lldp(vsi, true, false);
			/* The Rx rule will only exist to remove if the LLDP FW
			 * engine is currently stopped
			 */
			if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
				ice_cfg_sw_lldp(vsi, false, false);
		}
	}

	ice_fltr_remove_all(vsi);
	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
	ice_vsi_delete(vsi);
	ice_vsi_free_q_vectors(vsi);

	/* make sure unregister_netdev() was called by checking __ICE_DOWN */
	if (vsi->netdev && test_bit(__ICE_DOWN, vsi->state)) {
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}

	ice_vsi_clear_rings(vsi);

	ice_vsi_put_qs(vsi);

	/* retain SW VSI data structure since it is needed to unregister and
	 * free VSI netdev when PF is not in reset recovery pending state,
	 * for ex: during rmmod.
	 */
	if (!ice_is_reset_in_progress(pf->state))
		ice_vsi_clear(vsi);

	return 0;
}

/**
 * ice_vsi_rebuild_update_coalesce - set coalesce for a q_vector
 * @q_vector: pointer to q_vector which is being updated
 * @coalesce: pointer to array of struct with stored coalesce
 *
 * Set coalesce param in q_vector and update these parameters in HW.
 */
static void
ice_vsi_rebuild_update_coalesce(struct ice_q_vector *q_vector,
				struct ice_coalesce_stored *coalesce)
{
	struct ice_ring_container *rx_rc = &q_vector->rx;
	struct ice_ring_container *tx_rc = &q_vector->tx;
	struct ice_hw *hw = &q_vector->vsi->back->hw;

	tx_rc->itr_setting = coalesce->itr_tx;
	rx_rc->itr_setting = coalesce->itr_rx;

	/* dynamic ITR values will be updated during Tx/Rx */
	if (!ITR_IS_DYNAMIC(tx_rc->itr_setting))
		wr32(hw, GLINT_ITR(tx_rc->itr_idx, q_vector->reg_idx),
		     ITR_REG_ALIGN(tx_rc->itr_setting) >>
		     ICE_ITR_GRAN_S);
	if (!ITR_IS_DYNAMIC(rx_rc->itr_setting))
		wr32(hw, GLINT_ITR(rx_rc->itr_idx, q_vector->reg_idx),
		     ITR_REG_ALIGN(rx_rc->itr_setting) >>
		     ICE_ITR_GRAN_S);

	q_vector->intrl = coalesce->intrl;
	wr32(hw, GLINT_RATE(q_vector->reg_idx),
	     ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
}

/**
 * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors
 * @vsi: VSI connected with q_vectors
 * @coalesce: array of struct with stored coalesce
 *
 * Returns array size.
 */
static int
ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
			     struct ice_coalesce_stored *coalesce)
{
	int i;

	ice_for_each_q_vector(vsi, i) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];

		coalesce[i].itr_tx = q_vector->tx.itr_setting;
		coalesce[i].itr_rx = q_vector->rx.itr_setting;
		coalesce[i].intrl = q_vector->intrl;
	}

	return vsi->num_q_vectors;
}

/**
 * ice_vsi_rebuild_set_coalesce - set coalesce from earlier saved arrays
 * @vsi: VSI connected with q_vectors
 * @coalesce: pointer to array of struct with stored coalesce
 * @size: size of coalesce array
 *
 * Before this function, ice_vsi_rebuild_get_coalesce should be called to save
 * ITR params in arrays. If size is 0 or coalesce wasn't stored set coalesce
 * to default value.
 */
static void
ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
			     struct ice_coalesce_stored *coalesce, int size)
{
	int i;

	if ((size && !coalesce) || !vsi)
		return;

	for (i = 0; i < size && i < vsi->num_q_vectors; i++)
		ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
						&coalesce[i]);

	/* number of q_vectors increased, so assume coalesce settings were
	 * changed globally (i.e. ethtool -C eth0 instead of per-queue) and use
	 * the previous settings from q_vector 0 for all of the new q_vectors
	 */
	for (; i < vsi->num_q_vectors; i++)
		ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
						&coalesce[0]);
}

/**
 * ice_vsi_rebuild - Rebuild VSI after reset
 * @vsi: VSI to be rebuilt
 * @init_vsi: is this an initialization or a reconfigure of the VSI
 *
 * Returns 0 on success and negative value on failure
 */
int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	struct ice_coalesce_stored *coalesce;
	int prev_num_q_vectors = 0;
	struct ice_vf *vf = NULL;
	enum ice_status status;
	struct ice_pf *pf;
	int ret, i;

	if (!vsi)
		return -EINVAL;

	pf = vsi->back;
	if (vsi->type == ICE_VSI_VF)
		vf = &pf->vf[vsi->vf_id];

	coalesce = kcalloc(vsi->num_q_vectors,
			   sizeof(struct ice_coalesce_stored), GFP_KERNEL);
	if (coalesce)
		prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi,
								  coalesce);
	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
	ice_vsi_free_q_vectors(vsi);

	/* SR-IOV determines needed MSIX resources all at once instead of per
	 * VSI since when VFs are spawned we know how many VFs there are and
	 * how many interrupts each VF needs. SR-IOV MSIX resources are also
	 * cleared in the same manner.
	 */
	if (vsi->type != ICE_VSI_VF) {
		/* reclaim SW interrupts back to the common pool */
		ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
		pf->num_avail_sw_msix += vsi->num_q_vectors;
		vsi->base_vector = 0;
	}

	if (ice_is_xdp_ena_vsi(vsi))
		/* return value check can be skipped here, it always returns
		 * 0 if reset is in progress
		 */
		ice_destroy_xdp_rings(vsi);
	ice_vsi_put_qs(vsi);
	ice_vsi_clear_rings(vsi);
	ice_vsi_free_arrays(vsi);
	if (vsi->type == ICE_VSI_VF)
		ice_vsi_set_num_qs(vsi, vf->vf_id);
	else
		ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);

	ret = ice_vsi_alloc_arrays(vsi);
	if (ret < 0)
		goto err_vsi;

	ice_vsi_get_qs(vsi);

	ice_alloc_fd_res(vsi);
	ice_vsi_set_tc_cfg(vsi);

	/* Initialize VSI struct elements and create VSI in FW */
	ret = ice_vsi_init(vsi, init_vsi);
	if (ret < 0)
		goto err_vsi;

	switch (vsi->type) {
	case ICE_VSI_CTRL:
	case ICE_VSI_PF:
		ret = ice_vsi_alloc_q_vectors(vsi);
		if (ret)
			goto err_rings;

		ret = ice_vsi_setup_vector_base(vsi);
		if (ret)
			goto err_vectors;

		ret = ice_vsi_set_q_vectors_reg_idx(vsi);
		if (ret)
			goto err_vectors;

		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto err_vectors;

		ice_vsi_map_rings_to_vectors(vsi);
		if (ice_is_xdp_ena_vsi(vsi)) {
			vsi->num_xdp_txq = vsi->alloc_rxq;
			ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
			if (ret)
				goto err_vectors;
		}
		/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
		if (vsi->type != ICE_VSI_CTRL)
			/* Do not exit if configuring RSS had an issue, at
			 * least receive traffic on first queue. Hence no
			 * need to capture return value
			 */
			if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
				ice_vsi_cfg_rss_lut_key(vsi);
		break;
	case ICE_VSI_VF:
		ret = ice_vsi_alloc_q_vectors(vsi);
		if (ret)
			goto err_rings;

		ret = ice_vsi_set_q_vectors_reg_idx(vsi);
		if (ret)
			goto err_vectors;

		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto err_vectors;

		break;
	default:
		break;
	}

	/* configure VSI nodes based on number of queues and TC's */
	for (i = 0; i < vsi->tc_cfg.numtc; i++) {
		max_txqs[i] = vsi->alloc_txq;

		if (ice_is_xdp_ena_vsi(vsi))
			max_txqs[i] += vsi->num_xdp_txq;
	}

	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				 max_txqs);
	if (status) {
		dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %s\n",
			vsi->vsi_num, ice_stat_str(status));
		if (init_vsi) {
			ret = -EIO;
			goto err_vectors;
		} else {
			return ice_schedule_reset(pf, ICE_RESET_PFR);
		}
	}
	ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
	kfree(coalesce);

	return 0;

err_vectors:
	ice_vsi_free_q_vectors(vsi);
err_rings:
	if (vsi->netdev) {
		vsi->current_netdev_flags = 0;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
err_vsi:
	ice_vsi_clear(vsi);
	set_bit(__ICE_RESET_FAILED, pf->state);
	kfree(coalesce);
	return ret;
}

/**
 * ice_is_reset_in_progress - check for a reset in progress
 * @state: PF state field
 */
bool ice_is_reset_in_progress(unsigned long *state)
{
	return test_bit(__ICE_RESET_OICR_RECV, state) ||
	       test_bit(__ICE_DCBNL_DEVRESET, state) ||
	       test_bit(__ICE_PFR_REQ, state) ||
	       test_bit(__ICE_CORER_REQ, state) ||
	       test_bit(__ICE_GLOBR_REQ, state);
}

#ifdef CONFIG_DCB
/**
 * ice_vsi_update_q_map - update our copy of the VSI info with new queue map
 * @vsi: VSI being configured
 * @ctx: the context buffer returned from AQ VSI update command
 */
static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
{
	vsi->info.mapping_flags = ctx->info.mapping_flags;
	memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping,
	       sizeof(vsi->info.q_mapping));
	memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));
}

/**
 * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
 * @vsi: VSI to be configured
 * @ena_tc: TC bitmap
 *
 * VSI queues expected to be quiesced before calling this function
 */
int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	struct ice_pf *pf = vsi->back;
	struct ice_vsi_ctx *ctx;
	enum ice_status status;
	struct device *dev;
	int i, ret = 0;
	u8 num_tc = 0;

	dev = ice_pf_to_dev(pf);

	ice_for_each_traffic_class(i) {
		/* build bitmap of enabled TCs */
		if (ena_tc & BIT(i))
			num_tc++;
		/* populate max_txqs per TC */
		max_txqs[i] = vsi->alloc_txq;
	}

	vsi->tc_cfg.ena_tc = ena_tc;
	vsi->tc_cfg.numtc = num_tc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->vf_num = 0;
	ctx->info = vsi->info;

	ice_vsi_setup_q_map(vsi, ctx);

	/* must indicate which section of the VSI context is being modified */
	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
	if (status) {
		dev_info(dev, "Failed VSI Update\n");
		ret = -EIO;
		goto out;
	}

	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				 max_txqs);

	if (status) {
		dev_err(dev, "VSI %d failed TC config, error %s\n",
			vsi->vsi_num, ice_stat_str(status));
		ret = -EIO;
		goto out;
	}
	ice_vsi_update_q_map(vsi, ctx);
	vsi->info.valid_sections = 0;

	ice_vsi_cfg_netdev_tc(vsi, ena_tc);
out:
	kfree(ctx);
	return ret;
}
#endif /* CONFIG_DCB */

/**
 * ice_update_ring_stats - Update ring statistics
 * @ring: ring to update
 * @cont: used to increment per-vector counters
 * @pkts: number of processed packets
 * @bytes: number of processed bytes
 *
 * This function assumes that caller has acquired a u64_stats_sync lock.
 */
static void
ice_update_ring_stats(struct ice_ring *ring, struct ice_ring_container *cont,
		      u64 pkts, u64 bytes)
{
	ring->stats.bytes += bytes;
	ring->stats.pkts += pkts;
	cont->total_bytes += bytes;
	cont->total_pkts += pkts;
}

/**
 * ice_update_tx_ring_stats - Update Tx ring specific counters
 * @tx_ring: ring to update
 * @pkts: number of processed packets
 * @bytes: number of processed bytes
 */
void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes)
{
	u64_stats_update_begin(&tx_ring->syncp);
	ice_update_ring_stats(tx_ring, &tx_ring->q_vector->tx, pkts, bytes);
	u64_stats_update_end(&tx_ring->syncp);
}

/**
 * ice_update_rx_ring_stats - Update Rx ring specific counters
 * @rx_ring: ring to update
 * @pkts: number of processed packets
 * @bytes: number of processed bytes
 */
void ice_update_rx_ring_stats(struct ice_ring *rx_ring, u64 pkts, u64 bytes)
{
	u64_stats_update_begin(&rx_ring->syncp);
	ice_update_ring_stats(rx_ring, &rx_ring->q_vector->rx, pkts, bytes);
	u64_stats_update_end(&rx_ring->syncp);
}

/**
 * ice_status_to_errno - convert from enum ice_status to Linux errno
 * @err: ice_status value to convert
 */
int ice_status_to_errno(enum ice_status err)
{
	switch (err) {
	case ICE_SUCCESS:
		return 0;
	case ICE_ERR_DOES_NOT_EXIST:
		return -ENOENT;
	case ICE_ERR_OUT_OF_RANGE:
		return -ENOTTY;
	case ICE_ERR_PARAM:
		return -EINVAL;
	case ICE_ERR_NO_MEMORY:
		return -ENOMEM;
	case ICE_ERR_MAX_LIMIT:
		return -EAGAIN;
	default:
		return -EINVAL;
	}
}
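
/* Usage sketch, illustrative only: callers wrap helpers that still return
 * enum ice_status so the rest of the stack sees a plain errno:
 *
 *	err = ice_status_to_errno(ice_update_vsi(&pf->hw, vsi->idx, ctxt,
 *						 NULL));
 *	if (err)
 *		return err;
 */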

/**
 * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
 * @sw: switch to check if its default forwarding VSI is free
 *
 * Return true if the default forwarding VSI is already being used, else returns
 * false signalling that it's available to use.
 */
bool ice_is_dflt_vsi_in_use(struct ice_sw *sw)
{
	return (sw->dflt_vsi && sw->dflt_vsi_ena);
}

/**
 * ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI
 * @sw: switch for the default forwarding VSI to compare against
 * @vsi: VSI to compare against default forwarding VSI
 *
 * If this VSI passed in is the default forwarding VSI then return true, else
 * return false
 */
bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
{
	return (sw->dflt_vsi == vsi && sw->dflt_vsi_ena);
}

/**
 * ice_set_dflt_vsi - set the default forwarding VSI
 * @sw: switch used to assign the default forwarding VSI
 * @vsi: VSI getting set as the default forwarding VSI on the switch
 *
 * If the VSI passed in is already the default VSI and it's enabled just return
 * success.
 *
 * If there is already a default VSI on the switch and it's enabled then return
 * -EEXIST since there can only be one default VSI per switch.
 *
 *  Otherwise try to set the VSI passed in as the switch's default VSI and
 *  return the result.
 */
int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
{
	enum ice_status status;
	struct device *dev;

	if (!sw || !vsi)
		return -EINVAL;

	dev = ice_pf_to_dev(vsi->back);

	/* the VSI passed in is already the default VSI */
	if (ice_is_vsi_dflt_vsi(sw, vsi)) {
		dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n",
			vsi->vsi_num);
		return 0;
	}

	/* another VSI is already the default VSI for this switch */
	if (ice_is_dflt_vsi_in_use(sw)) {
		dev_err(dev, "Default forwarding VSI %d already in use, disable it and try again\n",
			sw->dflt_vsi->vsi_num);
		return -EEXIST;
	}

	status = ice_cfg_dflt_vsi(&vsi->back->hw, vsi->idx, true, ICE_FLTR_RX);
	if (status) {
		dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %s\n",
			vsi->vsi_num, ice_stat_str(status));
		return -EIO;
	}

	sw->dflt_vsi = vsi;
	sw->dflt_vsi_ena = true;

	return 0;
}

/**
 * ice_clear_dflt_vsi - clear the default forwarding VSI
 * @sw: switch used to clear the default VSI
 *
 * If the switch has no default VSI or it's not enabled then return error.
 *
 * Otherwise try to clear the default VSI and return the result.
 */
int ice_clear_dflt_vsi(struct ice_sw *sw)
{
	struct ice_vsi *dflt_vsi;
	enum ice_status status;
	struct device *dev;

	if (!sw)
		return -EINVAL;

	dev = ice_pf_to_dev(sw->pf);

	dflt_vsi = sw->dflt_vsi;

	/* there is no default VSI configured */
	if (!ice_is_dflt_vsi_in_use(sw))
		return -ENODEV;

	status = ice_cfg_dflt_vsi(&dflt_vsi->back->hw, dflt_vsi->idx, false,
				  ICE_FLTR_RX);
	if (status) {
		dev_err(dev, "Failed to clear the default forwarding VSI %d, error %s\n",
			dflt_vsi->vsi_num, ice_stat_str(status));
		return -EIO;
	}

	sw->dflt_vsi = NULL;
	sw->dflt_vsi_ena = false;

	return 0;
}
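
/* Usage sketch, illustrative only: promiscuous mode handling would pair the
 * two helpers, e.g.
 *
 *	if (promisc_on)
 *		err = ice_set_dflt_vsi(pf->first_sw, vsi);
 *	else
 *		err = ice_clear_dflt_vsi(pf->first_sw);
 *
 * where pf->first_sw is the switch the VSI was attached to in
 * ice_vsi_setup(), and promisc_on is a hypothetical flag for this sketch.
 */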