/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <net/busy_poll.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/irq.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_checksum.h>
#endif

#include "mlx4_en.h"

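/* Allocate a fresh page for one RX fragment and DMA-map it with the
 * direction currently configured for the port (bidirectional when XDP
 * is enabled).  The payload starts priv->rx_headroom bytes into the page.
 */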
static int mlx4_alloc_page(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_alloc *frag,
			   gfp_t gfp)
{
	struct page *page;
	dma_addr_t dma;

	page = alloc_page(gfp);
	if (unlikely(!page))
		return -ENOMEM;
	dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE, priv->dma_dir);
	if (unlikely(dma_mapping_error(priv->ddev, dma))) {
		__free_page(page);
		return -ENOMEM;
	}
	frag->page = page;
	frag->dma = dma;
	frag->page_offset = priv->rx_headroom;
	return 0;
}

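/* Fill the scatter entries of one RX descriptor, allocating a new page
 * only for fragments that do not already own one; new allocations are
 * accounted in ring->rx_alloc_pages.
 */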
static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
			       struct mlx4_en_rx_ring *ring,
			       struct mlx4_en_rx_desc *rx_desc,
			       struct mlx4_en_rx_alloc *frags,
			       gfp_t gfp)
{
	int i;

	for (i = 0; i < priv->num_frags; i++, frags++) {
		if (!frags->page) {
			if (mlx4_alloc_page(priv, frags, gfp))
				return -ENOMEM;
			ring->rx_alloc_pages++;
		}
		rx_desc->data[i].addr = cpu_to_be64(frags->dma +
						    frags->page_offset);
	}
	return 0;
}

static void mlx4_en_free_frag(const struct mlx4_en_priv *priv,
			      struct mlx4_en_rx_alloc *frag)
{
	if (frag->page) {
		dma_unmap_page(priv->ddev, frag->dma,
			       PAGE_SIZE, priv->dma_dir);
		__free_page(frag->page);
	}
	/* We need to clear all fields, otherwise a change of priv->log_rx_info
	 * could lead to seeing garbage later in frag->page.
	 */
	memset(frag, 0, sizeof(*frag));
}

static void mlx4_en_init_rx_desc(const struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
	int possible_frags;
	int i;

	/* Set size and memtype fields */
	for (i = 0; i < priv->num_frags; i++) {
		rx_desc->data[i].byte_count =
			cpu_to_be32(priv->frag_info[i].frag_size);
		rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
	}

	/* If the number of used fragments does not fill up the ring stride,
	 * remaining (unused) fragments must be padded with null address/size
	 * and a special memory key */
	possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
	for (i = priv->num_frags; i < possible_frags; i++) {
		rx_desc->data[i].byte_count = 0;
		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
		rx_desc->data[i].addr = 0;
	}
}

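/* Attach buffers to the RX descriptor at @index.  In XDP mode, pages are
 * taken from the ring's recycle cache when possible; otherwise fall back
 * to regular per-fragment allocation.
 */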
static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
				   struct mlx4_en_rx_ring *ring, int index,
				   gfp_t gfp)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf +
		(index << ring->log_stride);
	struct mlx4_en_rx_alloc *frags = ring->rx_info +
					(index << priv->log_rx_info);
	if (likely(ring->page_cache.index > 0)) {
		/* XDP uses a single page per frame */
		if (!frags->page) {
			ring->page_cache.index--;
			frags->page = ring->page_cache.buf[ring->page_cache.index].page;
			frags->dma  = ring->page_cache.buf[ring->page_cache.index].dma;
		}
		frags->page_offset = XDP_PACKET_HEADROOM;
		rx_desc->data[0].addr = cpu_to_be64(frags->dma +
						    XDP_PACKET_HEADROOM);
		return 0;
	}

	return mlx4_en_alloc_frags(priv, ring, rx_desc, frags, gfp);
}

static bool mlx4_en_is_ring_empty(const struct mlx4_en_rx_ring *ring)
{
	return ring->prod == ring->cons;
}

static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}

/* slow path */
static void mlx4_en_free_rx_desc(const struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring,
				 int index)
{
	struct mlx4_en_rx_alloc *frags;
	int nr;

	frags = ring->rx_info + (index << priv->log_rx_info);
	for (nr = 0; nr < priv->num_frags; nr++) {
		en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
		mlx4_en_free_frag(priv, frags + nr);
	}
}

/* Function not in fast-path */
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int ring_ind;
	int buf_ind;
	int new_size;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = priv->rx_ring[ring_ind];

			if (mlx4_en_prepare_rx_desc(priv, ring,
						    ring->actual_size,
						    GFP_KERNEL | __GFP_COLD)) {
				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
					en_err(priv, "Failed to allocate enough rx buffers\n");
					return -ENOMEM;
				} else {
					new_size = rounddown_pow_of_two(ring->actual_size);
					en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n",
						ring->actual_size, new_size);
					goto reduce_rings;
				}
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
	return 0;

reduce_rings:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];
		while (ring->actual_size > new_size) {
			ring->actual_size--;
			ring->prod--;
			mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
		}
	}

	return 0;
}

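/* Unmap and free every buffer still attached to the ring and reset the
 * producer/consumer indices.
 */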
static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int index;

	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
	       ring->cons, ring->prod);

	/* Unmap and free Rx buffers */
	for (index = 0; index < ring->size; index++) {
		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
		mlx4_en_free_rx_desc(priv, ring, index);
	}
	ring->cons = 0;
	ring->prod = 0;
}

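/* Choose the default number of RX rings for each Ethernet port: bounded
 * by the EQs available to the port and the kernel's default RSS queue
 * count, rounded down to a power of two (MIN_RX_RINGS under a low-memory
 * profile).
 */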
void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
{
	int i;
	int num_of_eqs;
	int num_rx_rings;
	struct mlx4_dev *dev = mdev->dev;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		num_of_eqs = max_t(int, MIN_RX_RINGS,
				   min_t(int,
					 mlx4_get_eqs_per_port(mdev->dev, i),
					 DEF_RX_RINGS));

		num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
			min_t(int, num_of_eqs,
			      netif_get_num_default_rss_queues());
		mdev->profile.prof[i].rx_ring_num =
			rounddown_pow_of_two(num_rx_rings);
	}
}

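/* Allocate the RX ring structure, its rx_info array and its HW queue
 * resources, preferring the requested NUMA node for all of them.
 */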
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring **pring,
			   u32 size, u16 stride, int node)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring;
	int err = -ENOMEM;
	int tmp;

	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
	if (!ring) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring) {
			en_err(priv, "Failed to allocate RX ring structure\n");
			return -ENOMEM;
		}
	}

	ring->prod = 0;
	ring->cons = 0;
	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;
	ring->log_stride = ffs(ring->stride) - 1;
	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;

	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
					sizeof(struct mlx4_en_rx_alloc));
	ring->rx_info = vzalloc_node(tmp, node);
	if (!ring->rx_info) {
		ring->rx_info = vzalloc(tmp);
		if (!ring->rx_info) {
			err = -ENOMEM;
			goto err_ring;
		}
	}

	en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
		 ring->rx_info, tmp);

	/* Allocate HW buffers on provided NUMA node */
	set_dev_node(&mdev->dev->persist->pdev->dev, node);
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
	if (err)
		goto err_info;

	ring->buf = ring->wqres.buf.direct.buf;

	ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;

	*pring = ring;
	return 0;

err_info:
	vfree(ring->rx_info);
	ring->rx_info = NULL;
err_ring:
	kfree(ring);
	*pring = NULL;

	return err;
}

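/* Bring every RX ring to an operational state: initialize the
 * descriptors, pre-fill them with buffers and publish the producer
 * index to the HW doorbell.
 */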
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int i;
	int ring_ind;
	int err;
	int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					DS_SIZE * priv->num_frags);

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->prod = 0;
		ring->cons = 0;
		ring->actual_size = 0;
		ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;

		ring->stride = stride;
		if (ring->stride <= TXBB_SIZE) {
			/* Stamp first unused send wqe */
			__be32 *ptr = (__be32 *)ring->buf;
			__be32 stamp = cpu_to_be32(1 << STAMP_SHIFT);
			*ptr = stamp;
			/* Move pointer to start of rx section */
			ring->buf += TXBB_SIZE;
		}

		ring->log_stride = ffs(ring->stride) - 1;
		ring->buf_size = ring->size * ring->stride;

		memset(ring->buf, 0, ring->buf_size);
		mlx4_en_update_rx_prod_db(ring);

		/* Initialize all descriptors */
		for (i = 0; i < ring->size; i++)
			mlx4_en_init_rx_desc(priv, ring, i);
	}
	err = mlx4_en_fill_rx_buffers(priv);
	if (err)
		goto err_buffers;

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->size_mask = ring->actual_size - 1;
		mlx4_en_update_rx_prod_db(ring);
	}

	return 0;

err_buffers:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
		mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);

	ring_ind = priv->rx_ring_num - 1;
	while (ring_ind >= 0) {
		if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
			priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
		ring_ind--;
	}
	return err;
}

/* We recover from out of memory by scheduling our napi poll
 * function (mlx4_en_process_cq), which tries to allocate
 * all missing RX buffers (call to mlx4_en_refill_rx_buffers).
 */
void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
{
	int ring;

	if (!priv->port_up)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) {
			local_bh_disable();
			napi_reschedule(&priv->rx_cq[ring]->napi);
			local_bh_enable();
		}
	}
}

/* When the rx ring is running in page-per-packet mode, a released frame can go
 * directly into a small cache, to avoid unmapping or touching the page
 * allocator. In bpf prog performance scenarios, buffers are either forwarded
 * or dropped, never converted to skbs, so every page can come directly from
 * this cache when it is sized to be a multiple of the napi budget.
 */
bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
			struct mlx4_en_rx_alloc *frame)
{
	struct mlx4_en_page_cache *cache = &ring->page_cache;

	if (cache->index >= MLX4_EN_CACHE_SIZE)
		return false;

	cache->buf[cache->index].page = frame->page;
	cache->buf[cache->index].dma = frame->dma;
	cache->index++;
	return true;
}

void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring **pring,
			     u32 size, u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring = *pring;
	struct bpf_prog *old_prog;

	old_prog = rcu_dereference_protected(
					ring->xdp_prog,
					lockdep_is_held(&mdev->state_lock));
	if (old_prog)
		bpf_prog_put(old_prog);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
	vfree(ring->rx_info);
	ring->rx_info = NULL;
	kfree(ring);
	*pring = NULL;
}

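/* Drain the XDP page cache (unmapping and releasing each cached page),
 * free all posted RX buffers and undo the TXBB_SIZE offset applied when
 * the ring was activated.
 */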
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int i;

	for (i = 0; i < ring->page_cache.index; i++) {
		dma_unmap_page(priv->ddev, ring->page_cache.buf[i].dma,
			       PAGE_SIZE, priv->dma_dir);
		put_page(ring->page_cache.buf[i].page);
	}
	ring->page_cache.index = 0;
	mlx4_en_free_rx_buf(priv, ring);
	if (ring->stride <= TXBB_SIZE)
		ring->buf -= TXBB_SIZE;
}


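/* Collect the received fragments into the skb's frag array, syncing each
 * one for CPU access.  Each page is either retained by the ring for
 * another frame (its offset flipped or advanced and its refcount bumped)
 * or unmapped and handed over to the skb.  Returns the number of frags
 * used, or 0 on failure after releasing the partially built list.
 */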
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
				    struct mlx4_en_rx_alloc *frags,
				    struct sk_buff *skb,
				    int length)
{
	const struct mlx4_en_frag_info *frag_info = priv->frag_info;
	unsigned int truesize = 0;
	int nr, frag_size;
	struct page *page;
	dma_addr_t dma;
	bool release;

	/* Collect used fragments while replacing them in the HW descriptors */
	for (nr = 0;; frags++) {
		frag_size = min_t(int, length, frag_info->frag_size);

		page = frags->page;
		if (unlikely(!page))
			goto fail;

		dma = frags->dma;
		dma_sync_single_range_for_cpu(priv->ddev, dma, frags->page_offset,
					      frag_size, priv->dma_dir);

		__skb_fill_page_desc(skb, nr, page, frags->page_offset,
				     frag_size);

		truesize += frag_info->frag_stride;
		if (frag_info->frag_stride == PAGE_SIZE / 2) {
			frags->page_offset ^= PAGE_SIZE / 2;
			release = page_count(page) != 1 ||
				  page_is_pfmemalloc(page) ||
				  page_to_nid(page) != numa_mem_id();
		} else {
			u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES);

			frags->page_offset += sz_align;
			release = frags->page_offset + frag_info->frag_size > PAGE_SIZE;
		}
		if (release) {
			dma_unmap_page(priv->ddev, dma, PAGE_SIZE, priv->dma_dir);
			frags->page = NULL;
		} else {
			page_ref_inc(page);
		}

		nr++;
		length -= frag_size;
		if (!length)
			break;
		frag_info++;
	}
	skb->truesize += truesize;
	return nr;

fail:
	while (nr > 0) {
		nr--;
		__skb_frag_unref(skb_shinfo(skb)->frags + nr);
	}
	return 0;
}

static void validate_loopback(struct mlx4_en_priv *priv, void *va)
{
	const unsigned char *data = va + ETH_HLEN;
	int i;

	for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++) {
		if (data[i] != (unsigned char)i)
			return;
	}
	/* Loopback found */
	priv->loopback_ok = 1;
}

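/* Re-post buffers freed by the completion path.  Refills are batched:
 * nothing is done until at least 8 descriptors are missing, and the
 * producer doorbell is updated once at the end.
 */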
static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_ring *ring)
{
	u32 missing = ring->actual_size - (ring->prod - ring->cons);

	/* Try to batch allocations, but not too much. */
	if (missing < 8)
		return;
	do {
		if (mlx4_en_prepare_rx_desc(priv, ring,
					    ring->prod & ring->size_mask,
					    GFP_ATOMIC | __GFP_COLD |
					    __GFP_MEMALLOC))
			break;
		ring->prod++;
	} while (likely(--missing));

	mlx4_en_update_rx_prod_db(ring);
}

/* When hardware doesn't strip the vlan, we need to calculate the checksum
 * over it and add it to the hardware's checksum calculation
 */
static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
					 struct vlan_hdr *vlanh)
{
	return csum_add(hw_checksum, *(__wsum *)vlanh);
}

/* Although the stack expects checksum which doesn't include the pseudo
 * header, the HW adds it. To address that, we are subtracting the pseudo
 * header checksum from the checksum value provided by the HW.
 */
static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
				struct iphdr *iph)
{
	__u16 length_for_csum = 0;
	__wsum csum_pseudo_header = 0;

	length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
	csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
						length_for_csum, iph->protocol, 0);
	skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
}

#if IS_ENABLED(CONFIG_IPV6)
/* In IPv6 packets, besides subtracting the pseudo header checksum,
 * we also compute/add the IP header checksum which
 * is not added by the HW.
 */
static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
			       struct ipv6hdr *ipv6h)
{
	__wsum csum_pseudo_hdr = 0;

	if (unlikely(ipv6h->nexthdr == IPPROTO_FRAGMENT ||
		     ipv6h->nexthdr == IPPROTO_HOPOPTS))
		return -1;
	hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));

	csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
				       sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
	csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len);
	csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr));

	skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr);
	skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0));
	return 0;
}
#endif
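/* Turn the HW checksum into something usable for CHECKSUM_COMPLETE: fold
 * in a VLAN header that HW did not strip, then remove the pseudo-header
 * contribution for IPv4 or IPv6.  Returns 0 on success, -1 when the
 * packet cannot be handled this way.
 */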
static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
		      netdev_features_t dev_features)
{
	__wsum hw_checksum = 0;

	void *hdr = (u8 *)va + sizeof(struct ethhdr);

	hw_checksum = csum_unfold((__force __sum16)cqe->checksum);

	if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
	    !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
		hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
		hdr += sizeof(struct vlan_hdr);
	}

	if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
		get_fixed_ipv4_csum(hw_checksum, skb, hdr);
#if IS_ENABLED(CONFIG_IPV6)
	else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
		if (unlikely(get_fixed_ipv6_csum(hw_checksum, skb, hdr)))
			return -1;
#endif
	return 0;
}

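/* RX completion processing: runs the XDP program (when attached) on each
 * frame, builds GRO skbs for traffic passed up the stack, and stops after
 * @budget completions.  Returns the number of completions processed.
 */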
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int factor = priv->cqe_factor;
	struct mlx4_en_rx_ring *ring;
	struct bpf_prog *xdp_prog;
	int cq_ring = cq->ring;
	bool doorbell_pending;
	struct mlx4_cqe *cqe;
	int polled = 0;
	int index;

	if (unlikely(!priv->port_up))
		return 0;

	if (unlikely(budget <= 0))
		return polled;

	ring = priv->rx_ring[cq_ring];

	/* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
	rcu_read_lock();
	xdp_prog = rcu_dereference(ring->xdp_prog);
	doorbell_pending = 0;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {
		struct mlx4_en_rx_alloc *frags;
		enum pkt_hash_types hash_type;
		struct sk_buff *skb;
		unsigned int length;
		int ip_summed;
		void *va;
		int nr;

		frags = ring->rx_info + (index << priv->log_rx_info);
		va = page_address(frags[0].page) + frags[0].page_offset;
		prefetchw(va);
		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		dma_rmb();

		/* Drop packet on bad receive or bad checksum */
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
						MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor syndrom:%d syndrom:%d\n",
			       ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
			       ((struct mlx4_err_cqe *)cqe)->syndrome);
			goto next;
		}
		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
			en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
			goto next;
		}

		/* Check if we need to drop the packet: SRIOV is not enabled
		 * and we are not performing the selftest, or flb is disabled
		 */
		if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
			const struct ethhdr *ethh = va;
			dma_addr_t dma;
			/* Get pointer to the first fragment, since we don't have
			 * an skb yet, and cast it to an ethhdr struct
			 */
			dma = frags[0].dma + frags[0].page_offset;
			dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
						DMA_FROM_DEVICE);

			if (is_multicast_ether_addr(ethh->h_dest)) {
				struct mlx4_mac_entry *entry;
				struct hlist_head *bucket;
				unsigned int mac_hash;

				/* Drop the packet, since HW loopback-ed it */
				mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				hlist_for_each_entry_rcu(entry, bucket, hlist) {
					if (ether_addr_equal_64bits(entry->mac,
								    ethh->h_source))
						goto next;
				}
			}
		}

		if (unlikely(priv->validate_loopback)) {
			validate_loopback(priv, va);
			goto next;
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		length -= ring->fcs_del;

		/* A bpf program gets first chance to drop the packet. It may
		 * read bytes but not past the end of the frag.
		 */
		if (xdp_prog) {
			struct xdp_buff xdp;
			dma_addr_t dma;
			void *orig_data;
			u32 act;

			dma = frags[0].dma + frags[0].page_offset;
			dma_sync_single_for_cpu(priv->ddev, dma,
						priv->frag_info[0].frag_size,
						DMA_FROM_DEVICE);

			xdp.data_hard_start = va - frags[0].page_offset;
			xdp.data = va;
			xdp.data_end = xdp.data + length;
			orig_data = xdp.data;

			act = bpf_prog_run_xdp(xdp_prog, &xdp);

			if (xdp.data != orig_data) {
				length = xdp.data_end - xdp.data;
				frags[0].page_offset = xdp.data -
					xdp.data_hard_start;
				va = xdp.data;
			}

			switch (act) {
			case XDP_PASS:
				break;
			case XDP_TX:
				if (likely(!mlx4_en_xmit_frame(ring, frags, dev,
							length, cq_ring,
							&doorbell_pending))) {
					frags[0].page = NULL;
					goto next;
				}
				trace_xdp_exception(dev, xdp_prog, act);
				goto xdp_drop_no_cnt; /* Drop on xmit failure */
			default:
				bpf_warn_invalid_xdp_action(act);
			case XDP_ABORTED:
				trace_xdp_exception(dev, xdp_prog, act);
			case XDP_DROP:
				ring->xdp_drop++;
xdp_drop_no_cnt:
				goto next;
			}
		}

		ring->bytes += length;
		ring->packets++;

		skb = napi_get_frags(&cq->napi);
		if (unlikely(!skb))
			goto next;

		if (unlikely(ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL)) {
			u64 timestamp = mlx4_en_get_cqe_ts(cqe);

			mlx4_en_fill_hwtstamps(priv->mdev, skb_hwtstamps(skb),
					       timestamp);
		}
		skb_record_rx_queue(skb, cq_ring);

		if (likely(dev->features & NETIF_F_RXCSUM)) {
			if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
						      MLX4_CQE_STATUS_UDP)) {
				if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
				    cqe->checksum == cpu_to_be16(0xffff)) {
					bool l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
						(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));

					ip_summed = CHECKSUM_UNNECESSARY;
					hash_type = PKT_HASH_TYPE_L4;
					if (l2_tunnel)
						skb->csum_level = 1;
					ring->csum_ok++;
				} else {
					goto csum_none;
				}
			} else {
				if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
				    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
							       MLX4_CQE_STATUS_IPV6))) {
					if (check_csum(cqe, skb, va, dev->features)) {
						goto csum_none;
					} else {
						ip_summed = CHECKSUM_COMPLETE;
						hash_type = PKT_HASH_TYPE_L3;
						ring->csum_complete++;
					}
				} else {
					goto csum_none;
				}
			}
		} else {
csum_none:
			ip_summed = CHECKSUM_NONE;
			hash_type = PKT_HASH_TYPE_L3;
			ring->csum_none++;
		}
		skb->ip_summed = ip_summed;
		if (dev->features & NETIF_F_RXHASH)
			skb_set_hash(skb,
				     be32_to_cpu(cqe->immed_rss_invalid),
				     hash_type);

		if ((cqe->vlan_my_qpn &
		     cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
		    (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       be16_to_cpu(cqe->sl_vid));
		else if ((cqe->vlan_my_qpn &
			  cpu_to_be32(MLX4_CQE_SVLAN_PRESENT_MASK)) &&
			 (dev->features & NETIF_F_HW_VLAN_STAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
					       be16_to_cpu(cqe->sl_vid));

		nr = mlx4_en_complete_rx_desc(priv, frags, skb, length);
		if (likely(nr)) {
			skb_shinfo(skb)->nr_frags = nr;
			skb->len = length;
			skb->data_len = length;
			napi_gro_frags(&cq->napi);
		} else {
			skb->vlan_tci = 0;
			skb_clear_hash(skb);
		}
next:
		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
		if (unlikely(++polled == budget))
			break;
	}

	rcu_read_unlock();

	if (likely(polled)) {
		if (doorbell_pending) {
			priv->tx_cq[TX_XDP][cq_ring]->xdp_busy = true;
			mlx4_en_xmit_doorbell(priv->tx_ring[TX_XDP][cq_ring]);
		}

		mlx4_cq_set_ci(&cq->mcq);
		wmb(); /* ensure HW sees CQ consumer before we post new buffers */
		ring->cons = cq->mcq.cons_index;
	}
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);

	mlx4_en_refill_rx_buffers(priv, ring);

	return polled;
}


void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);

	if (likely(priv->port_up))
		napi_schedule_irqoff(&cq->napi);
	else
		mlx4_en_arm_cq(priv, cq);
}

/* Rx CQ polling - called by NAPI */
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *xdp_tx_cq = NULL;
	bool clean_complete = true;
	int done;

	if (priv->tx_ring_num[TX_XDP]) {
		xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
		if (xdp_tx_cq->xdp_busy) {
			clean_complete = mlx4_en_process_tx_cq(dev, xdp_tx_cq,
							       budget);
			xdp_tx_cq->xdp_busy = !clean_complete;
		}
	}

	done = mlx4_en_process_rx_cq(dev, cq, budget);

	/* If we used up all the quota - we're probably not done yet... */
	if (done == budget || !clean_complete) {
		const struct cpumask *aff;
		struct irq_data *idata;
		int cpu_curr;

		/* in case we got here because of !clean_complete */
		done = budget;

		INC_PERF_COUNTER(priv->pstats.napi_quota);

		cpu_curr = smp_processor_id();
		idata = irq_desc_get_irq_data(cq->irq_desc);
		aff = irq_data_get_affinity_mask(idata);

		if (likely(cpumask_test_cpu(cpu_curr, aff)))
			return budget;

		/* Current cpu is not according to smp_irq_affinity -
		 * probably affinity changed. Need to stop this NAPI
		 * poll, and restart it on the right CPU.
		 * Try to avoid returning a too small value (like 0),
		 * to not fool net_rx_action() and its netdev_budget
		 */
		if (done)
			done--;
	}
	/* Done for now */
	if (likely(napi_complete_done(napi, done)))
		mlx4_en_arm_cq(priv, cq);
	return done;
}

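/* Compute the RX fragment layout for the current MTU: a single
 * page-per-packet fragment when XDP is enabled, otherwise up to
 * MLX4_EN_MAX_RX_FRAGS fragments whose strides are cache-line aligned
 * and padded so frames share pages efficiently.
 */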
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int eff_mtu = MLX4_EN_EFF_MTU(dev->mtu);
	int i = 0;

	/* bpf requires buffers to be set up as 1 packet per page.
	 * This only works when num_frags == 1.
	 */
	if (priv->tx_ring_num[TX_XDP]) {
		priv->frag_info[0].frag_size = eff_mtu;
		/* This will gain efficient xdp frame recycling at the
		 * expense of more costly truesize accounting
		 */
		priv->frag_info[0].frag_stride = PAGE_SIZE;
		priv->dma_dir = PCI_DMA_BIDIRECTIONAL;
		priv->rx_headroom = XDP_PACKET_HEADROOM;
		i = 1;
	} else {
		int frag_size_max = 2048, buf_size = 0;

		/* should not happen, right ? */
		if (eff_mtu > PAGE_SIZE + (MLX4_EN_MAX_RX_FRAGS - 1) * 2048)
			frag_size_max = PAGE_SIZE;

		while (buf_size < eff_mtu) {
			int frag_stride, frag_size = eff_mtu - buf_size;
			int pad, nb;

			if (i < MLX4_EN_MAX_RX_FRAGS - 1)
				frag_size = min(frag_size, frag_size_max);

			priv->frag_info[i].frag_size = frag_size;
			frag_stride = ALIGN(frag_size, SMP_CACHE_BYTES);
			/* We can only pack two 1536-byte frames into one 4K page.
			 * Therefore, each frame consumes more bytes (truesize).
			 */
			nb = PAGE_SIZE / frag_stride;
			pad = (PAGE_SIZE - nb * frag_stride) / nb;
			pad &= ~(SMP_CACHE_BYTES - 1);
			priv->frag_info[i].frag_stride = frag_stride + pad;

			buf_size += frag_size;
			i++;
		}
		priv->dma_dir = PCI_DMA_FROMDEVICE;
		priv->rx_headroom = 0;
	}

	priv->num_frags = i;
	priv->rx_skb_size = eff_mtu;
	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));

	en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
	       eff_mtu, priv->num_frags);
	for (i = 0; i < priv->num_frags; i++) {
		en_dbg(DRV,
		       priv,
		       "  frag:%d - size:%d stride:%d\n",
		       i,
		       priv->frag_info[i].frag_size,
		       priv->frag_info[i].frag_stride);
	}
}

/* RSS related functions */

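/* Allocate one RX QP, fill its context for the given ring and move it to
 * ready state.  When the firmware can keep the FCS, hardware FCS removal
 * is cancelled and ring->fcs_del records how many trailing bytes the
 * driver has to trim itself (none when NETIF_F_RXFCS is requested).
 */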
static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
				 struct mlx4_en_rx_ring *ring,
				 enum mlx4_qp_state *state,
				 struct mlx4_qp *qp)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_qp_context *context;
	int err = 0;

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed to allocate qp #%x\n", qpn);
		goto out;
	}
	qp->event = mlx4_en_sqp_event;

	memset(context, 0, sizeof *context);
	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
				qpn, ring->cqn, -1, context);
	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);

	/* Cancel FCS removal if FW allows */
	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
		context->param3 |= cpu_to_be32(1 << 29);
		if (priv->dev->features & NETIF_F_RXFCS)
			ring->fcs_del = 0;
		else
			ring->fcs_del = ETH_FCS_LEN;
	} else
		ring->fcs_del = 0;

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
	if (err) {
		mlx4_qp_remove(mdev->dev, qp);
		mlx4_qp_free(mdev->dev, qp);
	}
	mlx4_en_update_rx_prod_db(ring);
out:
	kfree(context);
	return err;
}

int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
{
	int err;
	u32 qpn;

	err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn,
				    MLX4_RESERVE_A0_QP);
	if (err) {
		en_err(priv, "Failed reserving drop qpn\n");
		return err;
	}
	err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed allocating drop qp\n");
		mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
		return err;
	}

	return 0;
}

void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
{
	u32 qpn;

	qpn = priv->drop_qp.qpn;
	mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
}

/* Allocate rx qp's and configure them according to rss map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	struct mlx4_qp_context context;
	struct mlx4_rss_context *rss_context;
	int rss_rings;
	void *ptr;
	u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
			MLX4_RSS_TCP_IPV6);
	int i, qpn;
	int err = 0;
	int good_qps = 0;
	u8 flags;

	en_dbg(DRV, priv, "Configuring rss steering\n");

	flags = priv->rx_ring_num == 1 ? MLX4_RESERVE_A0_QP : 0;
	err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
				    priv->rx_ring_num,
				    &rss_map->base_qpn, flags);
	if (err) {
		en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
		return err;
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		qpn = rss_map->base_qpn + i;
		err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i],
					    &rss_map->state[i],
					    &rss_map->qps[i]);
		if (err)
			goto rss_err;

		++good_qps;
	}

	if (priv->rx_ring_num == 1) {
		rss_map->indir_qp = &rss_map->qps[0];
		priv->base_qpn = rss_map->indir_qp->qpn;
		en_info(priv, "Optimized Non-RSS steering\n");
		return 0;
	}

	rss_map->indir_qp = kzalloc(sizeof(*rss_map->indir_qp), GFP_KERNEL);
	if (!rss_map->indir_qp) {
		err = -ENOMEM;
		goto rss_err;
	}

	/* Configure RSS indirection qp */
	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp,
			    GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed to allocate RSS indirection QP\n");
		goto rss_err;
	}

	rss_map->indir_qp->event = mlx4_en_sqp_event;
	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
				priv->rx_ring[0]->cqn, -1, &context);

	if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
		rss_rings = priv->rx_ring_num;
	else
		rss_rings = priv->prof->rss_rings;

	ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
					+ MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
	rss_context = ptr;
	rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
					    (rss_map->base_qpn));
	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
	if (priv->mdev->profile.udp_rss) {
		rss_mask |=  MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
		rss_context->base_qpn_udp = rss_context->default_qpn;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		en_info(priv, "Setting RSS context tunnel type to RSS on inner headers\n");
		rss_mask |= MLX4_RSS_BY_INNER_HEADERS;
	}

	rss_context->flags = rss_mask;
	rss_context->hash_fn = MLX4_RSS_HASH_TOP;
	if (priv->rss_hash_fn == ETH_RSS_HASH_XOR) {
		rss_context->hash_fn = MLX4_RSS_HASH_XOR;
	} else if (priv->rss_hash_fn == ETH_RSS_HASH_TOP) {
		rss_context->hash_fn = MLX4_RSS_HASH_TOP;
		memcpy(rss_context->rss_key, priv->rss_key,
		       MLX4_EN_RSS_KEY_SIZE);
	} else {
		en_err(priv, "Unknown RSS hash function requested\n");
		err = -EINVAL;
		goto indir_err;
	}

	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
			       rss_map->indir_qp, &rss_map->indir_state);
	if (err)
		goto indir_err;

	return 0;

indir_err:
	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, rss_map->indir_qp);
	kfree(rss_map->indir_qp);
	rss_map->indir_qp = NULL;
rss_err:
	for (i = 0; i < good_qps; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
	return err;
}

void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	int i;

	if (priv->rx_ring_num > 1) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
			       MLX4_QP_STATE_RST, NULL, 0, 0,
			       rss_map->indir_qp);
		mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
		mlx4_qp_free(mdev->dev, rss_map->indir_qp);
		kfree(rss_map->indir_qp);
		rss_map->indir_qp = NULL;
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
}